From 046e14afb3561523efd0047c35c20793ae5f8848 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Feb 2020 15:43:36 +0200 Subject: gpio: Avoid kernel.h inclusion where it's possible Inclusion of kernel.h increases the mess of header dependencies, so avoid including it where possible. Besides that, clean up a few other inclusions inside the GPIO subsystem headers. This includes: - removal of the pin control bits (forward declaration and header) from linux/gpio.h - removal of of.h from asm-generic/gpio.h - use of explicit headers in gpio/consumer.h - addition of a FIXME note about the gpio.h inclusion in of_gpio.h Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200205134336.20197-1-andriy.shevchenko@linux.intel.com Signed-off-by: Linus Walleij --- include/linux/gpio.h | 2 -- include/linux/gpio/consumer.h | 5 ++++- include/linux/of_gpio.h | 9 ++++++--- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 2157717c2136..008ad3ee56b7 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -102,11 +102,9 @@ void devm_gpio_free(struct device *dev, unsigned int gpio); #include #include #include -#include struct device; struct gpio_chip; -struct pinctrl_dev; static inline bool gpio_is_valid(int number) { diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index bf2d017dd7b7..0a72fccf60ff 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -2,9 +2,10 @@ #ifndef __LINUX_GPIO_CONSUMER_H #define __LINUX_GPIO_CONSUMER_H +#include #include +#include #include -#include struct device; @@ -189,6 +190,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, #else /* CONFIG_GPIOLIB */ +#include + static inline int gpiod_count(struct device *dev, const char *con_id) { return 0; diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 16967390a3fe..f821095218b0 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -11,9 +11,8 @@ #define __LINUX_OF_GPIO_H #include -#include -#include -#include +#include +#include /* FIXME: Shouldn't be here */ #include struct device_node; @@ -34,6 +33,8 @@ enum of_gpio_flags { #ifdef CONFIG_OF_GPIO +#include + /* * OF GPIO chip for memory mapped banks */ @@ -63,6 +64,8 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); #else /* CONFIG_OF_GPIO */ +#include + /* Drivers may not strictly depend on the GPIO support, so let them link. */ static inline int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) -- cgit v1.2.3-58-ga151 From ec2daf6e33f9f9113ba085b6ff88592907b6f1ce Mon Sep 17 00:00:00 2001 From: Jon Flatley Date: Fri, 24 Jan 2020 15:18:32 -0800 Subject: platform: chrome: Add cros-usbpd-notify driver ChromiumOS uses an ACPI device with HID "GOOG0003" for power-delivery-related events. The existing cros-usbpd-charger driver relies on these events without ever actually receiving them on ACPI platforms. This is because in the ChromeOS kernel trees, the GOOG0003 device is owned by an ACPI driver that offers firmware updates to USB-C chargers. Introduce a new platform driver under cros-ec, the ChromeOS embedded controller, that handles these PD events and dispatches them appropriately over a notifier chain to all drivers that use them.
On platforms that don't have the ACPI device defined, the driver gets instantiated for ECs which support the EC_FEATURE_USB_PD feature bit, and the notification events will get delivered using the MKBP event handling mechanism. Co-developed-by: Prashant Malani Reviewed-by: Gwendal Grignou Reviewed-by: Benson Leung Signed-off-by: Jon Flatley Signed-off-by: Prashant Malani Acked-by: Enric Balletbo i Serra Signed-off-by: Benson Leung --- drivers/platform/chrome/Kconfig | 14 ++ drivers/platform/chrome/Makefile | 1 + drivers/platform/chrome/cros_usbpd_notify.c | 169 ++++++++++++++++++++++++ include/linux/platform_data/cros_usbpd_notify.h | 17 +++ 4 files changed, 201 insertions(+) create mode 100644 drivers/platform/chrome/cros_usbpd_notify.c create mode 100644 include/linux/platform_data/cros_usbpd_notify.h (limited to 'include/linux') diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index 5f57282a28da..15fc8b8a2db8 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -226,6 +226,20 @@ config CROS_USBPD_LOGGER To compile this driver as a module, choose M here: the module will be called cros_usbpd_logger. +config CROS_USBPD_NOTIFY + tristate "ChromeOS Type-C power delivery event notifier" + depends on MFD_CROS_EC_DEV + default MFD_CROS_EC_DEV + help + If you say Y here, you get support for Type-C PD event notifications + from the ChromeOS EC. On ACPI platforms this driver will bind to the + GOOG0003 ACPI device, and on platforms which don't have this device it + will get initialized on ECs which support the feature + EC_FEATURE_USB_PD. + + To compile this driver as a module, choose M here: the + module will be called cros_usbpd_notify. + source "drivers/platform/chrome/wilco_ec/Kconfig" endif # CHROMEOS_PLATFORMS diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index aacd5920d8a1..f6465f8ef0b5 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -22,5 +22,6 @@ obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o +obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o obj-$(CONFIG_WILCO_EC) += wilco_ec/ diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c new file mode 100644 index 000000000000..3851bbd6e9a3 --- /dev/null +++ b/drivers/platform/chrome/cros_usbpd_notify.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Google LLC + * + * This driver serves as the receiver of cros_ec PD host events. + */ + +#include +#include +#include +#include +#include + +#define DRV_NAME "cros-usbpd-notify" +#define ACPI_DRV_NAME "GOOG0003" + +static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list); + +/** + * cros_usbpd_register_notify - Register a notifier callback for PD events. + * @nb: Notifier block pointer to register + * + * On ACPI platforms this corresponds to host events on the ECPD + * "GOOG0003" ACPI device. On non-ACPI platforms this will filter mkbp events + * for USB PD events. + * + * Return: 0 on success or negative error code. + */ +int cros_usbpd_register_notify(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&cros_usbpd_notifier_list, + nb); +} +EXPORT_SYMBOL_GPL(cros_usbpd_register_notify); + +/** + * cros_usbpd_unregister_notify - Unregister notifier callback for PD events.
+ * @nb: Notifier block pointer to unregister + * + * Unregister a notifier callback that was previously registered with + * cros_usbpd_register_notify(). + */ +void cros_usbpd_unregister_notify(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&cros_usbpd_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(cros_usbpd_unregister_notify); + +#ifdef CONFIG_ACPI + +static int cros_usbpd_notify_add_acpi(struct acpi_device *adev) +{ + return 0; +} + +static void cros_usbpd_notify_acpi(struct acpi_device *adev, u32 event) +{ + blocking_notifier_call_chain(&cros_usbpd_notifier_list, event, NULL); +} + +static const struct acpi_device_id cros_usbpd_notify_acpi_device_ids[] = { + { ACPI_DRV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, cros_usbpd_notify_acpi_device_ids); + +static struct acpi_driver cros_usbpd_notify_acpi_driver = { + .name = DRV_NAME, + .class = DRV_NAME, + .ids = cros_usbpd_notify_acpi_device_ids, + .ops = { + .add = cros_usbpd_notify_add_acpi, + .notify = cros_usbpd_notify_acpi, + }, +}; + +#endif /* CONFIG_ACPI */ + +static int cros_usbpd_notify_plat(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *data) +{ + struct cros_ec_device *ec_dev = (struct cros_ec_device *)data; + u32 host_event = cros_ec_get_host_event(ec_dev); + + if (!host_event) + return NOTIFY_BAD; + + if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) { + blocking_notifier_call_chain(&cros_usbpd_notifier_list, + host_event, NULL); + return NOTIFY_OK; + } + return NOTIFY_DONE; +} + +static int cros_usbpd_notify_probe_plat(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent); + struct notifier_block *nb; + int ret; + + nb = devm_kzalloc(dev, sizeof(*nb), GFP_KERNEL); + if (!nb) + return -ENOMEM; + + nb->notifier_call = cros_usbpd_notify_plat; + dev_set_drvdata(dev, nb); + + ret = blocking_notifier_chain_register(&ecdev->ec_dev->event_notifier, + nb); + if (ret < 0) { + dev_err(dev, "Failed to register notifier\n"); + return ret; + } + + return 0; +} + +static int cros_usbpd_notify_remove_plat(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent); + struct notifier_block *nb = + (struct notifier_block *)dev_get_drvdata(dev); + + blocking_notifier_chain_unregister(&ecdev->ec_dev->event_notifier, nb); + + return 0; +} + +static struct platform_driver cros_usbpd_notify_plat_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = cros_usbpd_notify_probe_plat, + .remove = cros_usbpd_notify_remove_plat, +}; + +static int __init cros_usbpd_notify_init(void) +{ + int ret; + + ret = platform_driver_register(&cros_usbpd_notify_plat_driver); + if (ret < 0) + return ret; + +#ifdef CONFIG_ACPI + acpi_bus_register_driver(&cros_usbpd_notify_acpi_driver); +#endif + return 0; +} + +static void __exit cros_usbpd_notify_exit(void) +{ +#ifdef CONFIG_ACPI + acpi_bus_unregister_driver(&cros_usbpd_notify_acpi_driver); +#endif + platform_driver_unregister(&cros_usbpd_notify_plat_driver); +} + +module_init(cros_usbpd_notify_init); +module_exit(cros_usbpd_notify_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ChromeOS power delivery notifier device"); +MODULE_AUTHOR("Jon Flatley "); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/include/linux/platform_data/cros_usbpd_notify.h b/include/linux/platform_data/cros_usbpd_notify.h new file mode 100644 index 000000000000..4f2791722b6d --- /dev/null +++ 
b/include/linux/platform_data/cros_usbpd_notify.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ChromeOS EC Power Delivery Notifier Driver + * + * Copyright 2020 Google LLC + */ + +#ifndef __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H +#define __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H + +#include + +int cros_usbpd_register_notify(struct notifier_block *nb); + +void cros_usbpd_unregister_notify(struct notifier_block *nb); + +#endif /* __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H */ -- cgit v1.2.3-58-ga151 From 8673e944b50ec6e5afd4f599cf12b2798b629f3d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 5 Feb 2020 11:48:28 +0200 Subject: platform/chrome: wilco_ec: Platform data shouldn't include kernel.h Replace it with the appropriate types.h. There is also no need to include device.h; mutex.h is what is actually needed. For pointers to unknown structures, use forward declarations. In the *.c files we need to include all headers that provide the APIs used in the module. Signed-off-by: Andy Shevchenko Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/wilco_ec/properties.c | 3 +++ drivers/platform/chrome/wilco_ec/sysfs.c | 4 ++++ include/linux/platform_data/wilco-ec.h | 8 ++++++-- 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c index 62f27610dd33..c2bf4c95c5d2 100644 --- a/drivers/platform/chrome/wilco_ec/properties.c +++ b/drivers/platform/chrome/wilco_ec/properties.c @@ -3,8 +3,11 @@ * Copyright 2019 Google LLC */ +#include +#include #include #include +#include #include /* Operation code; what the EC should do with the property */ diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c index f0d174b6bb21..3c587b4054a5 100644 --- a/drivers/platform/chrome/wilco_ec/sysfs.c +++ b/drivers/platform/chrome/wilco_ec/sysfs.c @@ -8,8 +8,12 @@ * See Documentation/ABI/testing/sysfs-platform-wilco-ec for more information. */ +#include +#include #include +#include #include +#include #define CMD_KB_CMOS 0x7C #define SUB_CMD_KB_CMOS_AUTO_ON 0x03 diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h index afede15a95bf..25f46a939637 100644 --- a/include/linux/platform_data/wilco-ec.h +++ b/include/linux/platform_data/wilco-ec.h @@ -8,8 +8,8 @@ #ifndef WILCO_EC_H #define WILCO_EC_H -#include -#include +#include +#include /* Message flags for using the mailbox() interface */ #define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */ @@ -17,6 +17,10 @@ /* Normal commands have a maximum 32 bytes of data */ #define EC_MAILBOX_DATA_SIZE 32 +struct device; +struct resource; +struct platform_device; + /** * struct wilco_ec_device - Wilco Embedded Controller handle. * @dev: Device handle. -- cgit v1.2.3-58-ga151 From 42cd0ab476e2daffc23982c37822a78f9a53cdd5 Mon Sep 17 00:00:00 2001 From: Yicheng Li Date: Mon, 3 Feb 2020 14:53:56 -0800 Subject: platform/chrome: cros_ec: Query EC protocol version if EC transitions between RO/RW The RO and RW portions of the EC may implement different EC protocol versions. If the EC transitions between RO and RW but the AP does not reboot (this is true for the fingerprint microcontroller / cros_fp, but not for the main EC / cros_ec), the AP still uses the protocol version queried before the transition, which can cause problems.
In the case of the fingerprint microcontroller, this causes the AP to send the wrong version of EC_CMD_GET_NEXT_EVENT to RO in the interrupt handler, which in turn prevents RO from clearing the interrupt line to the AP, resulting in an infinite loop. Once an EC_HOST_EVENT_INTERFACE_READY event is received, we know that there might have been a transition between RO and RW, so re-query the protocol. Signed-off-by: Yicheng Li Tested-by: Marek Szyprowski Reviewed-by: Gwendal Grignou Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/cros_ec.c | 30 +++++++++++++++++++++++++++++ include/linux/platform_data/cros_ec_proto.h | 4 ++++ 2 files changed, 34 insertions(+) (limited to 'include/linux') diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index 6fc8f2c3ac51..7ee43b2e0654 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -138,6 +138,24 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event) return ret; } +static int cros_ec_ready_event(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *_notify) +{ + struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device, + notifier_ready); + u32 host_event = cros_ec_get_host_event(ec_dev); + + if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)) { + mutex_lock(&ec_dev->lock); + cros_ec_query_all(ec_dev); + mutex_unlock(&ec_dev->lock); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + /** * cros_ec_register() - Register a new ChromeOS EC, using the provided info. * @ec_dev: Device to register. @@ -237,6 +255,18 @@ int cros_ec_register(struct cros_ec_device *ec_dev) dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec", err); + if (ec_dev->mkbp_event_supported) { + /* + * Register the notifier for EC_HOST_EVENT_INTERFACE_READY + * event. + */ + ec_dev->notifier_ready.notifier_call = cros_ec_ready_event; + err = blocking_notifier_chain_register(&ec_dev->event_notifier, + &ec_dev->notifier_ready); + if (err) + return err; + } + dev_info(dev, "Chrome EC device registered\n"); return 0; diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index ba5914770191..383243326676 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -125,6 +125,9 @@ struct cros_ec_command { * @host_event_wake_mask: Mask of host events that cause wake from suspend. * @last_event_time: exact time from the hard irq when we got notified of * a new event. + * @notifier_ready: The notifier_block to let the kernel re-query EC + * communication protocol when the EC sends + * EC_HOST_EVENT_INTERFACE_READY. * @ec: The platform_device used by the mfd driver to interface with the * main EC. * @pd: The platform_device used by the mfd driver to interface with the * @@ -166,6 +169,7 @@ struct cros_ec_device { u32 host_event_wake_mask; u32 last_resume_result; ktime_t last_event_time; + struct notifier_block notifier_ready; /* The platform devices used by the mfd driver */ struct platform_device *ec; -- cgit v1.2.3-58-ga151 From 3f2e4c11e136e2cffd60dbc840b59ff65f017328 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 17 Dec 2019 18:48:55 +0100 Subject: kfifo: provide noirqsave variants of spinlocked in and out helpers Provide variants of spinlocked kfifo_in() and kfifo_out() routines which don't disable interrupts.
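A minimal usage sketch (illustrative only, not part of this patch; my_fifo, my_lock and the surrounding functions are hypothetical): a hard-IRQ producer already runs with interrupts disabled, so it can use the cheaper _noirqsave variant, while a process-context consumer keeps using the irqsave variant.

#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

static DEFINE_KFIFO(my_fifo, u32, 128);	/* hypothetical fifo of 128 u32s */
static DEFINE_SPINLOCK(my_lock);

/* Producer: hard-IRQ context. Interrupts are already disabled here, so the
 * plain spin_lock()/spin_unlock() done by the _noirqsave variant is enough. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	u32 val = 42;	/* stand-in for data read from the hardware */

	kfifo_in_spinlocked_noirqsave(&my_fifo, &val, 1, &my_lock);
	return IRQ_HANDLED;
}

/* Consumer: process context, so it must still disable interrupts while
 * holding the lock to keep the IRQ producer above out. */
static void my_reader(void)
{
	u32 val;

	if (kfifo_out_spinlocked(&my_fifo, &val, 1, &my_lock))
		pr_info("got %u\n", val);
}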
Signed-off-by: Bartosz Golaszewski Acked-by: Stefani Seibold --- include/linux/kfifo.h | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index fc4b0b10210f..123c200ed7cb 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -517,6 +517,26 @@ __kfifo_uint_must_check_helper( \ __ret; \ }) +/** + * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for + * locking, don't disable interrupts + * @fifo: address of the fifo to be used + * @buf: the data to be added + * @n: number of elements to be added + * @lock: pointer to the spinlock to use for locking + * + * This is a variant of kfifo_in_spinlocked() but uses spin_lock/unlock() + * for locking and doesn't disable interrupts. + */ +#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \ +({ \ + unsigned int __ret; \ + spin_lock(lock); \ + __ret = kfifo_in(fifo, buf, n); \ + spin_unlock(lock); \ + __ret; \ +}) + /* alias for kfifo_in_spinlocked, will be removed in a future release */ #define kfifo_in_locked(fifo, buf, n, lock) \ kfifo_in_spinlocked(fifo, buf, n, lock) @@ -569,6 +589,28 @@ __kfifo_uint_must_check_helper( \ }) \ ) +/** + * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock + * for locking, don't disable interrupts + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * @lock: pointer to the spinlock to use for locking + * + * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock() + * for locking and doesn't disable interrupts. + */ +#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \ +__kfifo_uint_must_check_helper( \ +({ \ + unsigned int __ret; \ + spin_lock(lock); \ + __ret = kfifo_out(fifo, buf, n); \ + spin_unlock(lock); \ + __ret; \ +}) \ +) + /* alias for kfifo_out_spinlocked, will be removed in a future release */ #define kfifo_out_locked(fifo, buf, n, lock) \ kfifo_out_spinlocked(fifo, buf, n, lock) -- cgit v1.2.3-58-ga151 From 5195a89e8583bba43ec13871a7226763e401b44e Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 17 Dec 2019 11:30:59 +0100 Subject: kfifo: provide kfifo_is_empty_spinlocked() Provide two spinlocked versions of kfifo_is_empty() to be used with spinlocked variants of kfifo_in() and kfifo_out(). 
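Continuing the hypothetical my_fifo/my_lock example from the sketch above (again illustrative, not part of the patch), a consumer might drain the fifo like this; the emptiness check uses the same spinlock that serializes kfifo_in()/kfifo_out(), and kfifo_out_spinlocked() re-checks under the lock anyway, so a racing producer or second consumer is handled:

static void my_drain(void)
{
	u32 val;

	while (!kfifo_is_empty_spinlocked(&my_fifo, &my_lock)) {
		if (!kfifo_out_spinlocked(&my_fifo, &val, 1, &my_lock))
			break;	/* another consumer got there first */
		pr_info("drained %u\n", val);
	}
}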
Signed-off-by: Bartosz Golaszewski Acked-by: Stefani Seibold --- include/linux/kfifo.h | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 123c200ed7cb..86249476b57f 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -246,6 +246,37 @@ __kfifo_int_must_check_helper(int val) __tmpq->kfifo.in == __tmpq->kfifo.out; \ }) +/** + * kfifo_is_empty_spinlocked - returns true if the fifo is empty using + * a spinlock for locking + * @fifo: address of the fifo to be used + * @lock: spinlock to be used for locking + */ +#define kfifo_is_empty_spinlocked(fifo, lock) \ +({ \ + unsigned long __flags; \ + bool __ret; \ + spin_lock_irqsave(lock, __flags); \ + __ret = kfifo_is_empty(fifo); \ + spin_unlock_irqrestore(lock, __flags); \ + __ret; \ +}) + +/** + * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty + * using a spinlock for locking, doesn't disable interrupts + * @fifo: address of the fifo to be used + * @lock: spinlock to be used for locking + */ +#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \ +({ \ + bool __ret; \ + spin_lock(lock); \ + __ret = kfifo_is_empty(fifo); \ + spin_unlock(lock); \ + __ret; \ +}) + /** * kfifo_is_full - returns true if the fifo is full * @fifo: address of the fifo to be used -- cgit v1.2.3-58-ga151 From f43caa2adc96fc9c95fd77eef63cdff86ebf33cb Mon Sep 17 00:00:00 2001 From: Michal Koutný Date: Fri, 24 Jan 2020 12:40:16 +0100 Subject: cgroup: Clean up css_set task traversal css_task_iter stores a pointer to the head of each iterable list; this dates back to commit 0f0a2b4fa621 ("cgroup: reorganize css_task_iter"), when we did not store cur_cset. Let us use the list heads in cur_cset directly and streamline css_task_iter_advance_css_set() a bit. There is no intentional functional change.
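For context, a sketch of the consumer-facing iteration whose internals this patch streamlines (illustrative; my_dump_css_tasks is hypothetical): the iterator walks each css_set's tasks, mg_tasks and dying_tasks lists, which after this change are reached through it->cur_cset rather than three cached head pointers.

static void my_dump_css_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(css, 0, &it);	/* 0: no CSS_TASK_ITER_* flags */
	while ((task = css_task_iter_next(&it)))
		pr_info("task: %s\n", task->comm);
	css_task_iter_end(&it);
}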
Signed-off-by: Michal Koutný Signed-off-by: Tejun Heo --- include/linux/cgroup.h | 3 --- kernel/cgroup/cgroup.c | 61 +++++++++++++++++++++++--------------------------- 2 files changed, 28 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e75d2191226b..f1219b927817 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -58,9 +58,6 @@ struct css_task_iter { struct list_head *tcset_head; struct list_head *task_pos; - struct list_head *tasks_head; - struct list_head *mg_tasks_head; - struct list_head *dying_tasks_head; struct list_head *cur_tasks_head; struct css_set *cur_cset; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c719a4154d6d..b4c4c4fbd6de 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4391,29 +4391,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it) lockdep_assert_held(&css_set_lock); - /* Advance to the next non-empty css_set */ - do { - cset = css_task_iter_next_css_set(it); - if (!cset) { - it->task_pos = NULL; - return; + /* Advance to the next non-empty css_set and find first non-empty tasks list*/ + while ((cset = css_task_iter_next_css_set(it))) { + if (!list_empty(&cset->tasks)) { + it->cur_tasks_head = &cset->tasks; + break; + } else if (!list_empty(&cset->mg_tasks)) { + it->cur_tasks_head = &cset->mg_tasks; + break; + } else if (!list_empty(&cset->dying_tasks)) { + it->cur_tasks_head = &cset->dying_tasks; + break; } - } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks)); - - if (!list_empty(&cset->tasks)) { - it->task_pos = cset->tasks.next; - it->cur_tasks_head = &cset->tasks; - } else if (!list_empty(&cset->mg_tasks)) { - it->task_pos = cset->mg_tasks.next; - it->cur_tasks_head = &cset->mg_tasks; - } else { - it->task_pos = cset->dying_tasks.next; - it->cur_tasks_head = &cset->dying_tasks; } - - it->tasks_head = &cset->tasks; - it->mg_tasks_head = &cset->mg_tasks; - it->dying_tasks_head = &cset->dying_tasks; + if (!cset) { + it->task_pos = NULL; + return; + } + it->task_pos = it->cur_tasks_head->next; /* * We don't keep css_sets locked across iteration steps and thus @@ -4458,24 +4453,24 @@ static void css_task_iter_advance(struct css_task_iter *it) repeat: if (it->task_pos) { /* - * Advance iterator to find next entry. cset->tasks is - * consumed first and then ->mg_tasks. After ->mg_tasks, - * we move onto the next cset. + * Advance iterator to find next entry. We go through cset + * tasks, mg_tasks and dying_tasks, when consumed we move onto + * the next cset. 
*/ if (it->flags & CSS_TASK_ITER_SKIPPED) it->flags &= ~CSS_TASK_ITER_SKIPPED; else it->task_pos = it->task_pos->next; - if (it->task_pos == it->tasks_head) { - it->task_pos = it->mg_tasks_head->next; - it->cur_tasks_head = it->mg_tasks_head; + if (it->task_pos == &it->cur_cset->tasks) { + it->cur_tasks_head = &it->cur_cset->mg_tasks; + it->task_pos = it->cur_tasks_head->next; } - if (it->task_pos == it->mg_tasks_head) { - it->task_pos = it->dying_tasks_head->next; - it->cur_tasks_head = it->dying_tasks_head; + if (it->task_pos == &it->cur_cset->mg_tasks) { + it->cur_tasks_head = &it->cur_cset->dying_tasks; + it->task_pos = it->cur_tasks_head->next; } - if (it->task_pos == it->dying_tasks_head) + if (it->task_pos == &it->cur_cset->dying_tasks) css_task_iter_advance_css_set(it); } else { /* called from start, proceed to the first cset */ @@ -4493,12 +4488,12 @@ repeat: goto repeat; /* and dying leaders w/o live member threads */ - if (it->cur_tasks_head == it->dying_tasks_head && + if (it->cur_tasks_head == &it->cur_cset->dying_tasks && !atomic_read(&task->signal->live)) goto repeat; } else { /* skip all dying ones */ - if (it->cur_tasks_head == it->dying_tasks_head) + if (it->cur_tasks_head == &it->cur_cset->dying_tasks) goto repeat; } } -- cgit v1.2.3-58-ga151 From a49e4629b5edf1db856de05fbf1aae05502ef1af Mon Sep 17 00:00:00 2001 From: Prateek Sood Date: Fri, 24 Jan 2020 20:37:29 +0530 Subject: cpuset: Make cpuset hotplug synchronous Convert cpuset_hotplug_workfn() into synchronous call for cpu hotplug path. For memory hotplug path it still gets queued as a work item. Since cpuset_hotplug_workfn() can be made synchronous for cpu hotplug path, it is not required to wait for cpuset hotplug while thawing processes. Signed-off-by: Prateek Sood Signed-off-by: Tejun Heo --- include/linux/cpuset.h | 3 --- kernel/cgroup/cpuset.c | 31 +++++++++++++++++++------------ kernel/power/process.c | 2 -- 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 04c20de66afc..cede4cb98b78 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -54,7 +54,6 @@ extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); -extern void cpuset_wait_for_hotplug(void); extern void cpuset_read_lock(void); extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); @@ -176,8 +175,6 @@ static inline void cpuset_update_active_cpus(void) partition_sched_domains(1, NULL, NULL); } -static inline void cpuset_wait_for_hotplug(void) { } - static inline void cpuset_read_lock(void) { } static inline void cpuset_read_unlock(void) { } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 58f5073acff7..cafd4d2ff882 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -3101,7 +3101,7 @@ update_tasks: } /** - * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset + * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset * * This function is called after either CPU or memory configuration has * changed and updates cpuset accordingly. The top_cpuset is always @@ -3116,7 +3116,7 @@ update_tasks: * Note that CPU offlining during suspend is ignored. We don't modify * cpusets across suspend/resume cycles at all. 
*/ -static void cpuset_hotplug_workfn(struct work_struct *work) +static void cpuset_hotplug(bool use_cpu_hp_lock) { static cpumask_t new_cpus; static nodemask_t new_mems; @@ -3201,25 +3201,32 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* rebuild sched domains if cpus_allowed has changed */ if (cpus_updated || force_rebuild) { force_rebuild = false; - rebuild_sched_domains(); + if (use_cpu_hp_lock) + rebuild_sched_domains(); + else { + /* Acquiring cpu_hotplug_lock is not required. + * When cpuset_hotplug() is called in hotplug path, + * cpu_hotplug_lock is held by the hotplug context + * which is waiting for cpuhp_thread_fun to indicate + * completion of callback. + */ + percpu_down_write(&cpuset_rwsem); + rebuild_sched_domains_locked(); + percpu_up_write(&cpuset_rwsem); + } } free_cpumasks(NULL, ptmp); } -void cpuset_update_active_cpus(void) +static void cpuset_hotplug_workfn(struct work_struct *work) { - /* - * We're inside cpu hotplug critical region which usually nests - * inside cgroup synchronization. Bounce actual hotplug processing - * to a work item to avoid reverse locking order. - */ - schedule_work(&cpuset_hotplug_work); + cpuset_hotplug(true); } -void cpuset_wait_for_hotplug(void) +void cpuset_update_active_cpus(void) { - flush_work(&cpuset_hotplug_work); + cpuset_hotplug(false); } /* diff --git a/kernel/power/process.c b/kernel/power/process.c index 4b6a54da7e65..08f7019357ee 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -204,8 +204,6 @@ void thaw_processes(void) __usermodehelper_set_disable_depth(UMH_FREEZING); thaw_workqueues(); - cpuset_wait_for_hotplug(); - read_lock(&tasklist_lock); for_each_process_thread(g, p) { /* No other threads should have PF_SUSPEND_TASK set */ -- cgit v1.2.3-58-ga151 From ef2c41cf38a7559bbf91af42d5b6a4429db8fc68 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Wed, 5 Feb 2020 14:26:22 +0100 Subject: clone3: allow spawning processes into cgroups This adds support for creating a process in a different cgroup than its parent. Callers can limit and account processes and threads right from the moment they are spawned: - A service manager can directly spawn new services into dedicated cgroups. - A process can be directly created in a frozen cgroup and will be frozen as well. - The initial accounting jitter experienced by process supervisors and daemons is eliminated with this. - Threaded applications or even thread implementations can choose to create a specific cgroup layout where each thread is spawned directly into a dedicated cgroup. This feature is limited to the unified hierarchy. Callers need to pass a directory file descriptor for the target cgroup. The caller can choose to pass an O_PATH file descriptor. All usual migration restrictions apply, i.e. there can be no processes in inner nodes. In general, creating a process directly in a target cgroup adheres to all migration restrictions. One of the biggest advantages of this feature is that CLONE_INTO_CGROUP does not need to grab the write side of the global cgroup_threadgroup_rwsem. This global lock makes moving tasks/threads around super expensive. With clone3() this lock is avoided.
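A userspace sketch of the new interface (illustrative; it assumes kernel headers that define CLONE_INTO_CGROUP and __NR_clone3, and spawn_into_cgroup is a hypothetical helper): the caller opens the target cgroup directory and passes the fd in clone_args.cgroup.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/sched.h>	/* struct clone_args, CLONE_INTO_CGROUP */

static pid_t spawn_into_cgroup(const char *cgroup_dir)
{
	struct clone_args args;
	int cgroup_fd = open(cgroup_dir, O_DIRECTORY | O_CLOEXEC);
	pid_t pid;

	if (cgroup_fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_INTO_CGROUP;
	args.exit_signal = SIGCHLD;
	args.cgroup = cgroup_fd;

	/* There is no glibc wrapper for clone3(), so use the raw syscall. */
	pid = syscall(__NR_clone3, &args, sizeof(args));
	/* pid == 0: we are the child, already running in the target cgroup. */
	close(cgroup_fd);
	return pid;
}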
Cc: Tejun Heo Cc: Ingo Molnar Cc: Oleg Nesterov Cc: Johannes Weiner Cc: Li Zefan Cc: Peter Zijlstra Cc: cgroups@vger.kernel.org Signed-off-by: Christian Brauner Signed-off-by: Tejun Heo --- include/linux/cgroup-defs.h | 5 +- include/linux/cgroup.h | 20 +++-- include/linux/sched/task.h | 4 + include/uapi/linux/sched.h | 5 ++ kernel/cgroup/cgroup.c | 191 ++++++++++++++++++++++++++++++++++++++------ kernel/cgroup/pids.c | 15 +++- kernel/fork.c | 13 ++- 7 files changed, 214 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 63097cb243cb..68c391f451d1 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -628,8 +628,9 @@ struct cgroup_subsys { void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); void (*post_attach)(void); - int (*can_fork)(struct task_struct *task); - void (*cancel_fork)(struct task_struct *task); + int (*can_fork)(struct task_struct *task, + struct css_set *cset); + void (*cancel_fork)(struct task_struct *task, struct css_set *cset); void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); void (*release)(struct task_struct *task); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f1219b927817..4598e4da6b1b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -27,6 +27,8 @@ #include +struct kernel_clone_args; + #ifdef CONFIG_CGROUPS /* @@ -119,9 +121,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); void cgroup_fork(struct task_struct *p); -extern int cgroup_can_fork(struct task_struct *p); -extern void cgroup_cancel_fork(struct task_struct *p); -extern void cgroup_post_fork(struct task_struct *p); +extern int cgroup_can_fork(struct task_struct *p, + struct kernel_clone_args *kargs); +extern void cgroup_cancel_fork(struct task_struct *p, + struct kernel_clone_args *kargs); +extern void cgroup_post_fork(struct task_struct *p, + struct kernel_clone_args *kargs); void cgroup_exit(struct task_struct *p); void cgroup_release(struct task_struct *p); void cgroup_free(struct task_struct *p); @@ -705,9 +710,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { return -EINVAL; } static inline void cgroup_fork(struct task_struct *p) {} -static inline int cgroup_can_fork(struct task_struct *p) { return 0; } -static inline void cgroup_cancel_fork(struct task_struct *p) {} -static inline void cgroup_post_fork(struct task_struct *p) {} +static inline int cgroup_can_fork(struct task_struct *p, + struct kernel_clone_args *kargs) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p, + struct kernel_clone_args *kargs) {} +static inline void cgroup_post_fork(struct task_struct *p, + struct kernel_clone_args *kargs) {} static inline void cgroup_exit(struct task_struct *p) {} static inline void cgroup_release(struct task_struct *p) {} static inline void cgroup_free(struct task_struct *p) {} diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index f1879884238e..38359071236a 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -13,6 +13,7 @@ struct task_struct; struct rusage; union thread_union; +struct css_set; /* All the bits taken by the old clone syscall. 
*/ #define CLONE_LEGACY_FLAGS 0xffffffffULL @@ -29,6 +30,9 @@ struct kernel_clone_args { pid_t *set_tid; /* Number of elements in *set_tid */ size_t set_tid_size; + int cgroup; + struct cgroup *cgrp; + struct css_set *cset; }; /* diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index 2e3bc22c6f20..3bac0a8ceab2 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -35,6 +35,7 @@ /* Flags for the clone3() syscall. */ #define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */ +#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */ /* * cloning flags intersect with CSIGNAL so can be used with unshare and clone3 @@ -81,6 +82,8 @@ * @set_tid_size: This defines the size of the array referenced * in @set_tid. This cannot be larger than the * kernel's limit of nested PID namespaces. + * @cgroup: If CLONE_INTO_CGROUP is specified set this to + * a file descriptor for the cgroup. * * The structure is versioned by size and thus extensible. * New struct members must go at the end of the struct and @@ -97,11 +100,13 @@ struct clone_args { __aligned_u64 tls; __aligned_u64 set_tid; __aligned_u64 set_tid_size; + __aligned_u64 cgroup; }; #endif #define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */ #define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */ +#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */ /* * Scheduling policies diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 6d8bdddd8c28..9a8a5ded3c48 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5881,8 +5881,7 @@ out: * @child: pointer to task_struct of forking parent process. * * A task is associated with the init_css_set until cgroup_post_fork() - * attaches it to the parent's css_set. Empty cg_list indicates that - * @child isn't holding reference to its css_set. + * attaches it to the target css_set. */ void cgroup_fork(struct task_struct *child) { @@ -5908,24 +5907,154 @@ static struct cgroup *cgroup_get_from_file(struct file *f) return cgrp; } +/** + * cgroup_css_set_fork - find or create a css_set for a child process + * @kargs: the arguments passed to create the child process + * + * This functions finds or creates a new css_set which the child + * process will be attached to in cgroup_post_fork(). By default, + * the child process will be given the same css_set as its parent. + * + * If CLONE_INTO_CGROUP is specified this function will try to find an + * existing css_set which includes the requested cgroup and if not create + * a new css_set that the child will be attached to later. If this function + * succeeds it will hold cgroup_threadgroup_rwsem on return. If + * CLONE_INTO_CGROUP is requested this function will grab cgroup mutex + * before grabbing cgroup_threadgroup_rwsem and will hold a reference + * to the target cgroup. 
+ */ +static int cgroup_css_set_fork(struct kernel_clone_args *kargs) + __acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem) +{ + int ret; + struct cgroup *dst_cgrp = NULL; + struct css_set *cset; + struct super_block *sb; + struct file *f; + + if (kargs->flags & CLONE_INTO_CGROUP) + mutex_lock(&cgroup_mutex); + + cgroup_threadgroup_change_begin(current); + + spin_lock_irq(&css_set_lock); + cset = task_css_set(current); + get_css_set(cset); + spin_unlock_irq(&css_set_lock); + + if (!(kargs->flags & CLONE_INTO_CGROUP)) { + kargs->cset = cset; + return 0; + } + + f = fget_raw(kargs->cgroup); + if (!f) { + ret = -EBADF; + goto err; + } + sb = f->f_path.dentry->d_sb; + + dst_cgrp = cgroup_get_from_file(f); + if (IS_ERR(dst_cgrp)) { + ret = PTR_ERR(dst_cgrp); + dst_cgrp = NULL; + goto err; + } + + if (cgroup_is_dead(dst_cgrp)) { + ret = -ENODEV; + goto err; + } + + /* + * Verify that we the target cgroup is writable for us. This is + * usually done by the vfs layer but since we're not going through + * the vfs layer here we need to do it "manually". + */ + ret = cgroup_may_write(dst_cgrp, sb); + if (ret) + goto err; + + ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb, + !(kargs->flags & CLONE_THREAD)); + if (ret) + goto err; + + kargs->cset = find_css_set(cset, dst_cgrp); + if (!kargs->cset) { + ret = -ENOMEM; + goto err; + } + + put_css_set(cset); + fput(f); + kargs->cgrp = dst_cgrp; + return ret; + +err: + cgroup_threadgroup_change_end(current); + mutex_unlock(&cgroup_mutex); + if (f) + fput(f); + if (dst_cgrp) + cgroup_put(dst_cgrp); + put_css_set(cset); + if (kargs->cset) + put_css_set(kargs->cset); + return ret; +} + +/** + * cgroup_css_set_put_fork - drop references we took during fork + * @kargs: the arguments passed to create the child process + * + * Drop references to the prepared css_set and target cgroup if + * CLONE_INTO_CGROUP was requested. + */ +static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs) + __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) +{ + cgroup_threadgroup_change_end(current); + + if (kargs->flags & CLONE_INTO_CGROUP) { + struct cgroup *cgrp = kargs->cgrp; + struct css_set *cset = kargs->cset; + + mutex_unlock(&cgroup_mutex); + + if (cset) { + put_css_set(cset); + kargs->cset = NULL; + } + + if (cgrp) { + cgroup_put(cgrp); + kargs->cgrp = NULL; + } + } +} + /** * cgroup_can_fork - called on a new task before the process is exposed * @child: the child process * + * This prepares a new css_set for the child process which the child will + * be attached to in cgroup_post_fork(). * This calls the subsystem can_fork() callbacks. If the cgroup_can_fork() * callback returns an error, the fork aborts with that error code. This * allows for a cgroup subsystem to conditionally allow or deny new forks. 
*/ -int cgroup_can_fork(struct task_struct *child) - __acquires(&cgroup_threadgroup_rwsem) __releases(&cgroup_threadgroup_rwsem) +int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs) { struct cgroup_subsys *ss; int i, j, ret; - cgroup_threadgroup_change_begin(current); + ret = cgroup_css_set_fork(kargs); + if (ret) + return ret; do_each_subsys_mask(ss, i, have_canfork_callback) { - ret = ss->can_fork(child); + ret = ss->can_fork(child, kargs->cset); if (ret) goto out_revert; } while_each_subsys_mask(); @@ -5937,32 +6066,34 @@ out_revert: if (j >= i) break; if (ss->cancel_fork) - ss->cancel_fork(child); + ss->cancel_fork(child, kargs->cset); } - cgroup_threadgroup_change_end(current); + cgroup_css_set_put_fork(kargs); return ret; } /** - * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork() - * @child: the child process - * - * This calls the cancel_fork() callbacks if a fork failed *after* - * cgroup_can_fork() succeded. - */ -void cgroup_cancel_fork(struct task_struct *child) - __releases(&cgroup_threadgroup_rwsem) + * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork() + * @child: the child process + * @kargs: the arguments passed to create the child process + * + * This calls the cancel_fork() callbacks if a fork failed *after* + * cgroup_can_fork() succeded and cleans up references we took to + * prepare a new css_set for the child process in cgroup_can_fork(). + */ +void cgroup_cancel_fork(struct task_struct *child, + struct kernel_clone_args *kargs) { struct cgroup_subsys *ss; int i; for_each_subsys(ss, i) if (ss->cancel_fork) - ss->cancel_fork(child); + ss->cancel_fork(child, kargs->cset); - cgroup_threadgroup_change_end(current); + cgroup_css_set_put_fork(kargs); } /** @@ -5972,22 +6103,27 @@ void cgroup_cancel_fork(struct task_struct *child) * Attach the child process to its css_set calling the subsystem fork() * callbacks. */ -void cgroup_post_fork(struct task_struct *child) - __releases(&cgroup_threadgroup_rwsem) +void cgroup_post_fork(struct task_struct *child, + struct kernel_clone_args *kargs) + __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) { struct cgroup_subsys *ss; struct css_set *cset; int i; + cset = kargs->cset; + kargs->cset = NULL; + spin_lock_irq(&css_set_lock); /* init tasks are special, only link regular threads */ if (likely(child->pid)) { WARN_ON_ONCE(!list_empty(&child->cg_list)); - cset = task_css_set(current); /* current is @child's parent */ - get_css_set(cset); cset->nr_tasks++; css_set_move_task(child, NULL, cset, false); + } else { + put_css_set(cset); + cset = NULL; } /* @@ -6020,7 +6156,16 @@ void cgroup_post_fork(struct task_struct *child) ss->fork(child); } while_each_subsys_mask(); - cgroup_threadgroup_change_end(current); + /* Make the new cset the root_cset of the new cgroup namespace. 
*/ + if (kargs->flags & CLONE_NEWCGROUP) { + struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset; + + get_css_set(cset); + child->nsproxy->cgroup_ns->root_cset = cset; + put_css_set(rcset); + } + + cgroup_css_set_put_fork(kargs); } /** diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c index 138059eb730d..511af87f685e 100644 --- a/kernel/cgroup/pids.c +++ b/kernel/cgroup/pids.c @@ -33,6 +33,7 @@ #include #include #include +#include #define PIDS_MAX (PID_MAX_LIMIT + 1ULL) #define PIDS_MAX_STR "max" @@ -214,13 +215,16 @@ static void pids_cancel_attach(struct cgroup_taskset *tset) * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies * on cgroup_threadgroup_change_begin() held by the copy_process(). */ -static int pids_can_fork(struct task_struct *task) +static int pids_can_fork(struct task_struct *task, struct css_set *cset) { struct cgroup_subsys_state *css; struct pids_cgroup *pids; int err; - css = task_css_check(current, pids_cgrp_id, true); + if (cset) + css = cset->subsys[pids_cgrp_id]; + else + css = task_css_check(current, pids_cgrp_id, true); pids = css_pids(css); err = pids_try_charge(pids, 1); if (err) { @@ -235,12 +239,15 @@ static int pids_can_fork(struct task_struct *task) return err; } -static void pids_cancel_fork(struct task_struct *task) +static void pids_cancel_fork(struct task_struct *task, struct css_set *cset) { struct cgroup_subsys_state *css; struct pids_cgroup *pids; - css = task_css_check(current, pids_cgrp_id, true); + if (cset) + css = cset->subsys[pids_cgrp_id]; + else + css = task_css_check(current, pids_cgrp_id, true); pids = css_pids(css); pids_uncharge(pids, 1); } diff --git a/kernel/fork.c b/kernel/fork.c index 9245b6e53f55..635d6369dfb9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2180,7 +2180,7 @@ static __latent_entropy struct task_struct *copy_process( * between here and cgroup_post_fork() if an organisation operation is in * progress. */ - retval = cgroup_can_fork(p); + retval = cgroup_can_fork(p, args); if (retval) goto bad_fork_put_pidfd; @@ -2287,7 +2287,7 @@ static __latent_entropy struct task_struct *copy_process( write_unlock_irq(&tasklist_lock); proc_fork_connector(p); - cgroup_post_fork(p); + cgroup_post_fork(p, args); perf_event_fork(p); trace_task_newtask(p, clone_flags); @@ -2298,7 +2298,7 @@ static __latent_entropy struct task_struct *copy_process( bad_fork_cancel_cgroup: spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); - cgroup_cancel_fork(p); + cgroup_cancel_fork(p, args); bad_fork_put_pidfd: if (clone_flags & CLONE_PIDFD) { fput(pidfile); @@ -2627,6 +2627,9 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, !valid_signal(args.exit_signal))) return -EINVAL; + if ((args.flags & CLONE_INTO_CGROUP) && args.cgroup < 0) + return -EINVAL; + *kargs = (struct kernel_clone_args){ .flags = args.flags, .pidfd = u64_to_user_ptr(args.pidfd), @@ -2637,6 +2640,7 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, .stack_size = args.stack_size, .tls = args.tls, .set_tid_size = args.set_tid_size, + .cgroup = args.cgroup, }; if (args.set_tid && @@ -2680,7 +2684,8 @@ static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) static bool clone3_args_valid(struct kernel_clone_args *kargs) { /* Verify that no unknown flags are passed along. 
*/ - if (kargs->flags & ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND)) + if (kargs->flags & + ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) return false; /* -- cgit v1.2.3-58-ga151 From fe98d0ff5d5c43ee179e801275bb37641d398c6e Mon Sep 17 00:00:00 2001 From: Jianxin Pan Date: Wed, 15 Jan 2020 19:30:28 +0800 Subject: firmware: meson_sm: Add secure power domain support The Amlogic Meson A1/C1 Secure Monitor implements calls to control power domains. Signed-off-by: Jianxin Pan Signed-off-by: Kevin Hilman Link: https://lore.kernel.org/r/1579087831-94965-2-git-send-email-jianxin.pan@amlogic.com --- drivers/firmware/meson/meson_sm.c | 2 ++ include/linux/firmware/meson/meson_sm.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 1d5b4d74f96d..2854b56f6e0b 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -44,6 +44,8 @@ static const struct meson_sm_chip gxbb_chip = { CMD(SM_EFUSE_WRITE, 0x82000031), CMD(SM_EFUSE_USER_MAX, 0x82000033), CMD(SM_GET_CHIP_ID, 0x82000044), + CMD(SM_A1_PWRC_SET, 0x82000093), + CMD(SM_A1_PWRC_GET, 0x82000095), { /* sentinel */ }, }, }; diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 6669e2a1d5fd..95b0da2326a9 100644 --- a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -12,6 +12,8 @@ enum { SM_EFUSE_WRITE, SM_EFUSE_USER_MAX, SM_GET_CHIP_ID, + SM_A1_PWRC_SET, + SM_A1_PWRC_GET, }; struct meson_sm_firmware; -- cgit v1.2.3-58-ga151 From c67f3df88ffca45531a12214e8faffbdab1fa422 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 13 Feb 2020 17:20:20 -0600 Subject: of: Drop struct of_pci_range.pci_space field There are no more users of the struct of_pci_range.pci_space field, so remove it. Signed-off-by: Rob Herring --- drivers/of/address.c | 1 - include/linux/of_address.h | 1 - 2 files changed, 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/address.c b/drivers/of/address.c index 846045a48395..5d608d7c10d6 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -736,7 +736,6 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, if (!parser->range || parser->range + parser->np > parser->end) return NULL; - range->pci_space = be32_to_cpup(parser->range); range->flags = of_bus_pci_get_flags(parser->range); range->pci_addr = of_read_number(parser->range + 1, ns); if (parser->dma) diff --git a/include/linux/of_address.h b/include/linux/of_address.h index eac7ab109df4..8d12bf18e80b 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -16,7 +16,6 @@ struct of_pci_range_parser { }; struct of_pci_range { - u32 pci_space; u64 pci_addr; u64 cpu_addr; u64 size; -- cgit v1.2.3-58-ga151 From bc5e522ec47174770a75df0a76d90f9ebb20132e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 6 Feb 2020 14:01:05 +0000 Subject: of/address: Rework of_pci_range parsing for non-PCI buses The only PCI-specific part of of_pci_range_parser_one() is the handling of the 3rd address cell. Rework it to work on regular 1- and 2-cell addresses. Use defines and a union to avoid a treewide renaming of the parsing helpers and struct.
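A sketch of what a non-PCI caller looks like with the new aliases (illustrative; my_dump_dma_ranges is hypothetical and error handling is trimmed). Through the new union, range.bus_addr reads the same storage as pci_addr:

#include <linux/of_address.h>
#include <linux/printk.h>

static void my_dump_dma_ranges(struct device_node *np)
{
	struct of_range_parser parser;	/* alias for of_pci_range_parser */
	struct of_range range;		/* alias for of_pci_range */

	if (of_pci_dma_range_parser_init(&parser, np))
		return;

	for_each_of_range(&parser, &range)
		pr_info("bus 0x%llx -> cpu 0x%llx (size 0x%llx, flags 0x%x)\n",
			range.bus_addr, range.cpu_addr, range.size,
			range.flags);
}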
Signed-off-by: Rob Herring --- drivers/of/address.c | 33 +++++++++++++++++++++------------ include/linux/of_address.h | 12 +++++++++--- 2 files changed, 30 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/address.c b/drivers/of/address.c index 5d608d7c10d6..6d33f849f114 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -694,12 +694,12 @@ EXPORT_SYMBOL(of_get_address); static int parser_init(struct of_pci_range_parser *parser, struct device_node *node, const char *name) { - const int na = 3, ns = 2; int rlen; parser->node = node; parser->pna = of_n_addr_cells(node); - parser->np = parser->pna + na + ns; + parser->na = of_bus_n_addr_cells(node); + parser->ns = of_bus_n_size_cells(node); parser->dma = !strcmp(name, "dma-ranges"); parser->range = of_get_property(node, name, &rlen); @@ -724,20 +724,28 @@ int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, return parser_init(parser, node, "dma-ranges"); } EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init); +#define of_dma_range_parser_init of_pci_dma_range_parser_init struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, struct of_pci_range *range) { - const int na = 3, ns = 2; + int na = parser->na; + int ns = parser->ns; + int np = parser->pna + na + ns; if (!range) return NULL; - if (!parser->range || parser->range + parser->np > parser->end) + if (!parser->range || parser->range + np > parser->end) return NULL; - range->flags = of_bus_pci_get_flags(parser->range); - range->pci_addr = of_read_number(parser->range + 1, ns); + if (parser->na == 3) + range->flags = of_bus_pci_get_flags(parser->range); + else + range->flags = 0; + + range->pci_addr = of_read_number(parser->range, na); + if (parser->dma) range->cpu_addr = of_translate_dma_address(parser->node, parser->range + na); @@ -746,15 +754,16 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, parser->range + na); range->size = of_read_number(parser->range + parser->pna + na, ns); - parser->range += parser->np; + parser->range += np; /* Now consume following elements while they are contiguous */ - while (parser->range + parser->np <= parser->end) { - u32 flags; + while (parser->range + np <= parser->end) { + u32 flags = 0; u64 pci_addr, cpu_addr, size; - flags = of_bus_pci_get_flags(parser->range); - pci_addr = of_read_number(parser->range + 1, ns); + if (parser->na == 3) + flags = of_bus_pci_get_flags(parser->range); + pci_addr = of_read_number(parser->range, na); if (parser->dma) cpu_addr = of_translate_dma_address(parser->node, parser->range + na); @@ -770,7 +779,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, break; range->size += size; - parser->range += parser->np; + parser->range += np; } return range; diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 8d12bf18e80b..763022ed3456 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -10,20 +10,27 @@ struct of_pci_range_parser { struct device_node *node; const __be32 *range; const __be32 *end; - int np; + int na; + int ns; int pna; bool dma; }; +#define of_range_parser of_pci_range_parser struct of_pci_range { - u64 pci_addr; + union { + u64 pci_addr; + u64 bus_addr; + }; u64 cpu_addr; u64 size; u32 flags; }; +#define of_range of_pci_range #define for_each_of_pci_range(parser, range) \ for (; of_pci_range_parser_one(parser, range);) +#define for_each_of_range for_each_of_pci_range /* Translate a DMA address from device space to CPU 
space */ extern u64 of_translate_dma_address(struct device_node *dev, @@ -142,4 +149,3 @@ static inline int of_pci_range_to_resource(struct of_pci_range *range, #endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ - -- cgit v1.2.3-58-ga151 From 8c79fa6c44deac8042bd747527fea06a32738158 Mon Sep 17 00:00:00 2001 From: Jungseung Lee Date: Mon, 13 Jan 2020 14:59:05 +0900 Subject: mtd: spi-nor: introduce SR_BP_SHIFT define The shift variable of SR_BP is conclusive because the first bit of SR_BP is fixed on all known flashes. Replace ffs operation with SR_BP_SHIFT. Signed-off-by: Jungseung Lee Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 11 +++++------ include/linux/mtd/spi-nor.h | 2 ++ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 864ed6f49e87..b5ef17b2897a 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1767,7 +1767,6 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, struct mtd_info *mtd = &nor->mtd; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 tb_mask = SR_TB_BIT5; - int shift = ffs(mask) - 1; int pow; if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) @@ -1778,7 +1777,7 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, *ofs = 0; *len = 0; } else { - pow = ((sr & mask) ^ mask) >> shift; + pow = ((sr & mask) ^ mask) >> SR_BP_SHIFT; *len = mtd->size >> pow; if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask) *ofs = 0; @@ -1860,7 +1859,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) int ret, status_old, status_new; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 tb_mask = SR_TB_BIT5; - u8 shift = ffs(mask) - 1, pow, val; + u8 pow, val; loff_t lock_len; bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; bool use_top; @@ -1909,7 +1908,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len)) */ pow = ilog2(mtd->size) - ilog2(lock_len); - val = mask - (pow << shift); + val = mask - (pow << SR_BP_SHIFT); if (val & ~mask) return -EINVAL; /* Don't "lock" with no region! 
*/ @@ -1946,7 +1945,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) int ret, status_old, status_new; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 tb_mask = SR_TB_BIT5; - u8 shift = ffs(mask) - 1, pow, val; + u8 pow, val; loff_t lock_len; bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; bool use_top; @@ -1997,7 +1996,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) if (lock_len == 0) { val = 0; /* fully unlocked */ } else { - val = mask - (pow << shift); + val = mask - (pow << SR_BP_SHIFT); /* Some power-of-two sizes are not supported */ if (val & ~mask) return -EINVAL; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 5abd91cc6dfa..61be6ed33097 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -137,6 +137,8 @@ #define SR1_QUAD_EN_BIT6 BIT(6) +#define SR_BP_SHIFT 2 + /* Enhanced Volatile Configuration Register bits */ #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ -- cgit v1.2.3-58-ga151 From 10fa9512769fa3b15ea29f4f331f4604c17b4b2c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 13 Feb 2020 12:20:58 +0100 Subject: usb: audio-v2: Add uac2_effect_unit_descriptor definition The UAC2 Effect Unit Descriptor has a slightly different definition from other similar ones like Processing Unit or Extension Unit. Define it here so that it can be used in USB-audio driver in a later patch. Acked-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20200213112059.18745-2-tiwai@suse.de Signed-off-by: Takashi Iwai --- include/linux/usb/audio-v2.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/linux') diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index ba4b3e3327ff..cb9900b34b67 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -156,6 +156,18 @@ struct uac2_feature_unit_descriptor { __u8 bmaControls[0]; /* variable length */ } __attribute__((packed)); +/* 4.7.2.10 Effect Unit Descriptor */ + +struct uac2_effect_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __le16 wEffectType; + __u8 bSourceID; + __u8 bmaControls[]; /* variable length */ +} __attribute__((packed)); + /* 4.9.2 Class-Specific AS Interface Descriptor */ struct uac2_as_header_descriptor { -- cgit v1.2.3-58-ga151 From b2ca916ce392a9d4cea3489a3efb2b627b839eaf Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sun, 16 Feb 2020 12:00:48 -0800 Subject: ACPI: NUMA: Up-level "map to online node" functionality The acpi_map_pxm_to_online_node() helper is used to find the closest online node to a given proximity domain. This is used to map devices in a proximity domain with no online memory or cpus to the closest online node and populate a device's 'numa_node' property. The numa_node property allows applications to be migrated "close" to a resource. In preparation for providing a generic facility to optionally map an address range to its closest online node, or the node the range would represent were it to be onlined (target_node), up-level the core of acpi_map_pxm_to_online_node() to a generic mm/numa helper. Cc: Michal Hocko Acked-by: Rafael J. 
Wysocki Reviewed-by: Ingo Molnar Signed-off-by: Dan Williams Link: https://lore.kernel.org/r/158188324802.894464.13128795207831894206.stgit@dwillia2-desk3.amr.corp.intel.com --- drivers/acpi/numa/srat.c | 41 ----------------------------------------- include/linux/acpi.h | 23 ++++++++++++++++++++++- include/linux/numa.h | 9 +++++++++ mm/mempolicy.c | 30 ++++++++++++++++++++++++++++++ 4 files changed, 61 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index eadbf90e65d1..47b4969d9b93 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -72,47 +72,6 @@ int acpi_map_pxm_to_node(int pxm) } EXPORT_SYMBOL(acpi_map_pxm_to_node); -/** - * acpi_map_pxm_to_online_node - Map proximity ID to online node - * @pxm: ACPI proximity ID - * - * This is similar to acpi_map_pxm_to_node(), but always returns an online - * node. When the mapped node from a given proximity ID is offline, it - * looks up the node distance table and returns the nearest online node. - * - * ACPI device drivers, which are called after the NUMA initialization has - * completed in the kernel, can call this interface to obtain their device - * NUMA topology from ACPI tables. Such drivers do not have to deal with - * offline nodes. A node may be offline when a device proximity ID is - * unique, SRAT memory entry does not exist, or NUMA is disabled, ex. - * "numa=off" on x86. - */ -int acpi_map_pxm_to_online_node(int pxm) -{ - int node, min_node; - - node = acpi_map_pxm_to_node(pxm); - - if (node == NUMA_NO_NODE) - node = 0; - - min_node = node; - if (!node_online(node)) { - int min_dist = INT_MAX, dist, n; - - for_each_online_node(n) { - dist = node_distance(node, n); - if (dist < min_dist) { - min_dist = dist; - min_node = n; - } - } - } - - return min_node; -} -EXPORT_SYMBOL(acpi_map_pxm_to_online_node); - static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0f24d701fbdc..3839363081f3 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -416,9 +416,30 @@ extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_NUMA -int acpi_map_pxm_to_online_node(int pxm); int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); + +/** + * acpi_map_pxm_to_online_node - Map proximity ID to online node + * @pxm: ACPI proximity ID + * + * This is similar to acpi_map_pxm_to_node(), but always returns an online + * node. When the mapped node from a given proximity ID is offline, it + * looks up the node distance table and returns the nearest online node. + * + * ACPI device drivers, which are called after the NUMA initialization has + * completed in the kernel, can call this interface to obtain their device + * NUMA topology from ACPI tables. Such drivers do not have to deal with + * offline nodes. A node may be offline when a device proximity ID is + * unique, SRAT memory entry does not exist, or NUMA is disabled, ex. + * "numa=off" on x86. 
+ */ +static inline int acpi_map_pxm_to_online_node(int pxm) +{ + int node = acpi_map_pxm_to_node(pxm); + + return numa_map_to_online_node(node); +} #else static inline int acpi_map_pxm_to_online_node(int pxm) { diff --git a/include/linux/numa.h b/include/linux/numa.h index 110b0e5d0fb0..20f4e44b186c 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -13,4 +13,13 @@ #define NUMA_NO_NODE (-1) +#ifdef CONFIG_NUMA +int numa_map_to_online_node(int node); +#else +static inline int numa_map_to_online_node(int node) +{ + return NUMA_NO_NODE; +} +#endif + #endif /* _LINUX_NUMA_H */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 977c641f78cf..756d6e5bb59f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -127,6 +127,36 @@ static struct mempolicy default_policy = { static struct mempolicy preferred_node_policy[MAX_NUMNODES]; +/** + * numa_map_to_online_node - Find closest online node + * @nid: Node id to start the search + * + * Lookup the next closest node by distance if @nid is not online. + */ +int numa_map_to_online_node(int node) +{ + int min_node; + + if (node == NUMA_NO_NODE) + node = 0; + + min_node = node; + if (!node_online(node)) { + int min_dist = INT_MAX, dist, n; + + for_each_online_node(n) { + dist = node_distance(node, n); + if (dist < min_dist) { + min_dist = dist; + min_node = n; + } + } + } + + return min_node; +} +EXPORT_SYMBOL_GPL(numa_map_to_online_node); + struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; -- cgit v1.2.3-58-ga151 From 1e5d8e1e47afde23e3249aed25d7d124feff5c1c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sun, 16 Feb 2020 12:01:04 -0800 Subject: x86/mm: Introduce CONFIG_NUMA_KEEP_MEMINFO Currently x86 numa_meminfo is marked __initdata in the CONFIG_MEMORY_HOTPLUG=n case. In support of a new facility to allow drivers to map reserved memory to a 'target_node' (phys_to_target_node()), add support for removing the __initdata designation for those users. Both memory hotplug and phys_to_target_node() users select CONFIG_NUMA_KEEP_MEMINFO to tell the arch to maintain its physical address to NUMA mapping infrastructure post init. Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Borislav Petkov Cc: "H. 
Peter Anvin" Cc: Cc: Andrew Morton Cc: David Hildenbrand Cc: Michal Hocko Reviewed-by: Ingo Molnar Signed-off-by: Dan Williams Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/158188326422.894464.15742054998046628934.stgit@dwillia2-desk3.amr.corp.intel.com --- arch/x86/mm/numa.c | 6 +----- include/linux/numa.h | 7 +++++++ mm/Kconfig | 5 +++++ 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 99f7a68738f0..2450b21cc28a 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -25,11 +25,7 @@ nodemask_t numa_nodes_parsed __initdata; struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); -static struct numa_meminfo numa_meminfo -#ifndef CONFIG_MEMORY_HOTPLUG -__initdata -#endif -; +static struct numa_meminfo numa_meminfo __initdata_or_meminfo; static int numa_distance_cnt; static u8 *numa_distance; diff --git a/include/linux/numa.h b/include/linux/numa.h index 20f4e44b186c..5773cd2613fc 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -13,6 +13,13 @@ #define NUMA_NO_NODE (-1) +/* optionally keep NUMA memory info available post init */ +#ifdef CONFIG_NUMA_KEEP_MEMINFO +#define __initdata_or_meminfo +#else +#define __initdata_or_meminfo __initdata +#endif + #ifdef CONFIG_NUMA int numa_map_to_online_node(int node); #else diff --git a/mm/Kconfig b/mm/Kconfig index ab80933be65f..328268473fec 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -139,6 +139,10 @@ config HAVE_FAST_GUP config ARCH_KEEP_MEMBLOCK bool +# Keep arch NUMA mapping infrastructure post-init. +config NUMA_KEEP_MEMINFO + bool + config MEMORY_ISOLATION bool @@ -154,6 +158,7 @@ config MEMORY_HOTPLUG bool "Allow for memory hot-add" depends on SPARSEMEM || X86_64_ACPI_NUMA depends on ARCH_ENABLE_MEMORY_HOTPLUG + select NUMA_KEEP_MEMINFO if NUMA config MEMORY_HOTPLUG_SPARSE def_bool y -- cgit v1.2.3-58-ga151 From 5d30f92e7631286b8617777c5400c8eadcae50a1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sun, 16 Feb 2020 12:01:09 -0800 Subject: x86/NUMA: Provide a range-to-target_node lookup facility The DEV_DAX_KMEM facility is a generic mechanism to allow device-dax instances, fronting performance-differentiated-memory like pmem, to be added to the System RAM pool. The NUMA node for that hot-added memory is derived from the device-dax instance's 'target_node' attribute. Recall that the 'target_node' is the ACPI-PXM-to-node translation for memory when it comes online whereas the 'numa_node' attribute of the device represents the closest online cpu node. Presently useful target_node information from the ACPI SRAT is discarded with the expectation that "Reserved" memory will never be onlined. Now, DEV_DAX_KMEM violates that assumption, there is a need to retain the translation. Move, rather than discard, numa_memblk data to a secondary array that memory_add_physaddr_to_target_node() may consider at a later point in time. Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Borislav Petkov Cc: "H. 
Peter Anvin" Cc: Cc: Andrew Morton Cc: David Hildenbrand Cc: Michal Hocko Reported-by: kbuild test robot Reviewed-by: Ingo Molnar Signed-off-by: Dan Williams Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/158188326978.894464.217282995221175417.stgit@dwillia2-desk3.amr.corp.intel.com --- arch/x86/mm/numa.c | 61 +++++++++++++++++++++++++++++++++++++++++++--------- include/linux/numa.h | 14 +++++++++++- 2 files changed, 64 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 2450b21cc28a..59ba008504dc 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -26,6 +26,7 @@ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); static struct numa_meminfo numa_meminfo __initdata_or_meminfo; +static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo; static int numa_distance_cnt; static u8 *numa_distance; @@ -164,6 +165,19 @@ void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) (mi->nr_blks - idx) * sizeof(mi->blk[0])); } +/** + * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another + * @dst: numa_meminfo to append block to + * @idx: Index of memblk to remove + * @src: numa_meminfo to remove memblk from + */ +static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx, + struct numa_meminfo *src) +{ + dst->blk[dst->nr_blks++] = src->blk[idx]; + numa_remove_memblk_from(idx, src); +} + /** * numa_add_memblk - Add one numa_memblk to numa_meminfo * @nid: NUMA node ID of the new memblk @@ -233,14 +247,19 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi) for (i = 0; i < mi->nr_blks; i++) { struct numa_memblk *bi = &mi->blk[i]; - /* make sure all blocks are inside the limits */ + /* move / save reserved memory ranges */ + if (!memblock_overlaps_region(&memblock.memory, + bi->start, bi->end - bi->start)) { + numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi); + continue; + } + + /* make sure all non-reserved blocks are inside the limits */ bi->start = max(bi->start, low); bi->end = min(bi->end, high); - /* and there's no empty or non-exist block */ - if (bi->start >= bi->end || - !memblock_overlaps_region(&memblock.memory, - bi->start, bi->end - bi->start)) + /* and there's no empty block */ + if (bi->start >= bi->end) numa_remove_memblk_from(i--, mi); } @@ -877,16 +896,38 @@ EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ -#ifdef CONFIG_MEMORY_HOTPLUG -int memory_add_physaddr_to_nid(u64 start) +#ifdef CONFIG_NUMA_KEEP_MEMINFO +static int meminfo_to_nid(struct numa_meminfo *mi, u64 start) { - struct numa_meminfo *mi = &numa_meminfo; - int nid = mi->blk[0].nid; int i; for (i = 0; i < mi->nr_blks; i++) if (mi->blk[i].start <= start && mi->blk[i].end > start) - nid = mi->blk[i].nid; + return mi->blk[i].nid; + return NUMA_NO_NODE; +} + +int phys_to_target_node(phys_addr_t start) +{ + int nid = meminfo_to_nid(&numa_meminfo, start); + + /* + * Prefer online nodes, but if reserved memory might be + * hot-added continue the search with reserved ranges. 
+ */ + if (nid != NUMA_NO_NODE) + return nid; + + return meminfo_to_nid(&numa_reserved_meminfo, start); +} +EXPORT_SYMBOL_GPL(phys_to_target_node); + +int memory_add_physaddr_to_nid(u64 start) +{ + int nid = meminfo_to_nid(&numa_meminfo, start); + + if (nid == NUMA_NO_NODE) + nid = numa_meminfo.blk[0].nid; return nid; } EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); diff --git a/include/linux/numa.h b/include/linux/numa.h index 5773cd2613fc..a42df804679e 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NUMA_H #define _LINUX_NUMA_H - +#include #ifdef CONFIG_NODES_SHIFT #define NODES_SHIFT CONFIG_NODES_SHIFT @@ -21,12 +21,24 @@ #endif #ifdef CONFIG_NUMA +/* Generic implementation available */ int numa_map_to_online_node(int node); + +/* + * Optional architecture specific implementation, users need a "depends + * on $ARCH" + */ +int phys_to_target_node(phys_addr_t addr); #else static inline int numa_map_to_online_node(int node) { return NUMA_NO_NODE; } + +static inline int phys_to_target_node(phys_addr_t addr) +{ + return NUMA_NO_NODE; +} #endif #endif /* _LINUX_NUMA_H */ -- cgit v1.2.3-58-ga151 From 0be298a939b748256035f66716fca409dd26d0dc Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Mon, 20 Jan 2020 14:10:04 +0200 Subject: ARM: at91: pm: add pmc_version member to at91_pm_data This will be used to differentiate b/w different PLLs settings to be applied in the final/first steps of the suspend/resume process by doing PLL specific configurations. Signed-off-by: Claudiu Beznea Acked-by: Stephen Boyd Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/1579522208-19523-5-git-send-email-claudiu.beznea@microchip.com --- arch/arm/mach-at91/pm.c | 7 +++++++ arch/arm/mach-at91/pm.h | 1 + arch/arm/mach-at91/pm_data-offsets.c | 2 ++ arch/arm/mach-at91/pm_suspend.S | 4 ++++ include/linux/clk/at91_pmc.h | 3 +++ 5 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index ae7b148febd9..074bde64064e 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -737,28 +737,34 @@ backup_default: struct pmc_info { unsigned long uhp_udp_mask; unsigned long mckr; + unsigned long version; }; static const struct pmc_info pmc_infos[] __initconst = { { .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP, .mckr = 0x30, + .version = AT91_PMC_V1, }, { .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP, .mckr = 0x30, + .version = AT91_PMC_V1, }, { .uhp_udp_mask = AT91SAM926x_PMC_UHP, .mckr = 0x30, + .version = AT91_PMC_V1, }, { .uhp_udp_mask = 0, .mckr = 0x30, + .version = AT91_PMC_V1, }, { .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP, .mckr = 0x28, + .version = AT91_PMC_V2, }, }; @@ -797,6 +803,7 @@ static void __init at91_pm_init(void (*pm_idle)(void)) pmc = of_id->data; soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask; soc_pm.data.pmc_mckr_offset = pmc->mckr; + soc_pm.data.pmc_version = pmc->version; if (pm_idle) arm_pm_idle = pm_idle; diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index 6f7f4236865a..218e8d1a30fb 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h @@ -34,6 +34,7 @@ struct at91_pm_data { unsigned int standby_mode; unsigned int suspend_mode; unsigned int pmc_mckr_offset; + unsigned int pmc_version; }; #endif diff --git a/arch/arm/mach-at91/pm_data-offsets.c b/arch/arm/mach-at91/pm_data-offsets.c index dfcbe626865c..82089ff258c0 100644 --- 
a/arch/arm/mach-at91/pm_data-offsets.c +++ b/arch/arm/mach-at91/pm_data-offsets.c @@ -14,6 +14,8 @@ int main(void) DEFINE(PM_DATA_SFRBU, offsetof(struct at91_pm_data, sfrbu)); DEFINE(PM_DATA_PMC_MCKR_OFFSET, offsetof(struct at91_pm_data, pmc_mckr_offset)); + DEFINE(PM_DATA_PMC_VERSION, offsetof(struct at91_pm_data, + pmc_version)); return 0; } diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index 64460b4e0fc1..5fa0c2aa10f7 100644 --- a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -95,6 +95,8 @@ ENTRY(at91_pm_suspend_in_sram) str tmp1, .pm_mode ldr tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET] str tmp1, .mckr_offset + ldr tmp1, [r0, #PM_DATA_PMC_VERSION] + str tmp1, .pmc_version /* Both ldrne below are here to preload their address in the TLB */ ldr tmp1, [r0, #PM_DATA_SHDWC] str tmp1, .shdwc @@ -542,6 +544,8 @@ ENDPROC(at91_sramc_self_refresh) .word 0 .mckr_offset: .word 0 +.pmc_version: + .word 0 .saved_mckr: .word 0 .saved_pllar: diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 390437887b46..f3d691fc5f29 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -12,6 +12,9 @@ #ifndef AT91_PMC_H #define AT91_PMC_H +#define AT91_PMC_V1 (1) /* PMC version 1 */ +#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */ + #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ -- cgit v1.2.3-58-ga151 From e13208ab5d938e51e46ba44a1dec8073142c3d8c Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Mon, 20 Jan 2020 14:10:06 +0200 Subject: clk: at91: move sam9x60's PLL register offsets to PMC header Move SAM9X60's PLL register offsets to PMC header so that the definitions would also be available from arch/arm/mach-at91/pm_suspend.S. This is necessary to disable/enable PLLA for SAM9X60 on suspend/resume. 
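For illustration, a minimal sketch of what sharing the header enables; it mirrors the sam9x60_pll_ready() helper visible in the diff below, and the pmc regmap handle and pll_id argument are assumed to be supplied by the caller:

	#include <linux/bits.h>
	#include <linux/clk/at91_pmc.h>
	#include <linux/regmap.h>

	/* Poll the SAM9X60 PLL lock bit via AT91_PMC_PLL_ISR0, which now
	 * comes from the shared PMC header instead of a driver-private
	 * define. */
	static bool example_sam9x60_pll_locked(struct regmap *pmc, int pll_id)
	{
		unsigned int status;

		regmap_read(pmc, AT91_PMC_PLL_ISR0, &status);
		return !!(status & BIT(pll_id));
	}

The same AT91_PMC_PLL_* constants are equally usable from assembly such as pm_suspend.S, which is the point of the move.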
Signed-off-by: Claudiu Beznea Acked-by: Stephen Boyd Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/1579522208-19523-7-git-send-email-claudiu.beznea@microchip.com --- drivers/clk/at91/clk-sam9x60-pll.c | 91 ++++++++++++++++---------------------- include/linux/clk/at91_pmc.h | 20 +++++++++ 2 files changed, 57 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c index dfb354a5ff18..e699803986e5 100644 --- a/drivers/clk/at91/clk-sam9x60-pll.c +++ b/drivers/clk/at91/clk-sam9x60-pll.c @@ -14,27 +14,8 @@ #include "pmc.h" -#define PMC_PLL_CTRL0 0xc -#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0) -#define PMC_PLL_CTRL0_ENPLL BIT(28) -#define PMC_PLL_CTRL0_ENPLLCK BIT(29) -#define PMC_PLL_CTRL0_ENLOCK BIT(31) - -#define PMC_PLL_CTRL1 0x10 -#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0) -#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24) - -#define PMC_PLL_ACR 0x18 -#define PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL -#define PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL -#define PMC_PLL_ACR_UTMIVR BIT(12) -#define PMC_PLL_ACR_UTMIBG BIT(13) -#define PMC_PLL_ACR_LOOP_FILTER_MSK GENMASK(31, 24) - -#define PMC_PLL_UPDT 0x1c -#define PMC_PLL_UPDT_UPDATE BIT(8) - -#define PMC_PLL_ISR0 0xec +#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0) +#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24) #define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1) #define UPLL_DIV 2 @@ -59,7 +40,7 @@ static inline bool sam9x60_pll_ready(struct regmap *regmap, int id) { unsigned int status; - regmap_read(regmap, PMC_PLL_ISR0, &status); + regmap_read(regmap, AT91_PMC_PLL_ISR0, &status); return !!(status & BIT(id)); } @@ -74,12 +55,12 @@ static int sam9x60_pll_prepare(struct clk_hw *hw) u32 val; spin_lock_irqsave(pll->lock, flags); - regmap_write(regmap, PMC_PLL_UPDT, pll->id); + regmap_write(regmap, AT91_PMC_PLL_UPDT, pll->id); - regmap_read(regmap, PMC_PLL_CTRL0, &val); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val); - regmap_read(regmap, PMC_PLL_CTRL1, &val); + regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val); mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val); if (sam9x60_pll_ready(regmap, pll->id) && @@ -88,39 +69,39 @@ static int sam9x60_pll_prepare(struct clk_hw *hw) return 0; } - /* Recommended value for PMC_PLL_ACR */ + /* Recommended value for AT91_PMC_PLL_ACR */ if (pll->characteristics->upll) - val = PMC_PLL_ACR_DEFAULT_UPLL; + val = AT91_PMC_PLL_ACR_DEFAULT_UPLL; else - val = PMC_PLL_ACR_DEFAULT_PLLA; - regmap_write(regmap, PMC_PLL_ACR, val); + val = AT91_PMC_PLL_ACR_DEFAULT_PLLA; + regmap_write(regmap, AT91_PMC_PLL_ACR, val); - regmap_write(regmap, PMC_PLL_CTRL1, + regmap_write(regmap, AT91_PMC_PLL_CTRL1, FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul)); if (pll->characteristics->upll) { /* Enable the UTMI internal bandgap */ - val |= PMC_PLL_ACR_UTMIBG; - regmap_write(regmap, PMC_PLL_ACR, val); + val |= AT91_PMC_PLL_ACR_UTMIBG; + regmap_write(regmap, AT91_PMC_PLL_ACR, val); udelay(10); /* Enable the UTMI internal regulator */ - val |= PMC_PLL_ACR_UTMIVR; - regmap_write(regmap, PMC_PLL_ACR, val); + val |= AT91_PMC_PLL_ACR_UTMIVR; + regmap_write(regmap, AT91_PMC_PLL_ACR, val); udelay(10); } - regmap_update_bits(regmap, PMC_PLL_UPDT, - PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE); + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); - regmap_write(regmap, PMC_PLL_CTRL0, - PMC_PLL_CTRL0_ENLOCK | PMC_PLL_CTRL0_ENPLL | - PMC_PLL_CTRL0_ENPLLCK | 
pll->div); + regmap_write(regmap, AT91_PMC_PLL_CTRL0, + AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL | + AT91_PMC_PLL_CTRL0_ENPLLCK | pll->div); - regmap_update_bits(regmap, PMC_PLL_UPDT, - PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE); + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); while (!sam9x60_pll_ready(regmap, pll->id)) cpu_relax(); @@ -144,22 +125,24 @@ static void sam9x60_pll_unprepare(struct clk_hw *hw) spin_lock_irqsave(pll->lock, flags); - regmap_write(pll->regmap, PMC_PLL_UPDT, pll->id); + regmap_write(pll->regmap, AT91_PMC_PLL_UPDT, pll->id); - regmap_update_bits(pll->regmap, PMC_PLL_CTRL0, - PMC_PLL_CTRL0_ENPLLCK, 0); + regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0, + AT91_PMC_PLL_CTRL0_ENPLLCK, 0); - regmap_update_bits(pll->regmap, PMC_PLL_UPDT, - PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE); + regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); - regmap_update_bits(pll->regmap, PMC_PLL_CTRL0, PMC_PLL_CTRL0_ENPLL, 0); + regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0, + AT91_PMC_PLL_CTRL0_ENPLL, 0); if (pll->characteristics->upll) - regmap_update_bits(pll->regmap, PMC_PLL_ACR, - PMC_PLL_ACR_UTMIBG | PMC_PLL_ACR_UTMIVR, 0); + regmap_update_bits(pll->regmap, AT91_PMC_PLL_ACR, + AT91_PMC_PLL_ACR_UTMIBG | + AT91_PMC_PLL_ACR_UTMIVR, 0); - regmap_update_bits(pll->regmap, PMC_PLL_UPDT, - PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE); + regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); spin_unlock_irqrestore(pll->lock, flags); } @@ -316,10 +299,10 @@ sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock, pll->regmap = regmap; pll->lock = lock; - regmap_write(regmap, PMC_PLL_UPDT, id); - regmap_read(regmap, PMC_PLL_CTRL0, &pllr); + regmap_write(regmap, AT91_PMC_PLL_UPDT, id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &pllr); pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr); - regmap_read(regmap, PMC_PLL_CTRL1, &pllr); + regmap_read(regmap, AT91_PMC_PLL_CTRL1, &pllr); pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr); hw = &pll->hw; diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index f3d691fc5f29..49a53a137610 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -33,16 +33,34 @@ #define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */ #define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */ +#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */ +#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */ +#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */ +#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */ + +#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */ + #define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */ #define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */ #define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */ +#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */ +#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */ +#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */ +#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */ +#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */ + #define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */ #define 
AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */ #define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */ #define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */ #define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */ +#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ +#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ +#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ +#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */ + #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ #define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ #define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */ @@ -183,6 +201,8 @@ #define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */ #define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */ +#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */ + #define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/ #define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ -- cgit v1.2.3-58-ga151 From df5c21002cf4bb9c755c6330d101487c5d530c10 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 19 Feb 2020 00:24:10 +0300 Subject: mtd: spi-nor: use spi-mem dirmap API Make use of the spi-mem direct mapping API to let advanced controllers optimize read/write operations when they support direct mapping. Based on the original patch by Boris Brezillon . Signed-off-by: Sergei Shtylyov Reviewed-by: Boris Brezillon Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 94 ++++++++++++++++++++++++++++++++++++++----- include/linux/mtd/spi-nor.h | 6 +++ 2 files changed, 90 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 1ce9784d86e8..1224247b26cc 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -306,6 +306,7 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, SPI_MEM_OP_DUMMY(nor->read_dummy, 1), SPI_MEM_OP_DATA_IN(len, buf, 1)); bool usebouncebuf; + ssize_t nbytes; int error; /* get transfer protocols. 
*/ @@ -319,14 +320,20 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, usebouncebuf = spi_nor_spimem_bounce(nor, &op); - error = spi_nor_spimem_exec_op(nor, &op); - if (error) - return error; + if (nor->dirmap.rdesc) { + nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val, + op.data.nbytes, op.data.buf.in); + } else { + error = spi_nor_spimem_exec_op(nor, &op); + if (error) + return error; + nbytes = op.data.nbytes; + } - if (usebouncebuf) - memcpy(buf, op.data.buf.in, op.data.nbytes); + if (usebouncebuf && nbytes > 0) + memcpy(buf, op.data.buf.in, nbytes); - return op.data.nbytes; + return nbytes; } /** @@ -365,6 +372,7 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, SPI_MEM_OP_ADDR(nor->addr_width, to, 1), SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_DATA_OUT(len, buf, 1)); + ssize_t nbytes; int error; op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); @@ -377,11 +385,17 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, if (spi_nor_spimem_bounce(nor, &op)) memcpy(nor->bouncebuf, buf, op.data.nbytes); - error = spi_nor_spimem_exec_op(nor, &op); - if (error) - return error; + if (nor->dirmap.wdesc) { + nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val, + op.data.nbytes, op.data.buf.out); + } else { + error = spi_nor_spimem_exec_op(nor, &op); + if (error) + return error; + nbytes = op.data.nbytes; + } - return op.data.nbytes; + return nbytes; } /** @@ -5265,6 +5279,58 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, } EXPORT_SYMBOL_GPL(spi_nor_scan); +static int spi_nor_create_read_dirmap(struct spi_nor *nor) +{ + struct spi_mem_dirmap_info info = { + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, 0, 1), + SPI_MEM_OP_DUMMY(nor->read_dummy, 1), + SPI_MEM_OP_DATA_IN(0, NULL, 1)), + .offset = 0, + .length = nor->mtd.size, + }; + struct spi_mem_op *op = &info.op_tmpl; + + /* get transfer protocols. */ + op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto); + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto); + op->dummy.buswidth = op->addr.buswidth; + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); + + /* convert the dummy cycles to the number of bytes */ + op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8; + + nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, + &info); + return PTR_ERR_OR_ZERO(nor->dirmap.rdesc); +} + +static int spi_nor_create_write_dirmap(struct spi_nor *nor) +{ + struct spi_mem_dirmap_info info = { + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1), + SPI_MEM_OP_ADDR(nor->addr_width, 0, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(0, NULL, 1)), + .offset = 0, + .length = nor->mtd.size, + }; + struct spi_mem_op *op = &info.op_tmpl; + + /* get transfer protocols. 
*/ + op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto); + op->dummy.buswidth = op->addr.buswidth; + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); + + if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) + op->addr.nbytes = 0; + + nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, + &info); + return PTR_ERR_OR_ZERO(nor->dirmap.wdesc); +} + static int spi_nor_probe(struct spi_mem *spimem) { struct spi_device *spi = spimem->spi; @@ -5326,6 +5392,14 @@ static int spi_nor_probe(struct spi_mem *spimem) return -ENOMEM; } + ret = spi_nor_create_read_dirmap(nor); + if (ret) + return ret; + + ret = spi_nor_create_write_dirmap(nor); + if (ret) + return ret; + return mtd_device_register(&nor->mtd, data ? data->parts : NULL, data ? data->nr_parts : 0); } diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 61be6ed33097..de90724f62f1 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -580,6 +580,7 @@ struct flash_info; * The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. + * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. * @priv: the private data */ struct spi_nor { @@ -606,6 +607,11 @@ struct spi_nor { struct spi_nor_flash_parameter params; + struct { + struct spi_mem_dirmap_desc *rdesc; + struct spi_mem_dirmap_desc *wdesc; + } dirmap; + void *priv; }; -- cgit v1.2.3-58-ga151 From 9255782f70614c89b1a15ec6997c4b72ce9e630a Mon Sep 17 00:00:00 2001 From: Sourabh Jain Date: Wed, 11 Dec 2019 21:39:06 +0530 Subject: sysfs: Wrap __compat_only_sysfs_link_entry_to_kobj function to change the symlink name The __compat_only_sysfs_link_entry_to_kobj function creates a symlink to a kobject but doesn't provide an option to change the symlink file name. This patch adds a wrapper function compat_only_sysfs_link_entry_to_kobj that extends the __compat_only_sysfs_link_entry_to_kobj functionality which allows function caller to customize the symlink name. Signed-off-by: Sourabh Jain [mpe: Fix compile error when CONFIG_SYSFS=n] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20191211160910.21656-3-sourabhjain@linux.ibm.com --- fs/sysfs/group.c | 28 +++++++++++++++++++++++++--- include/linux/sysfs.h | 12 ++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index c4ab045926b7..1e2a096057bc 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -424,6 +424,25 @@ EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group); int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name) +{ + return compat_only_sysfs_link_entry_to_kobj(kobj, target_kobj, + target_name, NULL); +} +EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj); + +/** + * compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing + * to a group or an attribute + * @kobj: The kobject containing the group. + * @target_kobj: The target kobject. + * @target_name: The name of the target group or attribute. + * @symlink_name: The name of the symlink file (target_name will be + * considered if symlink_name is NULL). 
+ */ +int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name) { struct kernfs_node *target; struct kernfs_node *entry; @@ -448,12 +467,15 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, return -ENOENT; } - link = kernfs_create_link(kobj->sd, target_name, entry); + if (!symlink_name) + symlink_name = target_name; + + link = kernfs_create_link(kobj->sd, symlink_name, entry); if (PTR_ERR(link) == -EEXIST) - sysfs_warn_dup(kobj->sd, target_name); + sysfs_warn_dup(kobj->sd, symlink_name); kernfs_put(entry); kernfs_put(target); return PTR_ERR_OR_ZERO(link); } -EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj); +EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fa7ee503fb76..7462315a643b 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -300,6 +300,10 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name); +int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); @@ -508,6 +512,14 @@ static inline int __compat_only_sysfs_link_entry_to_kobj( return 0; } +static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name) +{ + return 0; +} + static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { -- cgit v1.2.3-58-ga151 From 1d0827b75ee7df497f611a2ac412a88135fb0ef5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 30 Jan 2020 12:06:01 -0800 Subject: mm/memremap_pages: Kill unused __devm_memremap_pages() Kill this definition that was introduced in commit 41e94a851304 ("add devm_memremap_pages") add never used. Cc: Christoph Hellwig Reviewed-by: Aneesh Kumar K.V Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/158041476158.3889308.4221100673554151124.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- include/linux/io.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io.h b/include/linux/io.h index b1c44bb4b2d7..8394c56babc2 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -77,8 +77,6 @@ void *devm_memremap(struct device *dev, resource_size_t offset, size_t size, unsigned long flags); void devm_memunmap(struct device *dev, void *addr); -void *__devm_memremap_pages(struct device *dev, struct resource *res); - #ifdef CONFIG_PCI /* * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and -- cgit v1.2.3-58-ga151 From 9ffc1d19fc4a6dfcfe06c91c2861ad6d44fdd92d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 30 Jan 2020 12:06:07 -0800 Subject: mm/memremap_pages: Introduce memremap_compat_align() The "sub-section memory hotplug" facility allows memremap_pages() users like libnvdimm to compensate for hardware platforms like x86 that have a section size larger than their hardware memory mapping granularity. The compensation that sub-section support affords is being tolerant of physical memory resources shifting by units smaller (64MiB on x86) than the memory-hotplug section size (128 MiB). 
Where the platform physical-memory mapping granularity is limited by the number and capability of address-decode-registers in the memory controller. While the sub-section support allows memremap_pages() to operate on sub-section (2MiB) granularity, the Power architecture may still require 16MiB alignment on "!radix_enabled()" platforms. In order for libnvdimm to be able to detect and manage this per-arch limitation, introduce memremap_compat_align() as a common minimum alignment across all driver-facing memory-mapping interfaces, and let Power override it to 16MiB in the "!radix_enabled()" case. The assumption / requirement for 16MiB to be a viable memremap_compat_align() value is that Power does not have platforms where its equivalent of address-decode-registers never hardware remaps a persistent memory resource on smaller than 16MiB boundaries. Note that I tried my best to not add a new Kconfig symbol, but header include entanglements defeated the #ifndef memremap_compat_align design pattern and the need to export it defeats the __weak design pattern for arch overrides. Based on an initial patch by Aneesh. Link: http://lore.kernel.org/r/CAPcyv4gBGNP95APYaBcsocEa50tQj9b5h__83vgngjq3ouGX_Q@mail.gmail.com Reported-by: Aneesh Kumar K.V Reported-by: Jeff Moyer Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Reviewed-by: Aneesh Kumar K.V Acked-by: Michael Ellerman (powerpc) Signed-off-by: Dan Williams --- arch/powerpc/Kconfig | 1 + arch/powerpc/mm/ioremap.c | 21 +++++++++++++++++++++ drivers/nvdimm/pfn_devs.c | 2 +- include/linux/memremap.h | 8 ++++++++ include/linux/mmzone.h | 1 + lib/Kconfig | 3 +++ mm/memremap.c | 23 +++++++++++++++++++++++ 7 files changed, 58 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 497b7d0b2d7e..e6ffe905e2b9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -122,6 +122,7 @@ config PPC select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV select ARCH_HAS_HUGEPD if HUGETLB_PAGE + select ARCH_HAS_MEMREMAP_COMPAT_ALIGN select ARCH_HAS_MMIOWB if PPC64 select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index fc669643ce6a..b1a0aebe8c48 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -97,3 +98,23 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, return NULL; } + +#ifdef CONFIG_ZONE_DEVICE +/* + * Override the generic version in mm/memremap.c. + * + * With hash translation, the direct-map range is mapped with just one + * page size selected by htab_init_page_sizes(). Consult + * mmu_psize_defs[] to determine the minimum page size alignment. 
+*/ +unsigned long memremap_compat_align(void) +{ + unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift; + + if (radix_enabled()) + return SUBSECTION_SIZE; + return max(SUBSECTION_SIZE, 1UL << shift); + +} +EXPORT_SYMBOL_GPL(memremap_compat_align); +#endif diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index b94f7a7e94b8..a5c25cb87116 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -750,7 +750,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) start = nsio->res.start; size = resource_size(&nsio->res); npfns = PHYS_PFN(size - SZ_8K); - align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT)); + align = max(nd_pfn->align, SUBSECTION_SIZE); end_trunc = start + size - ALIGN_DOWN(start + size, align); if (nd_pfn->mode == PFN_MODE_PMEM) { /* diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 6fefb09af7c3..8af1cbd8f293 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -132,6 +132,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); +unsigned long memremap_compat_align(void); #else static inline void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) @@ -165,6 +166,12 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns) { } + +/* when memremap_pages() is disabled all archs can remap a single page */ +static inline unsigned long memremap_compat_align(void) +{ + return PAGE_SIZE; +} #endif /* CONFIG_ZONE_DEVICE */ static inline void put_dev_pagemap(struct dev_pagemap *pgmap) @@ -172,4 +179,5 @@ static inline void put_dev_pagemap(struct dev_pagemap *pgmap) if (pgmap) percpu_ref_put(pgmap->ref); } + #endif /* _LINUX_MEMREMAP_H_ */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 462f6873905a..6b77f7239af5 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1170,6 +1170,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) #define SUBSECTION_SHIFT 21 +#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT) #define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) #define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) diff --git a/lib/Kconfig b/lib/Kconfig index bc7e56370129..5d53f9609c25 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -615,6 +615,9 @@ config ARCH_HAS_PMEM_API config MEMREGION bool +config ARCH_HAS_MEMREMAP_COMPAT_ALIGN + bool + # use memcpy to implement user copies for nommu architectures config UACCESS_MEMCPY bool diff --git a/mm/memremap.c b/mm/memremap.c index 09b5b7adc773..3e7afaf05639 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -14,6 +15,28 @@ static DEFINE_XARRAY(pgmap_array); +/* + * The memremap() and memremap_pages() interfaces are alternately used + * to map persistent memory namespaces. These interfaces place different + * constraints on the alignment and size of the mapping (namespace). + * memremap() can map individual PAGE_SIZE pages. memremap_pages() can + * only map subsections (2MB), and at least one architecture (PowerPC) + * the minimum mapping granularity of memremap_pages() is 16MB. + * + * The role of memremap_compat_align() is to communicate the minimum + * arch supported alignment of a namespace such that it can freely + * switch modes without violating the arch constraint. 
Namely, do not + allow a namespace to be PAGE_SIZE aligned since that namespace may be + reconfigured into a mode that requires SUBSECTION_SIZE alignment. + */ +#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN +unsigned long memremap_compat_align(void) +{ + return SUBSECTION_SIZE; +} +EXPORT_SYMBOL_GPL(memremap_compat_align); +#endif + #ifdef CONFIG_DEV_PAGEMAP_OPS DEFINE_STATIC_KEY_FALSE(devmap_managed_key); EXPORT_SYMBOL(devmap_managed_key); -- cgit v1.2.3-58-ga151 From 4f929d0877543df8a834afa5b8732d469c05cd84 Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Thu, 20 Feb 2020 17:56:49 +0200 Subject: firmware: imx: Remove IMX_SC_RPC_SVC_ABORT This is not used by Linux and is not supported as part of the i.MX SCU API; it was added by mistake. The constant value "9" has since been reassigned in firmware to a different service. Signed-off-by: Leonard Crestez Signed-off-by: Shawn Guo --- include/linux/firmware/imx/ipc.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h index 6312c8cb084a..891057434858 100644 --- a/include/linux/firmware/imx/ipc.h +++ b/include/linux/firmware/imx/ipc.h @@ -25,7 +25,6 @@ enum imx_sc_rpc_svc { IMX_SC_RPC_SVC_PAD = 6, IMX_SC_RPC_SVC_MISC = 7, IMX_SC_RPC_SVC_IRQ = 8, - IMX_SC_RPC_SVC_ABORT = 9 }; struct imx_sc_rpc_msg { -- cgit v1.2.3-58-ga151 From 5779dd0a7dbd71e82478fb0bf125cc6cd3c43266 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 24 Feb 2020 15:23:34 +0530 Subject: PCI: endpoint: Use notification chain mechanism to notify EPC events to EPF Use atomic_notifier_call_chain() to notify the EPF driver of EPC events such as linkup, instead of using the linkup ops in the EPF driver. This is in preparation for adding a proper locking mechanism to EPF ops. This will also make it possible to add more events (in addition to linkup) in the future.
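As an illustration of the new flow, a minimal sketch of an EPF driver consuming the notifier chain; it condenses the pci-epf-test changes in the diff below, and the my_epf_notifier name is a placeholder:

	static int my_epf_notifier(struct notifier_block *nb,
				   unsigned long val, void *data)
	{
		struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

		/* React to the EPC event; only linkup is reported for now. */
		return NOTIFY_OK;
	}

	/* In the EPF driver's ->bind(), when the EPC reports link-up
	 * asynchronously: */
	epf->nb.notifier_call = my_epf_notifier;
	pci_epc_register_notifier(epc, &epf->nb);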
Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi Tested-by: Vidya Sagar --- drivers/pci/endpoint/functions/pci-epf-test.c | 13 ++++++++++--- drivers/pci/endpoint/pci-epc-core.c | 9 ++------- drivers/pci/endpoint/pci-epf-core.c | 22 +--------------------- include/linux/pci-epc.h | 8 ++++++++ include/linux/pci-epf.h | 6 ++---- 5 files changed, 23 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 5d74f81ddfe4..bddff15052cc 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -360,12 +360,16 @@ reset_handler: msecs_to_jiffies(1)); } -static void pci_epf_test_linkup(struct pci_epf *epf) +static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val, + void *data) { + struct pci_epf *epf = container_of(nb, struct pci_epf, nb); struct pci_epf_test *epf_test = epf_get_drvdata(epf); queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, msecs_to_jiffies(1)); + + return NOTIFY_OK; } static void pci_epf_test_unbind(struct pci_epf *epf) @@ -546,8 +550,12 @@ static int pci_epf_test_bind(struct pci_epf *epf) } } - if (!linkup_notifier) + if (linkup_notifier) { + epf->nb.notifier_call = pci_epf_test_notifier; + pci_epc_register_notifier(epc, &epf->nb); + } else { queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); + } return 0; } @@ -580,7 +588,6 @@ static int pci_epf_test_probe(struct pci_epf *epf) static struct pci_epf_ops ops = { .unbind = pci_epf_test_unbind, .bind = pci_epf_test_bind, - .linkup = pci_epf_test_linkup, }; static struct pci_epf_driver test_driver = { diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 2091508c1620..2f6436599fcb 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -539,16 +539,10 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf); */ void pci_epc_linkup(struct pci_epc *epc) { - unsigned long flags; - struct pci_epf *epf; - if (!epc || IS_ERR(epc)) return; - spin_lock_irqsave(&epc->lock, flags); - list_for_each_entry(epf, &epc->pci_epf, list) - pci_epf_linkup(epf); - spin_unlock_irqrestore(&epc->lock, flags); + atomic_notifier_call_chain(&epc->notifier, 0, NULL); } EXPORT_SYMBOL_GPL(pci_epc_linkup); @@ -612,6 +606,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, spin_lock_init(&epc->lock); INIT_LIST_HEAD(&epc->pci_epf); + ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier); device_initialize(&epc->dev); epc->dev.class = pci_epc_class; diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index fb1306de8f40..93f28c65ace0 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -20,26 +20,6 @@ static DEFINE_MUTEX(pci_epf_mutex); static struct bus_type pci_epf_bus_type; static const struct device_type pci_epf_type; -/** - * pci_epf_linkup() - Notify the function driver that EPC device has - * established a connection with the Root Complex. - * @epf: the EPF device bound to the EPC device which has established - * the connection with the host - * - * Invoke to notify the function driver that EPC device has established - * a connection with the Root Complex. 
- */ -void pci_epf_linkup(struct pci_epf *epf) -{ - if (!epf->driver) { - dev_WARN(&epf->dev, "epf device not bound to driver\n"); - return; - } - - epf->driver->ops->linkup(epf); -} -EXPORT_SYMBOL_GPL(pci_epf_linkup); - /** * pci_epf_unbind() - Notify the function driver that the binding between the * EPF device and EPC device has been lost @@ -214,7 +194,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver, if (!driver->ops) return -EINVAL; - if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup) + if (!driver->ops->bind || !driver->ops->unbind) return -EINVAL; driver->driver.bus = &pci_epf_bus_type; diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 56f1846b9d39..36644ccd32ac 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -89,6 +89,7 @@ struct pci_epc_mem { * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC device * @lock: spinlock to protect pci_epc ops + * @notifier: used to notify EPF of any EPC events (like linkup) */ struct pci_epc { struct device dev; @@ -99,6 +100,7 @@ struct pci_epc { struct config_group *group; /* spinlock to protect against concurrent access of EP controller */ spinlock_t lock; + struct atomic_notifier_head notifier; }; /** @@ -141,6 +143,12 @@ static inline void *epc_get_drvdata(struct pci_epc *epc) return dev_get_drvdata(&epc->dev); } +static inline int +pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&epc->notifier, nb); +} + struct pci_epc * __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 2d6f07556682..4993f7f6439b 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -55,13 +55,10 @@ struct pci_epf_header { * @bind: ops to perform when a EPC device has been bound to EPF device * @unbind: ops to perform when a binding has been lost between a EPC device * and EPF device - * @linkup: ops to perform when the EPC device has established a connection with - * a host system */ struct pci_epf_ops { int (*bind)(struct pci_epf *epf); void (*unbind)(struct pci_epf *epf); - void (*linkup)(struct pci_epf *epf); }; /** @@ -112,6 +109,7 @@ struct pci_epf_bar { * @epc: the EPC device to which this EPF device is bound * @driver: the EPF driver to which this EPF device is bound * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc + * @nb: notifier block to notify EPF of any EPC events (like linkup) */ struct pci_epf { struct device dev; @@ -125,6 +123,7 @@ struct pci_epf { struct pci_epc *epc; struct pci_epf_driver *driver; struct list_head list; + struct notifier_block nb; }; #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev) @@ -154,5 +153,4 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); int pci_epf_bind(struct pci_epf *epf); void pci_epf_unbind(struct pci_epf *epf); -void pci_epf_linkup(struct pci_epf *epf); #endif /* __LINUX_PCI_EPF_H */ -- cgit v1.2.3-58-ga151 From 3d3248dbd018502f654064c78efcd2e165ab3486 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 24 Feb 2020 15:23:35 +0530 Subject: PCI: endpoint: Replace spinlock with mutex The pci_epc_ops is not intended to be invoked from interrupt context. 
Hence replace spin_lock_irqsave and spin_unlock_irqrestore with mutex_lock and mutex_unlock respectively. Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi --- drivers/pci/endpoint/pci-epc-core.c | 82 ++++++++++++++----------------------- include/linux/pci-epc.h | 6 +-- 2 files changed, 34 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 2f6436599fcb..e51a12ed85bb 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -120,7 +120,6 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, u8 func_no) { const struct pci_epc_features *epc_features; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return NULL; @@ -128,9 +127,9 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, if (!epc->ops->get_features) return NULL; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); epc_features = epc->ops->get_features(epc, func_no); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return epc_features; } @@ -144,14 +143,12 @@ EXPORT_SYMBOL_GPL(pci_epc_get_features); */ void pci_epc_stop(struct pci_epc *epc) { - unsigned long flags; - if (IS_ERR(epc) || !epc->ops->stop) return; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); epc->ops->stop(epc); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_stop); @@ -164,7 +161,6 @@ EXPORT_SYMBOL_GPL(pci_epc_stop); int pci_epc_start(struct pci_epc *epc) { int ret; - unsigned long flags; if (IS_ERR(epc)) return -EINVAL; @@ -172,9 +168,9 @@ int pci_epc_start(struct pci_epc *epc) if (!epc->ops->start) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); ret = epc->ops->start(epc); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return ret; } @@ -193,7 +189,6 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num) { int ret; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return -EINVAL; @@ -201,9 +196,9 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, if (!epc->ops->raise_irq) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return ret; } @@ -219,7 +214,6 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq); int pci_epc_get_msi(struct pci_epc *epc, u8 func_no) { int interrupt; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return 0; @@ -227,9 +221,9 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no) if (!epc->ops->get_msi) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); interrupt = epc->ops->get_msi(epc, func_no); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); if (interrupt < 0) return 0; @@ -252,7 +246,6 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts) { int ret; u8 encode_int; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || interrupts > 32) @@ -263,9 +256,9 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts) encode_int = order_base_2(interrupts); - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); ret = epc->ops->set_msi(epc, func_no, encode_int); - 
spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return ret; } @@ -281,7 +274,6 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi); int pci_epc_get_msix(struct pci_epc *epc, u8 func_no) { int interrupt; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return 0; @@ -289,9 +281,9 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no) if (!epc->ops->get_msix) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); interrupt = epc->ops->get_msix(epc, func_no); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); if (interrupt < 0) return 0; @@ -311,7 +303,6 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix); int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) { int ret; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || interrupts < 1 || interrupts > 2048) @@ -320,9 +311,9 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) if (!epc->ops->set_msix) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); ret = epc->ops->set_msix(epc, func_no, interrupts - 1); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return ret; } @@ -339,17 +330,15 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix); void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr) { - unsigned long flags; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return; if (!epc->ops->unmap_addr) return; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); epc->ops->unmap_addr(epc, func_no, phys_addr); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_unmap_addr); @@ -367,7 +356,6 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr, u64 pci_addr, size_t size) { int ret; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return -EINVAL; @@ -375,9 +363,9 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, if (!epc->ops->map_addr) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return ret; } @@ -394,8 +382,6 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr); void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, struct pci_epf_bar *epf_bar) { - unsigned long flags; - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || (epf_bar->barno == BAR_5 && epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) @@ -404,9 +390,9 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, if (!epc->ops->clear_bar) return; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); epc->ops->clear_bar(epc, func_no, epf_bar); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_clear_bar); @@ -422,7 +408,6 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, struct pci_epf_bar *epf_bar) { int ret; - unsigned long irq_flags; int flags = epf_bar->flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || @@ -437,9 +422,9 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, if (!epc->ops->set_bar) return 0; - spin_lock_irqsave(&epc->lock, irq_flags); + mutex_lock(&epc->lock); ret = epc->ops->set_bar(epc, func_no, epf_bar); - spin_unlock_irqrestore(&epc->lock, irq_flags); + mutex_unlock(&epc->lock); return ret; } @@ -460,7 +445,6 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header 
*header) { int ret; - unsigned long flags; if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) return -EINVAL; @@ -468,9 +452,9 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, if (!epc->ops->write_header) return 0; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); ret = epc->ops->write_header(epc, func_no, header); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return ret; } @@ -487,8 +471,6 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header); */ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) { - unsigned long flags; - if (epf->epc) return -EBUSY; @@ -500,9 +482,9 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) epf->epc = epc; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); list_add_tail(&epf->list, &epc->pci_epf); - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); return 0; } @@ -517,15 +499,13 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf); */ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf) { - unsigned long flags; - if (!epc || IS_ERR(epc) || !epf) return; - spin_lock_irqsave(&epc->lock, flags); + mutex_lock(&epc->lock); list_del(&epf->list); epf->epc = NULL; - spin_unlock_irqrestore(&epc->lock, flags); + mutex_unlock(&epc->lock); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); @@ -604,7 +584,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, goto err_ret; } - spin_lock_init(&epc->lock); + mutex_init(&epc->lock); INIT_LIST_HEAD(&epc->pci_epf); ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier); diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 36644ccd32ac..9dd60f2e9705 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -88,7 +88,7 @@ struct pci_epc_mem { * @mem: address space of the endpoint controller * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC device - * @lock: spinlock to protect pci_epc ops + * @lock: mutex to protect pci_epc ops * @notifier: used to notify EPF of any EPC events (like linkup) */ struct pci_epc { @@ -98,8 +98,8 @@ struct pci_epc { struct pci_epc_mem *mem; u8 max_functions; struct config_group *group; - /* spinlock to protect against concurrent access of EP controller */ - spinlock_t lock; + /* mutex to protect against concurrent access of EP controller */ + struct mutex lock; struct atomic_notifier_head notifier; }; -- cgit v1.2.3-58-ga151 From 04e046ca57ebed3943422dee10eec9e73aec081e Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 24 Feb 2020 15:23:36 +0530 Subject: PCI: endpoint: Fix for concurrent memory allocation in OB address region pci-epc-mem uses a bitmap to manage the Endpoint outbound (OB) address region. This address region will be shared by multiple endpoint functions (in the case of multi function endpoint) and it has to be protected from concurrent access to avoid updating an inconsistent state. Use a mutex to protect bitmap updates to prevent the memory allocation API from returning incorrect addresses. 
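The locking pattern introduced here generalizes to any bitmap-backed allocator: both the search (bitmap_find_free_region()) and the release (bitmap_release_region()) must run under the same lock, since neither helper is atomic on its own. A minimal stand-alone sketch of the idiom, not taken from the patch:

	struct region_alloc {
		unsigned long *bitmap;	/* one bit per page in the region */
		int pages;		/* number of bits in @bitmap */
		struct mutex lock;	/* serializes bitmap updates */
	};

	/* returns the first page number of a free 2^order-page block, or negative */
	static int region_alloc_pages(struct region_alloc *ra, int order)
	{
		int pageno;

		mutex_lock(&ra->lock);
		pageno = bitmap_find_free_region(ra->bitmap, ra->pages, order);
		mutex_unlock(&ra->lock);

		return pageno;
	}

	static void region_free_pages(struct region_alloc *ra, int pageno, int order)
	{
		mutex_lock(&ra->lock);
		bitmap_release_region(ra->bitmap, pageno, order);
		mutex_unlock(&ra->lock);
	}

A mutex rather than a spinlock is the right primitive here because the allocation path in the patch also holds the lock across ioremap(), which may sleep.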
Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi Cc: stable@vger.kernel.org # v4.14+ --- drivers/pci/endpoint/pci-epc-mem.c | 10 ++++++++-- include/linux/pci-epc.h | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index d2b174ce15de..abfac1109a13 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size, mem->page_size = page_size; mem->pages = pages; mem->size = size; + mutex_init(&mem->lock); epc->mem = mem; @@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size) { int pageno; - void __iomem *virt_addr; + void __iomem *virt_addr = NULL; struct pci_epc_mem *mem = epc->mem; unsigned int page_shift = ilog2(mem->page_size); int order; @@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, size = ALIGN(size, mem->page_size); order = pci_epc_mem_get_order(mem, size); + mutex_lock(&mem->lock); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno < 0) - return NULL; + goto ret; *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift); virt_addr = ioremap(*phys_addr, size); if (!virt_addr) bitmap_release_region(mem->bitmap, pageno, order); +ret: + mutex_unlock(&mem->lock); return virt_addr; } EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); @@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, pageno = (phys_addr - mem->phys_base) >> page_shift; size = ALIGN(size, mem->page_size); order = pci_epc_mem_get_order(mem, size); + mutex_lock(&mem->lock); bitmap_release_region(mem->bitmap, pageno, order); + mutex_unlock(&mem->lock); } EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 9dd60f2e9705..4e3e527c49d1 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -71,6 +71,7 @@ struct pci_epc_ops { * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region * @page_size: size of each page + * @lock: mutex to protect bitmap */ struct pci_epc_mem { phys_addr_t phys_base; @@ -78,6 +79,8 @@ struct pci_epc_mem { unsigned long *bitmap; size_t page_size; int pages; + /* mutex to protect against concurrent access for memory allocation*/ + struct mutex lock; }; /** -- cgit v1.2.3-58-ga151 From 07301c982643a432212840a4b648b5d3f5a061fa Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 24 Feb 2020 15:23:37 +0530 Subject: PCI: endpoint: Protect concurrent access to pci_epf_ops with mutex Protect concurrent access to pci_epf_ops with a mutex. 
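The pattern mirrors the pci-epc-core conversion above: every ops callback runs with the owning object's mutex held, so bind and unbind can never race on the same function. Reduced to a hedged sketch with hypothetical names:

	struct widget;

	struct widget_ops {
		int (*bind)(struct widget *w);
		void (*unbind)(struct widget *w);
	};

	struct widget {
		const struct widget_ops *ops;
		struct mutex lock;	/* protects ops invocations */
	};

	static int widget_bind(struct widget *w)
	{
		int ret;

		mutex_lock(&w->lock);	/* bind may sleep, hence a mutex */
		ret = w->ops->bind(w);
		mutex_unlock(&w->lock);

		return ret;
	}

struct widget is purely illustrative; as in the patch, mutex_init() must run once at object creation time (pci_epf_create() in the diff below) before any callback can be serialized.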
Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi --- drivers/pci/endpoint/pci-epf-core.c | 11 ++++++++++- include/linux/pci-epf.h | 3 +++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 93f28c65ace0..6e0648991b5c 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -35,7 +35,9 @@ void pci_epf_unbind(struct pci_epf *epf) return; } + mutex_lock(&epf->lock); epf->driver->ops->unbind(epf); + mutex_unlock(&epf->lock); module_put(epf->driver->owner); } EXPORT_SYMBOL_GPL(pci_epf_unbind); @@ -49,6 +51,8 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind); */ int pci_epf_bind(struct pci_epf *epf) { + int ret; + if (!epf->driver) { dev_WARN(&epf->dev, "epf device not bound to driver\n"); return -EINVAL; @@ -57,7 +61,11 @@ int pci_epf_bind(struct pci_epf *epf) if (!try_module_get(epf->driver->owner)) return -EAGAIN; - return epf->driver->ops->bind(epf); + mutex_lock(&epf->lock); + ret = epf->driver->ops->bind(epf); + mutex_unlock(&epf->lock); + + return ret; } EXPORT_SYMBOL_GPL(pci_epf_bind); @@ -252,6 +260,7 @@ struct pci_epf *pci_epf_create(const char *name) device_initialize(dev); dev->bus = &pci_epf_bus_type; dev->type = &pci_epf_type; + mutex_init(&epf->lock); ret = dev_set_name(dev, "%s", name); if (ret) { diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 4993f7f6439b..bcdf4f07bde7 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -110,6 +110,7 @@ struct pci_epf_bar { * @driver: the EPF driver to which this EPF device is bound * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc * @nb: notifier block to notify EPF of any EPC events (like linkup) + * @lock: mutex to protect pci_epf_ops */ struct pci_epf { struct device dev; @@ -124,6 +125,8 @@ struct pci_epf { struct pci_epf_driver *driver; struct list_head list; struct notifier_block nb; + /* mutex to protect against concurrent access of pci_epf_ops */ + struct mutex lock; }; #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev) -- cgit v1.2.3-58-ga151 From 2499ee84e02774a8573b7b4c76c8f2ea38669313 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 24 Feb 2020 15:23:38 +0530 Subject: PCI: endpoint: Assign function number for each PF in EPC core The PCIe endpoint core relies on the drivers that invoke the pci_epc_add_epf() API to allocate and assign a function number to each physical function (PF). Since an endpoint function device can be created by multiple mechanisms (configfs, devicetree, etc.), allowing each of these mechanisms to assign a function number would result in multiple endpoint function devices having the same function number. In order to avoid this, let the EPC core assign a function number to the endpoint device.
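The allocation scheme moved into the core, visible in the diff below, is a first-fit search over a bitmap done under epc->lock. The idiom in isolation — a hedged sketch assuming at most BITS_PER_LONG IDs and locking provided by the caller:

	static unsigned long id_map;	/* one bit per allocated ID */

	/* caller must hold the lock that protects @id_map */
	static int alloc_id(void)
	{
		u32 id = find_first_zero_bit(&id_map, BITS_PER_LONG);

		if (id >= BITS_PER_LONG)
			return -ENOSPC;	/* all IDs in use */

		set_bit(id, &id_map);
		return id;
	}

	static void free_id(u32 id)
	{
		clear_bit(id, &id_map);
	}

Because the EPC core performs the search under a mutex, the atomicity of set_bit()/clear_bit() is not what provides correctness here; the lock is.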
Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi --- drivers/pci/endpoint/pci-ep-cfs.c | 27 +++++---------------------- drivers/pci/endpoint/pci-epc-core.c | 26 ++++++++++++++++++++++---- include/linux/pci-epc.h | 2 ++ 3 files changed, 29 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index d1288a0bd530..e7e8367eead1 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -29,7 +29,6 @@ struct pci_epc_group { struct config_group group; struct pci_epc *epc; bool start; - unsigned long function_num_map; }; static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item) @@ -89,37 +88,22 @@ static int pci_epc_epf_link(struct config_item *epc_item, struct config_item *epf_item) { int ret; - u32 func_no = 0; struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); struct pci_epc *epc = epc_group->epc; struct pci_epf *epf = epf_group->epf; - func_no = find_first_zero_bit(&epc_group->function_num_map, - BITS_PER_LONG); - if (func_no >= BITS_PER_LONG) - return -EINVAL; - - set_bit(func_no, &epc_group->function_num_map); - epf->func_no = func_no; - ret = pci_epc_add_epf(epc, epf); if (ret) - goto err_add_epf; + return ret; ret = pci_epf_bind(epf); - if (ret) - goto err_epf_bind; + if (ret) { + pci_epc_remove_epf(epc, epf); + return ret; + } return 0; - -err_epf_bind: - pci_epc_remove_epf(epc, epf); - -err_add_epf: - clear_bit(func_no, &epc_group->function_num_map); - - return ret; } static void pci_epc_epf_unlink(struct config_item *epc_item, @@ -134,7 +118,6 @@ static void pci_epc_epf_unlink(struct config_item *epc_item, epc = epc_group->epc; epf = epf_group->epf; - clear_bit(epf->func_no, &epc_group->function_num_map); pci_epf_unbind(epf); pci_epc_remove_epf(epc, epf); } diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index e51a12ed85bb..dc1c673534e0 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -471,22 +471,39 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header); */ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) { + u32 func_no; + int ret = 0; + if (epf->epc) return -EBUSY; if (IS_ERR(epc)) return -EINVAL; - if (epf->func_no > epc->max_functions - 1) - return -EINVAL; + mutex_lock(&epc->lock); + func_no = find_first_zero_bit(&epc->function_num_map, + BITS_PER_LONG); + if (func_no >= BITS_PER_LONG) { + ret = -EINVAL; + goto ret; + } + + if (func_no > epc->max_functions - 1) { + dev_err(&epc->dev, "Exceeding max supported Function Number\n"); + ret = -EINVAL; + goto ret; + } + set_bit(func_no, &epc->function_num_map); + epf->func_no = func_no; epf->epc = epc; - mutex_lock(&epc->lock); list_add_tail(&epf->list, &epc->pci_epf); + +ret: mutex_unlock(&epc->lock); - return 0; + return ret; } EXPORT_SYMBOL_GPL(pci_epc_add_epf); @@ -503,6 +520,7 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf) return; mutex_lock(&epc->lock); + clear_bit(epf->func_no, &epc->function_num_map); list_del(&epf->list); epf->epc = NULL; mutex_unlock(&epc->lock); diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 4e3e527c49d1..ccaf6e3fa931 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -92,6 +92,7 @@ struct pci_epc_mem { * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC 
device * @lock: mutex to protect pci_epc ops + * @function_num_map: bitmap to manage physical function number * @notifier: used to notify EPF of any EPC events (like linkup) */ struct pci_epc { @@ -103,6 +104,7 @@ struct pci_epc { struct config_group *group; /* mutex to protect against concurrent access of EP controller */ struct mutex lock; + unsigned long function_num_map; struct atomic_notifier_head notifier; }; -- cgit v1.2.3-58-ga151 From 0231453bc08f63584545dda1c05d61b19755d3a9 Mon Sep 17 00:00:00 2001 From: Rander Wang Date: Tue, 14 Jan 2020 18:08:43 -0600 Subject: soundwire: bus: add clock stop helpers SoundWire supports two clock stop modes. Add support to handle the clock stop modes and add pm_runtime calls in the bus. Credits: this patch is based on an earlier internal contribution by Vinod Koul, Sanyog Kale, Shreyas Nc and Hardik Shah. Signed-off-by: Bard Liao Signed-off-by: Rander Wang Signed-off-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200115000844.14695-10-pierre-louis.bossart@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 332 ++++++++++++++++++++++++++++++++++++++++++ include/linux/soundwire/sdw.h | 24 +++ 2 files changed, 356 insertions(+) (limited to 'include/linux') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 23bc24c8e9d1..3395abd2ed39 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -2,6 +2,7 @@ // Copyright(c) 2015-17 Intel Corporation. #include +#include #include #include #include @@ -359,6 +360,52 @@ static int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value) return sdw_nwrite_no_pm(slave, addr, 1, &value); } +static int +sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr) +{ + struct sdw_msg msg; + u8 buf; + int ret; + + ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num, + SDW_MSG_FLAG_READ, &buf); + if (ret) + return ret; + + ret = sdw_transfer(bus, &msg); + if (ret < 0) + return ret; + else + return buf; +} + +static int +sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value) +{ + struct sdw_msg msg; + int ret; + + ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num, + SDW_MSG_FLAG_WRITE, &value); + if (ret) + return ret; + + return sdw_transfer(bus, &msg); +} + +static int +sdw_read_no_pm(struct sdw_slave *slave, u32 addr) +{ + u8 buf; + int ret; + + ret = sdw_nread_no_pm(slave, addr, 1, &buf); + if (ret < 0) + return ret; + else + return buf; +} + /** * sdw_nread() - Read "n" contiguous SDW Slave registers * @slave: SDW Slave @@ -673,6 +720,291 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, mutex_unlock(&slave->bus->bus_lock); } +static enum sdw_clk_stop_mode sdw_get_clk_stop_mode(struct sdw_slave *slave) +{ + enum sdw_clk_stop_mode mode; + + /* + * Query for clock stop mode if Slave implements + * ops->get_clk_stop_mode, else read from property. 
+ */ + if (slave->ops && slave->ops->get_clk_stop_mode) { + mode = slave->ops->get_clk_stop_mode(slave); + } else { + if (slave->prop.clk_stop_mode1) + mode = SDW_CLK_STOP_MODE1; + else + mode = SDW_CLK_STOP_MODE0; + } + + return mode; +} + +static int sdw_slave_clk_stop_callback(struct sdw_slave *slave, + enum sdw_clk_stop_mode mode, + enum sdw_clk_stop_type type) +{ + int ret; + + if (slave->ops && slave->ops->clk_stop) { + ret = slave->ops->clk_stop(slave, mode, type); + if (ret < 0) { + dev_err(&slave->dev, + "Clk Stop type =%d failed: %d\n", type, ret); + return ret; + } + } + + return 0; +} + +static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave, + enum sdw_clk_stop_mode mode, + bool prepare) +{ + bool wake_en; + u32 val = 0; + int ret; + + wake_en = slave->prop.wake_capable; + + if (prepare) { + val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP; + + if (mode == SDW_CLK_STOP_MODE1) + val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1; + + if (wake_en) + val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN; + } else { + val = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL); + + val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP); + } + + ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val); + + if (ret != 0) + dev_err(&slave->dev, + "Clock Stop prepare failed for slave: %d", ret); + + return ret; +} + +static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num) +{ + int retry = bus->clk_stop_timeout; + int val; + + do { + val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT) & + SDW_SCP_STAT_CLK_STP_NF; + if (!val) { + dev_info(bus->dev, "clock stop prep/de-prep done slave:%d", + dev_num); + return 0; + } + + usleep_range(1000, 1500); + retry--; + } while (retry); + + dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d", + dev_num); + + return -ETIMEDOUT; +} + +/** + * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop + * + * @bus: SDW bus instance + * + * Query Slave for clock stop mode and prepare for that mode. + */ +int sdw_bus_prep_clk_stop(struct sdw_bus *bus) +{ + enum sdw_clk_stop_mode slave_mode; + bool simple_clk_stop = true; + struct sdw_slave *slave; + bool is_slave = false; + int ret = 0; + + /* + * In order to save on transition time, prepare + * each Slave and then wait for all Slave(s) to be + * prepared for clock stop. 
+ */ + list_for_each_entry(slave, &bus->slaves, node) { + if (!slave->dev_num) + continue; + + /* Identify if Slave(s) are available on Bus */ + is_slave = true; + + if (slave->status != SDW_SLAVE_ATTACHED && + slave->status != SDW_SLAVE_ALERT) + continue; + + slave_mode = sdw_get_clk_stop_mode(slave); + slave->curr_clk_stop_mode = slave_mode; + + ret = sdw_slave_clk_stop_callback(slave, slave_mode, + SDW_CLK_PRE_PREPARE); + if (ret < 0) { + dev_err(&slave->dev, + "pre-prepare failed:%d", ret); + return ret; + } + + ret = sdw_slave_clk_stop_prepare(slave, + slave_mode, true); + if (ret < 0) { + dev_err(&slave->dev, + "pre-prepare failed:%d", ret); + return ret; + } + + if (slave_mode == SDW_CLK_STOP_MODE1) + simple_clk_stop = false; + } + + if (is_slave && !simple_clk_stop) { + ret = sdw_bus_wait_for_clk_prep_deprep(bus, + SDW_BROADCAST_DEV_NUM); + if (ret < 0) + return ret; + } + + /* Inform slaves that prep is done */ + list_for_each_entry(slave, &bus->slaves, node) { + if (!slave->dev_num) + continue; + + if (slave->status != SDW_SLAVE_ATTACHED && + slave->status != SDW_SLAVE_ALERT) + continue; + + slave_mode = slave->curr_clk_stop_mode; + + if (slave_mode == SDW_CLK_STOP_MODE1) { + ret = sdw_slave_clk_stop_callback(slave, + slave_mode, + SDW_CLK_POST_PREPARE); + + if (ret < 0) { + dev_err(&slave->dev, + "post-prepare failed:%d", ret); + } + } + } + + return ret; +} +EXPORT_SYMBOL(sdw_bus_prep_clk_stop); + +/** + * sdw_bus_clk_stop: stop bus clock + * + * @bus: SDW bus instance + * + * After preparing the Slaves for clock stop, stop the clock by broadcasting + * write to SCP_CTRL register. + */ +int sdw_bus_clk_stop(struct sdw_bus *bus) +{ + int ret; + + /* + * broadcast clock stop now, attached Slaves will ACK this, + * unattached will ignore + */ + ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM, + SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW); + if (ret < 0) { + dev_err(bus->dev, + "ClockStopNow Broadcast message failed %d", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(sdw_bus_clk_stop); + +/** + * sdw_bus_exit_clk_stop: Exit clock stop mode + * + * @bus: SDW bus instance + * + * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves + * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate + * back. + */ +int sdw_bus_exit_clk_stop(struct sdw_bus *bus) +{ + enum sdw_clk_stop_mode mode; + bool simple_clk_stop = true; + struct sdw_slave *slave; + bool is_slave = false; + int ret; + + /* + * In order to save on transition time, de-prepare + * each Slave and then wait for all Slave(s) to be + * de-prepared after clock resume. 
+ */ + list_for_each_entry(slave, &bus->slaves, node) { + if (!slave->dev_num) + continue; + + /* Identify if Slave(s) are available on Bus */ + is_slave = true; + + if (slave->status != SDW_SLAVE_ATTACHED && + slave->status != SDW_SLAVE_ALERT) + continue; + + mode = slave->curr_clk_stop_mode; + + if (mode == SDW_CLK_STOP_MODE1) { + simple_clk_stop = false; + continue; + } + + ret = sdw_slave_clk_stop_callback(slave, mode, + SDW_CLK_PRE_DEPREPARE); + if (ret < 0) + dev_warn(&slave->dev, + "clk stop deprep failed:%d", ret); + + ret = sdw_slave_clk_stop_prepare(slave, mode, + false); + + if (ret < 0) + dev_warn(&slave->dev, + "clk stop deprep failed:%d", ret); + } + + if (is_slave && !simple_clk_stop) + sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM); + + list_for_each_entry(slave, &bus->slaves, node) { + if (!slave->dev_num) + continue; + + if (slave->status != SDW_SLAVE_ATTACHED && + slave->status != SDW_SLAVE_ALERT) + continue; + + mode = slave->curr_clk_stop_mode; + sdw_slave_clk_stop_callback(slave, mode, + SDW_CLK_POST_DEPREPARE); + } + + return 0; +} +EXPORT_SYMBOL(sdw_bus_exit_clk_stop); + int sdw_configure_dpn_intr(struct sdw_slave *slave, int port, bool enable, int mask) { diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index b451bb622335..b8427df034ce 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -79,6 +79,21 @@ enum sdw_slave_status { SDW_SLAVE_RESERVED = 3, }; +/** + * enum sdw_clk_stop_type: clock stop operations + * + * @SDW_CLK_PRE_PREPARE: pre clock stop prepare + * @SDW_CLK_POST_PREPARE: post clock stop prepare + * @SDW_CLK_PRE_DEPREPARE: pre clock stop de-prepare + * @SDW_CLK_POST_DEPREPARE: post clock stop de-prepare + */ +enum sdw_clk_stop_type { + SDW_CLK_PRE_PREPARE = 0, + SDW_CLK_POST_PREPARE, + SDW_CLK_PRE_DEPREPARE, + SDW_CLK_POST_DEPREPARE, +}; + /** * enum sdw_command_response - Command response as defined by SDW spec * @SDW_CMD_OK: cmd was successful @@ -533,6 +548,11 @@ struct sdw_slave_ops { int (*port_prep)(struct sdw_slave *slave, struct sdw_prepare_ch *prepare_ch, enum sdw_port_prep_ops pre_ops); + int (*get_clk_stop_mode)(struct sdw_slave *slave); + int (*clk_stop)(struct sdw_slave *slave, + enum sdw_clk_stop_mode mode, + enum sdw_clk_stop_type type); + }; /** @@ -575,6 +595,7 @@ struct sdw_slave { #endif struct list_head node; struct completion *port_ready; + enum sdw_clk_stop_mode curr_clk_stop_mode; u16 dev_num; u16 dev_num_sticky; bool probed; @@ -892,6 +913,9 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); int sdw_disable_stream(struct sdw_stream_runtime *stream); int sdw_deprepare_stream(struct sdw_stream_runtime *stream); +int sdw_bus_prep_clk_stop(struct sdw_bus *bus); +int sdw_bus_clk_stop(struct sdw_bus *bus); +int sdw_bus_exit_clk_stop(struct sdw_bus *bus); /* messaging and data APIs */ -- cgit v1.2.3-58-ga151 From 3d5f7d9f6a38ddcc105ebfb23b640630bbabba65 Mon Sep 17 00:00:00 2001 From: Vidya Sagar Date: Mon, 17 Feb 2020 17:40:32 +0530 Subject: PCI: endpoint: Add core init notifying feature Add a new feature core_init_notifier for cores that can notify about their availability for initialization. 
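Endpoint function drivers discover this capability through the pci_epc_features structure for the function they are bound to. A sketch of the intended consumer-side check (assuming the existing pci_epc_get_features() lookup; illustrative only, not part of this patch):

	const struct pci_epc_features *features;

	features = pci_epc_get_features(epc, epf->func_no);
	if (features && features->core_init_notifier) {
		/* defer BAR/header/MSI setup until the core-init event */
	} else {
		/* core registers are already accessible: configure now */
	}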
Signed-off-by: Vidya Sagar Signed-off-by: Lorenzo Pieralisi Acked-by: Kishon Vijay Abraham I --- include/linux/pci-epc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index ccaf6e3fa931..9ffe6bd081ae 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -120,6 +120,7 @@ struct pci_epc { */ struct pci_epc_features { unsigned int linkup_notifier : 1; + unsigned int core_init_notifier : 1; unsigned int msi_capable : 1; unsigned int msix_capable : 1; u8 reserved_bar; -- cgit v1.2.3-58-ga151 From 0ef22dcf0c1871888c4c0ee46a9d9c494f2fe997 Mon Sep 17 00:00:00 2001 From: Vidya Sagar Date: Mon, 17 Feb 2020 17:40:34 +0530 Subject: PCI: endpoint: Add notification for core init completion Add support to send notifications to EPF from EPC once the core register initialization is complete. Signed-off-by: Vidya Sagar Signed-off-by: Lorenzo Pieralisi Acked-by: Kishon Vijay Abraham I --- drivers/pci/endpoint/pci-epc-core.c | 19 ++++++++++++++++++- include/linux/pci-epc.h | 1 + include/linux/pci-epf.h | 5 +++++ 3 files changed, 24 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index dc1c673534e0..0d22a377a0cf 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -540,10 +540,27 @@ void pci_epc_linkup(struct pci_epc *epc) if (!epc || IS_ERR(epc)) return; - atomic_notifier_call_chain(&epc->notifier, 0, NULL); + atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL); } EXPORT_SYMBOL_GPL(pci_epc_linkup); +/** + * pci_epc_init_notify() - Notify the EPF device that the EPC device's core + * initialization is completed. + * @epc: the EPC device whose core initialization is completed + * + * Invoke to notify the EPF device that the EPC device's initialization + * is completed. + */ +void pci_epc_init_notify(struct pci_epc *epc) +{ + if (!epc || IS_ERR(epc)) + return; + + atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL); +} +EXPORT_SYMBOL_GPL(pci_epc_init_notify); + /** * pci_epc_destroy() - destroy the EPC device * @epc: the EPC device that has to be destroyed diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 9ffe6bd081ae..0d7e91bad91e 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -165,6 +165,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc); void pci_epc_destroy(struct pci_epc *epc); int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); void pci_epc_linkup(struct pci_epc *epc); +void pci_epc_init_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index bcdf4f07bde7..0c628e30c582 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -15,6 +15,11 @@ struct pci_epf; +enum pci_notify_event { + CORE_INIT, + LINK_UP, +}; + enum pci_barno { BAR_0, BAR_1, -- cgit v1.2.3-58-ga151 From 2c6cff682d6681fb1cdb03b3cdbbecd3fb0e4c89 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 25 Feb 2020 11:00:41 -0600 Subject: soundwire: add helper macros for devID fields Move bit extractors to macros, so that the definitions can be used by other drivers parsing the MIPI definitions extracted from firmware tables (ACPI or DT).
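With the extractors in the header, firmware-parsing code can decode a 64-bit DisCo _ADR value without open-coding the shifts. For example, given an address read from ACPI (the value here is made up):

	u64 addr = 0x000030025d070000ULL;		/* hypothetical _ADR */
	unsigned int link_id = SDW_DISCO_LINK_ID(addr);	/* bits 51:48 */
	unsigned int version = SDW_VERSION(addr);	/* bits 47:44 */
	unsigned int unique_id = SDW_UNIQUE_ID(addr);	/* bits 43:40 */
	unsigned int mfg_id = SDW_MFG_ID(addr);		/* bits 39:24 */
	unsigned int part_id = SDW_PART_ID(addr);	/* bits 23:8 */
	unsigned int class_id = SDW_CLASS_ID(addr);	/* bits 7:0 */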
Signed-off-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200225170041.23644-4-pierre-louis.bossart@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 21 +++++---------------- include/linux/soundwire/sdw.h | 23 +++++++++++++++++++++++ 2 files changed, 28 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 6106577fb3ed..ab35b09e7231 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -504,22 +504,11 @@ void sdw_extract_slave_id(struct sdw_bus *bus, { dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr); - /* - * Spec definition - * Register Bit Contents - * DevId_0 [7:4] 47:44 sdw_version - * DevId_0 [3:0] 43:40 unique_id - * DevId_1 39:32 mfg_id [15:8] - * DevId_2 31:24 mfg_id [7:0] - * DevId_3 23:16 part_id [15:8] - * DevId_4 15:08 part_id [7:0] - * DevId_5 07:00 class_id - */ - id->sdw_version = (addr >> 44) & GENMASK(3, 0); - id->unique_id = (addr >> 40) & GENMASK(3, 0); - id->mfg_id = (addr >> 24) & GENMASK(15, 0); - id->part_id = (addr >> 8) & GENMASK(15, 0); - id->class_id = addr & GENMASK(7, 0); + id->sdw_version = SDW_VERSION(addr); + id->unique_id = SDW_UNIQUE_ID(addr); + id->mfg_id = SDW_MFG_ID(addr); + id->part_id = SDW_PART_ID(addr); + id->class_id = SDW_CLASS_ID(addr); dev_dbg(bus->dev, "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x\n", diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index b451bb622335..56273c5c1f6b 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -424,6 +424,29 @@ struct sdw_slave_id { __u8 sdw_version:4; }; +/* + * Helper macros to extract the MIPI-defined IDs + * + * Spec definition + * Register Bit Contents + * DevId_0 [7:4] 47:44 sdw_version + * DevId_0 [3:0] 43:40 unique_id + * DevId_1 39:32 mfg_id [15:8] + * DevId_2 31:24 mfg_id [7:0] + * DevId_3 23:16 part_id [15:8] + * DevId_4 15:08 part_id [7:0] + * DevId_5 07:00 class_id + * + * The MIPI DisCo for SoundWire defines in addition the link_id as bits 51:48 + */ + +#define SDW_DISCO_LINK_ID(adr) (((adr) >> 48) & GENMASK(3, 0)) +#define SDW_VERSION(adr) (((adr) >> 44) & GENMASK(3, 0)) +#define SDW_UNIQUE_ID(adr) (((adr) >> 40) & GENMASK(3, 0)) +#define SDW_MFG_ID(adr) (((adr) >> 24) & GENMASK(15, 0)) +#define SDW_PART_ID(adr) (((adr) >> 8) & GENMASK(15, 0)) +#define SDW_CLASS_ID(adr) ((adr) & GENMASK(7, 0)) + /** * struct sdw_slave_intr_status - Slave interrupt status * @control_port: control port status -- cgit v1.2.3-58-ga151 From c7896490dd1a4e6b346e8a475e4a433356362770 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 7 Jan 2020 15:10:28 +0100 Subject: leds: ns2: Absorb platform data Nothing in the kernel includes the external header so just push the contents into the ns2 leds driver. If someone wants to use platform data or board files to describe this device they should be able to do so using GPIO machine descriptors but in any case device tree should be the way forward for these systems in all cases I can think of, and the driver already supports that. 
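For anyone who does need the board-file route the commit message mentions, the modern replacement for the removed platform data header would be a GPIO machine descriptor table registered from board code. A hedged sketch only — the chip label, line numbers, and con_ids are made up, and it assumes the driver were converted to gpiod lookups:

	#include <linux/gpio/machine.h>

	static struct gpiod_lookup_table ns2_leds_gpios = {
		.dev_id = "leds-ns2",		/* hypothetical device name */
		.table = {
			GPIO_LOOKUP_IDX("gpio-0", 29, "cmd", 0, GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP_IDX("gpio-0", 30, "slow", 0, GPIO_ACTIVE_HIGH),
			{ },
		},
	};

	/* from board init code */
	gpiod_add_lookup_table(&ns2_leds_gpios);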
Cc: Simon Guinot Cc: Vincent Donnefort Signed-off-by: Linus Walleij Tested-by: Simon Guinot Signed-off-by: Pavel Machek --- drivers/leds/leds-ns2.c | 30 +++++++++++++++++-- include/linux/platform_data/leds-kirkwood-ns2.h | 38 ------------------------- 2 files changed, 27 insertions(+), 41 deletions(-) delete mode 100644 include/linux/platform_data/leds-kirkwood-ns2.h (limited to 'include/linux') diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index 7c500dfdcfa3..6d37dda12c39 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -12,14 +12,38 @@ #include #include #include -#include +#include #include #include -#include #include -#include #include "leds.h" +enum ns2_led_modes { + NS_V2_LED_OFF, + NS_V2_LED_ON, + NS_V2_LED_SATA, +}; + +struct ns2_led_modval { + enum ns2_led_modes mode; + int cmd_level; + int slow_level; +}; + +struct ns2_led { + const char *name; + const char *default_trigger; + unsigned cmd; + unsigned slow; + int num_modes; + struct ns2_led_modval *modval; +}; + +struct ns2_led_platform_data { + int num_leds; + struct ns2_led *leds; +}; + /* * The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED * modes are available: off, on and SATA activity blinking. The LED modes are diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h deleted file mode 100644 index eb8a6860e816..000000000000 --- a/include/linux/platform_data/leds-kirkwood-ns2.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Platform data structure for Network Space v2 LED driver - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __LEDS_KIRKWOOD_NS2_H -#define __LEDS_KIRKWOOD_NS2_H - -enum ns2_led_modes { - NS_V2_LED_OFF, - NS_V2_LED_ON, - NS_V2_LED_SATA, -}; - -struct ns2_led_modval { - enum ns2_led_modes mode; - int cmd_level; - int slow_level; -}; - -struct ns2_led { - const char *name; - const char *default_trigger; - unsigned cmd; - unsigned slow; - int num_modes; - struct ns2_led_modval *modval; -}; - -struct ns2_led_platform_data { - int num_leds; - struct ns2_led *leds; -}; - -#endif /* __LEDS_KIRKWOOD_NS2_H */ -- cgit v1.2.3-58-ga151 From feaa8baee82ababa46af95b03cfc28680ad647a6 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: bus: ti-sysc: Implement SoC revision handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to know SoC type and features for cases where the same SoC may be installed in various versions on the same board and would need a separate dts file otherwise for the different variants. For example, am3703 is pin compatible with omap3630, but has sgx and iva accelerators disabled. We must not try to access the sgx or iva module registers on am3703, and need to set the unavailable devices disabled early. Let's also detect omap3430 as that is needed for display subsystem (DSS) reset later on, and GP vs EMU or HS devices. Further SoC specific disabled device detection can be added as needed, such as dra71x vs dra76x rtc and usb4. Cc: Adam Ford Cc: André Hentschel Cc: H. 
Nikolaus Schaller Cc: Keerthy Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/pdata-quirks.c | 6 ++ drivers/bus/ti-sysc.c | 194 +++++++++++++++++++++++++++++++++- include/linux/platform_data/ti-sysc.h | 1 + 3 files changed, 200 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index dbb7c2acef31..2a4fe3e68b82 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -397,10 +397,16 @@ static int ti_sysc_shutdown_module(struct device *dev, return omap_hwmod_shutdown(cookie->data); } +static bool ti_sysc_soc_type_gp(void) +{ + return omap_type() == OMAP2_DEVICE_TYPE_GP; +} + static struct of_dev_auxdata omap_auxdata_lookup[]; static struct ti_sysc_platform_data ti_sysc_pdata = { .auxdata = omap_auxdata_lookup, + .soc_type_gp = ti_sysc_soc_type_gp, .init_clockdomain = ti_sysc_clkdm_init, .clkdm_deny_idle = ti_sysc_clkdm_deny_idle, .clkdm_allow_idle = ti_sysc_clkdm_allow_idle, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4e87eb5e8ed7..4c377c576582 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -15,15 +16,47 @@ #include #include #include +#include #include #include #include +#define DIS_ISP BIT(2) +#define DIS_IVA BIT(1) +#define DIS_SGX BIT(0) + +#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), } + #define MAX_MODULE_SOFTRESET_WAIT 10000 -static const char * const reg_names[] = { "rev", "sysc", "syss", }; +enum sysc_soc { + SOC_UNKNOWN, + SOC_2420, + SOC_2430, + SOC_3430, + SOC_3630, + SOC_4430, + SOC_4460, + SOC_4470, + SOC_5430, + SOC_AM3, + SOC_AM4, + SOC_DRA7, +}; + +struct sysc_address { + unsigned long base; + struct list_head node; +}; + +struct sysc_soc_info { + unsigned long general_purpose:1; + enum sysc_soc soc; + struct mutex list_lock; /* disabled modules list lock */ + struct list_head disabled_modules; +}; enum sysc_clocks { SYSC_FCK, @@ -39,6 +72,8 @@ enum sysc_clocks { SYSC_MAX_CLOCKS, }; +static struct sysc_soc_info *sysc_soc; +static const char * const reg_names[] = { "rev", "sysc", "syss", }; static const char * const clock_names[SYSC_MAX_CLOCKS] = { "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4", "opt5", "opt6", "opt7", @@ -2382,6 +2417,154 @@ static void ti_sysc_idle(struct work_struct *work) pm_runtime_put_sync(ddata->dev); } +/* + * SoC model and features detection. Only needed for SoCs that need + * special handling for quirks, no need to list others. + */ +static const struct soc_device_attribute sysc_soc_match[] = { + SOC_FLAG("OMAP242*", SOC_2420), + SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("OMAP3[45]*", SOC_3430), + SOC_FLAG("OMAP3[67]*", SOC_3630), + SOC_FLAG("OMAP443*", SOC_4430), + SOC_FLAG("OMAP446*", SOC_4460), + SOC_FLAG("OMAP447*", SOC_4470), + SOC_FLAG("OMAP54*", SOC_5430), + SOC_FLAG("AM433", SOC_AM3), + SOC_FLAG("AM43*", SOC_AM4), + SOC_FLAG("DRA7*", SOC_DRA7), + + { /* sentinel */ }, +}; + +/* + * List of SoCs variants with disabled features. By default we assume all + * devices in the device tree are available so no need to list those SoCs. 
+ */ +static const struct soc_device_attribute sysc_soc_feat_match[] = { + /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */ + SOC_FLAG("AM3505", DIS_SGX), + SOC_FLAG("OMAP3525", DIS_SGX), + SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX), + SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX), + + /* OMAP3630/DM3730 variants with some accelerators disabled */ + SOC_FLAG("AM3703", DIS_IVA | DIS_SGX), + SOC_FLAG("DM3725", DIS_SGX), + SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX), + SOC_FLAG("OMAP3615/AM3715", DIS_IVA), + SOC_FLAG("OMAP3621", DIS_ISP), + + { /* sentinel */ }, +}; + +static int sysc_add_disabled(unsigned long base) +{ + struct sysc_address *disabled_module; + + disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL); + if (!disabled_module) + return -ENOMEM; + + disabled_module->base = base; + + mutex_lock(&sysc_soc->list_lock); + list_add(&disabled_module->node, &sysc_soc->disabled_modules); + mutex_unlock(&sysc_soc->list_lock); + + return 0; +} + +/* + * One time init to detect the booted SoC and disable unavailable features. + * Note that we initialize static data shared across all ti-sysc instances + * so ddata is only used for SoC type. This can be called from module_init + * once we no longer need to rely on platform data. + */ +static int sysc_init_soc(struct sysc *ddata) +{ + const struct soc_device_attribute *match; + struct ti_sysc_platform_data *pdata; + unsigned long features = 0; + + if (sysc_soc) + return 0; + + sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL); + if (!sysc_soc) + return -ENOMEM; + + mutex_init(&sysc_soc->list_lock); + INIT_LIST_HEAD(&sysc_soc->disabled_modules); + sysc_soc->general_purpose = true; + + pdata = dev_get_platdata(ddata->dev); + if (pdata && pdata->soc_type_gp) + sysc_soc->general_purpose = pdata->soc_type_gp(); + + match = soc_device_match(sysc_soc_match); + if (match && match->data) + sysc_soc->soc = (int)match->data; + + match = soc_device_match(sysc_soc_feat_match); + if (!match) + return 0; + + if (match->data) + features = (unsigned long)match->data; + + /* + * Add disabled devices to the list based on the module base. + * Note that this must be done before we attempt to access the + * device and have module revision checks working. 
+ */ + if (features & DIS_ISP) + sysc_add_disabled(0x480bd400); + if (features & DIS_IVA) + sysc_add_disabled(0x5d000000); + if (features & DIS_SGX) + sysc_add_disabled(0x50000000); + + return 0; +} + +static void sysc_cleanup_soc(void) +{ + struct sysc_address *disabled_module; + struct list_head *pos, *tmp; + + if (!sysc_soc) + return; + + mutex_lock(&sysc_soc->list_lock); + list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) { + disabled_module = list_entry(pos, struct sysc_address, node); + list_del(pos); + kfree(disabled_module); + } + mutex_unlock(&sysc_soc->list_lock); +} + +static int sysc_check_disabled_devices(struct sysc *ddata) +{ + struct sysc_address *disabled_module; + struct list_head *pos; + int error = 0; + + mutex_lock(&sysc_soc->list_lock); + list_for_each(pos, &sysc_soc->disabled_modules) { + disabled_module = list_entry(pos, struct sysc_address, node); + if (ddata->module_pa == disabled_module->base) { + dev_dbg(ddata->dev, "module disabled for this SoC\n"); + error = -ENODEV; + break; + } + } + mutex_unlock(&sysc_soc->list_lock); + + return error; +} + static const struct of_device_id sysc_match_table[] = { { .compatible = "simple-bus", }, { /* sentinel */ }, @@ -2400,6 +2583,10 @@ static int sysc_probe(struct platform_device *pdev) ddata->dev = &pdev->dev; platform_set_drvdata(pdev, ddata); + error = sysc_init_soc(ddata); + if (error) + return error; + error = sysc_init_match(ddata); if (error) return error; @@ -2430,6 +2617,10 @@ static int sysc_probe(struct platform_device *pdev) sysc_init_early_quirks(ddata); + error = sysc_check_disabled_devices(ddata); + if (error) + return error; + error = sysc_get_clocks(ddata); if (error) return error; @@ -2560,6 +2751,7 @@ static void __exit sysc_exit(void) { bus_unregister_notifier(&platform_bus_type, &sysc_nb); platform_driver_unregister(&sysc_driver); + sysc_cleanup_soc(); } module_exit(sysc_exit); diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 2cbde6542849..accab5325cf3 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -141,6 +141,7 @@ struct clk; struct ti_sysc_platform_data { struct of_dev_auxdata *auxdata; + bool (*soc_type_gp)(void); int (*init_clockdomain)(struct device *dev, struct clk *fck, struct clk *ick, struct ti_sysc_cookie *cookie); void (*clkdm_deny_idle)(struct device *dev, -- cgit v1.2.3-58-ga151 From e8639e1c986a8a9d0f94549170f6db579376c3ae Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: bus: ti-sysc: Handle module unlock quirk needed for some RTC The RTC modules on am3 and am4 need quirk handling to unlock and lock them for reset so let's add the quirk handling based on what we already have for legacy platform data. In later patches we will simply drop the RTC related platform data and the old quirk handling. 
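For context, the RTC IP on these SoCs write-protects its registers: writes are ignored until two magic values are written to the KICK0/KICK1 registers, and writing anything else re-locks the block. Condensed from the quirk added below (offsets and magic values as used by the patch):

	#define RTC_KICK0R	0x6c
	#define RTC_KICK1R	0x70

	/* unlock: magic values enable register writes */
	sysc_write(ddata, RTC_KICK0R, 0x83e70b13);
	sysc_write(ddata, RTC_KICK1R, 0x95a4f1e0);

	/* ... update protected registers within the ~15 us window ... */

	/* lock: any other value (here 0) restores write protection */
	sysc_write(ddata, RTC_KICK0R, 0);
	sysc_write(ddata, RTC_KICK1R, 0);

The quirk additionally polls the RTC_STATUS BUSY bit with interrupts off before touching the kick registers, because the safe-write window is only about 15 microseconds wide.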
Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 74 ++++++++++++++++++++++++++++++++--- include/linux/platform_data/ti-sysc.h | 1 + 2 files changed, 69 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4c377c576582..6caa2222091e 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -110,6 +110,8 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @reset_done_quirk: module specific reset done quirk * @module_enable_quirk: module specific enable quirk * @module_disable_quirk: module specific disable quirk + * @module_unlock_quirk: module specific sysconfig unlock quirk + * @module_lock_quirk: module specific sysconfig lock quirk */ struct sysc { struct device *dev; @@ -137,6 +139,8 @@ struct sysc { void (*reset_done_quirk)(struct sysc *sysc); void (*module_enable_quirk)(struct sysc *sysc); void (*module_disable_quirk)(struct sysc *sysc); + void (*module_unlock_quirk)(struct sysc *sysc); + void (*module_lock_quirk)(struct sysc *sysc); }; static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, @@ -896,6 +900,22 @@ static void sysc_show_registers(struct sysc *ddata) buf); } +/** + * sysc_write_sysconfig - handle sysconfig quirks for register write + * @ddata: device driver data + * @value: register value + */ +static void sysc_write_sysconfig(struct sysc *ddata, u32 value) +{ + if (ddata->module_unlock_quirk) + ddata->module_unlock_quirk(ddata); + + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); + + if (ddata->module_lock_quirk) + ddata->module_lock_quirk(ddata); +} + #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) #define SYSC_CLOCACT_ICK 2 @@ -942,7 +962,7 @@ static int sysc_enable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_midle: /* Set MIDLE mode */ @@ -961,14 +981,14 @@ set_midle: reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_autoidle: /* Autoidle bit must enabled separately if available */ if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); } if (ddata->module_enable_quirk) @@ -1026,7 +1046,7 @@ static int sysc_disable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_sidle: /* Set SIDLE mode */ @@ -1049,7 +1069,7 @@ set_sidle: if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); return 0; } @@ -1301,6 +1321,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, + SYSC_MODULE_QUIRK_RTC_UNLOCK), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, 
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, @@ -1356,7 +1378,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), - SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, 0), SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), @@ -1478,6 +1499,40 @@ static void sysc_post_reset_quirk_i2c(struct sysc *ddata) sysc_clk_quirk_i2c(ddata, true); } +/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ +static void sysc_quirk_rtc(struct sysc *ddata, bool lock) +{ + u32 val, kick0_val = 0, kick1_val = 0; + unsigned long flags; + int error; + + if (!lock) { + kick0_val = 0x83e70b13; + kick1_val = 0x95a4f1e0; + } + + local_irq_save(flags); + /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ + error = readl_poll_timeout(ddata->module_va + 0x44, val, + !(val & BIT(0)), 100, 50); + if (error) + dev_warn(ddata->dev, "rtc busy timeout\n"); + /* Now we have ~15 microseconds to read/write various registers */ + sysc_write(ddata, 0x6c, kick0_val); + sysc_write(ddata, 0x70, kick1_val); + local_irq_restore(flags); +} + +static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, false); +} + +static void sysc_module_lock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, true); +} + /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ static void sysc_module_enable_quirk_sgx(struct sysc *ddata) { @@ -1532,6 +1587,13 @@ static void sysc_init_module_quirks(struct sysc *ddata) if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { + ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; + ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; + + return; + } + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index accab5325cf3..0b33c3b7302f 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) -- cgit v1.2.3-58-ga151 From 7324a7a0d5e232551eedad69fea3e4b91973d7c6 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 24 Feb 2020 12:58:03 -0800 Subject: bus: ti-sysc: Implement display subsystem reset quirk The display subsystem (DSS) needs the child outputs disabled for reset. In order to prepare to probe DSS without legacy platform data, let's implement sysc_pre_reset_quirk_dss() similar to what we have for the platform data with omap_dss_reset(). Note that we cannot directly use the old omap_dss_reset() without platform data callbacks and updating omap_dss_reset() to understand struct device. 
And we will be dropping omap_dss_reset() anyways when all the SoCs are probing with device tree, so let's not mess with the legacy code at all. Cc: Jyri Sarha Cc: Laurent Pinchart Cc: Tomi Valkeinen Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 131 +++++++++++++++++++++++++++++++++- include/linux/platform_data/ti-sysc.h | 1 + 2 files changed, 129 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index b29f4e451dc1..e30c97ca5579 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1303,11 +1303,11 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, - SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, - SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, - SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET), SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, @@ -1468,6 +1468,128 @@ static void sysc_init_revision_quirks(struct sysc *ddata) } } +/* + * DSS needs dispc outputs disabled to reset modules. Returns mask of + * enabled DSS interrupts. Eventually we may be able to do this on + * dispc init rather than top-level DSS init. + */ +static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, + bool disable) +{ + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; + const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); + int manager_count; + bool framedonetv_irq; + u32 val, irq_mask = 0; + + switch (sysc_soc->soc) { + case SOC_2420 ... SOC_3630: + manager_count = 2; + framedonetv_irq = false; + break; + case SOC_4430 ... 
SOC_4470: + manager_count = 3; + break; + case SOC_5430: + case SOC_DRA7: + manager_count = 4; + break; + case SOC_AM4: + manager_count = 1; + break; + case SOC_UNKNOWN: + default: + return 0; + }; + + /* Remap the whole module range to be able to reset dispc outputs */ + devm_iounmap(ddata->dev, ddata->module_va); + ddata->module_va = devm_ioremap(ddata->dev, + ddata->module_pa, + ddata->module_size); + if (!ddata->module_va) + return -EIO; + + /* DISP_CONTROL */ + val = sysc_read(ddata, dispc_offset + 0x40); + lcd_en = val & lcd_en_mask; + digit_en = val & digit_en_mask; + if (lcd_en) + irq_mask |= BIT(0); /* FRAMEDONE */ + if (digit_en) { + if (framedonetv_irq) + irq_mask |= BIT(24); /* FRAMEDONETV */ + else + irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ + } + if (disable & (lcd_en | digit_en)) + sysc_write(ddata, dispc_offset + 0x40, + val & ~(lcd_en_mask | digit_en_mask)); + + if (manager_count <= 2) + return irq_mask; + + /* DISPC_CONTROL2 */ + val = sysc_read(ddata, dispc_offset + 0x238); + lcd2_en = val & lcd_en_mask; + if (lcd2_en) + irq_mask |= BIT(22); /* FRAMEDONE2 */ + if (disable && lcd2_en) + sysc_write(ddata, dispc_offset + 0x238, + val & ~lcd_en_mask); + + if (manager_count <= 3) + return irq_mask; + + /* DISPC_CONTROL3 */ + val = sysc_read(ddata, dispc_offset + 0x848); + lcd3_en = val & lcd_en_mask; + if (lcd3_en) + irq_mask |= BIT(30); /* FRAMEDONE3 */ + if (disable && lcd3_en) + sysc_write(ddata, dispc_offset + 0x848, + val & ~lcd_en_mask); + + return irq_mask; +} + +/* DSS needs child outputs disabled and SDI registers cleared for reset */ +static void sysc_pre_reset_quirk_dss(struct sysc *ddata) +{ + const int dispc_offset = 0x1000; + int error; + u32 irq_mask, val; + + /* Get enabled outputs */ + irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false); + if (!irq_mask) + return; + + /* Clear IRQSTATUS */ + sysc_write(ddata, 0x1000 + 0x18, irq_mask); + + /* Disable outputs */ + val = sysc_quirk_dispc(ddata, dispc_offset, true); + + /* Poll IRQSTATUS */ + error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18, + val, val != irq_mask, 100, 50); + if (error) + dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", + __func__, val, irq_mask); + + if (sysc_soc->soc == SOC_3430) { + /* Clear DSS_SDI_CONTROL */ + sysc_write(ddata, dispc_offset + 0x44, 0); + + /* Clear DSS_PLL_CONTROL */ + sysc_write(ddata, dispc_offset + 0x48, 0); + } + + /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */ + sysc_write(ddata, dispc_offset + 0x40, 0); +} + /* 1-wire needs module's internal clocks enabled for reset */ static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata) { @@ -1606,6 +1728,9 @@ static void sysc_init_module_quirks(struct sysc *ddata) if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET) + ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 0b33c3b7302f..ecd3a979a14d 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define 
SYSC_QUIRK_FORCE_MSTANDBY BIT(20) -- cgit v1.2.3-58-ga151 From 06ee7a950b6a342cd79590e7243bdda850141967 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Thu, 12 Dec 2019 21:07:52 -0600 Subject: ARM: OMAP2+: pm33xx-core: Add cpuidle_ops for am335x/am437x am335x and am437x can now make use of the generic cpuidle-arm driver. This requires that we define init and suspend ops to be set as the cpuidle ops for the SoC. These ops are invoked directly at the last stage of the cpuidle-arm driver in order to allow low level platform code to run and bring the CPU the rest of the way into its desired idle state. It is required that CPUIDLE_METHOD_OF_DECLARE be called from code that is built in, so define these ops in pm33xx-core where the always built-in portion of the PM code for these SoCs lives. Additionally, although an soc_suspend function is already exposed by the pm33xx platform code, it contains additional operations needed for full SoC suspend beyond what is needed for a relatively simple CPU suspend needed during cpuidle. To get around this, introduce cpu_suspend ops to be used by the am335x and am437x PM driver for the last stage of the cpuidle path. Acked-by: Santosh Shilimkar Signed-off-by: Dave Gerlach Acked-by: Santosh Shilimkar Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/pm33xx-core.c | 117 ++++++++++++++++++++++++++++++++++- include/linux/platform_data/pm33xx.h | 3 + 2 files changed, 119 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index 7461b0346549..b36654186c79 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -6,11 +6,14 @@ * Dave Gerlach */ +#include +#include +#include #include #include #include -#include #include +#include #include #include #include @@ -35,6 +38,14 @@ static struct clockdomain *gfx_l4ls_clkdm; static void __iomem *scu_base; static struct omap_hwmod *rtc_oh; +static int (*idle_fn)(u32 wfi_flags); + +struct amx3_idle_state { + int wfi_flags; +}; + +static struct amx3_idle_state *idle_states; + static int am43xx_map_scu(void) { scu_base = ioremap(scu_a9_get_base(), SZ_256); @@ -201,6 +212,43 @@ static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long), return ret; } +static int am33xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args) +{ + int ret = 0; + + if (omap_irq_pending() || need_resched()) + return ret; + + ret = cpu_suspend(args, fn); + + return ret; +} + +static int am43xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args) +{ + int ret = 0; + + if (!scu_base) + return 0; + + scu_power_mode(scu_base, SCU_PM_DORMANT); + ret = cpu_suspend(args, fn); + scu_power_mode(scu_base, SCU_PM_NORMAL); + + return ret; +} + +static void amx3_begin_suspend(void) +{ + cpu_idle_poll_ctrl(true); +} + +static void amx3_finish_suspend(void) +{ + cpu_idle_poll_ctrl(false); +} + + static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void) { if (soc_is_am33xx()) @@ -254,6 +302,9 @@ static void am43xx_prepare_rtc_resume(void) static struct am33xx_pm_platform_data am33xx_ops = { .init = am33xx_suspend_init, .soc_suspend = am33xx_suspend, + .cpu_suspend = am33xx_cpu_suspend, + .begin_suspend = amx3_begin_suspend, + .finish_suspend = amx3_finish_suspend, .get_sram_addrs = amx3_get_sram_addrs, .save_context = am33xx_save_context, .restore_context = am33xx_restore_context, @@ -266,6 +317,9 @@ static struct am33xx_pm_platform_data am33xx_ops = { static struct am33xx_pm_platform_data am43xx_ops = { .init =
am43xx_suspend_init, .soc_suspend = am43xx_suspend, + .cpu_suspend = am43xx_cpu_suspend, + .begin_suspend = amx3_begin_suspend, + .finish_suspend = amx3_finish_suspend, .get_sram_addrs = amx3_get_sram_addrs, .save_context = am43xx_save_context, .restore_context = am43xx_restore_context, @@ -301,3 +355,64 @@ int __init amx3_common_pm_init(void) return 0; } + +static int __init amx3_idle_init(struct device_node *cpu_node, int cpu) +{ + struct device_node *state_node; + struct amx3_idle_state states[CPUIDLE_STATE_MAX]; + int i; + int state_count = 1; + + for (i = 0; ; i++) { + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + if (!state_node) + break; + + if (!of_device_is_available(state_node)) + continue; + + if (i == CPUIDLE_STATE_MAX) { + pr_warn("%s: cpuidle states reached max possible\n", + __func__); + break; + } + + states[state_count].wfi_flags = 0; + + if (of_property_read_bool(state_node, "ti,idle-wkup-m3")) + states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 | + WFI_FLAG_FLUSH_CACHE; + + state_count++; + } + + idle_states = kcalloc(state_count, sizeof(*idle_states), GFP_KERNEL); + if (!idle_states) + return -ENOMEM; + + for (i = 1; i < state_count; i++) + idle_states[i].wfi_flags = states[i].wfi_flags; + + return 0; +} + +static int amx3_idle_enter(unsigned long index) +{ + struct amx3_idle_state *idle_state = &idle_states[index]; + + if (!idle_state) + return -EINVAL; + + if (idle_fn) + idle_fn(idle_state->wfi_flags); + + return 0; +} + +static struct cpuidle_ops amx3_cpuidle_ops __initdata = { + .init = amx3_idle_init, + .suspend = amx3_idle_enter, +}; + +CPUIDLE_METHOD_OF_DECLARE(pm33xx_idle, "ti,am3352", &amx3_cpuidle_ops); +CPUIDLE_METHOD_OF_DECLARE(pm43xx_idle, "ti,am4372", &amx3_cpuidle_ops); diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h index dd5971937a64..8e59f2db2adc 100644 --- a/include/linux/platform_data/pm33xx.h +++ b/include/linux/platform_data/pm33xx.h @@ -49,6 +49,9 @@ struct am33xx_pm_platform_data { int (*init)(void); int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long), unsigned long args); + int (*cpu_suspend)(int (*fn)(unsigned long), unsigned long args); + void (*begin_suspend)(void); + void (*finish_suspend)(void); struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); void __iomem *(*get_rtc_base_addr)(void); void (*save_context)(void); -- cgit v1.2.3-58-ga151 From 65880ab160838e0764138894ef4450abdbed4af5 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Thu, 12 Dec 2019 21:07:53 -0600 Subject: ARM: OMAP2+: pm33xx-core: Extend platform_data ops for cpuidle In order for am335x and am437x to properly enter deeper c-states in cpuidle they must always call into the sleep33/43xx suspend code and also sometimes invoke the wkup_m3_ipc driver. These are both controlled by the pm33xx module so we must provide a method for the platform code to call back into the module when it is available as the core cpuidle ops that are invoked by the cpuidle-arm driver must remain as built in. Extend the init platform op to take an idle function as an argument so that we can use this to call into the pm33xx module for c-states that need it. Also add a deinit op so we can unregister this idle function from the PM core when the pm33xx module gets unloaded. 
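As a rough sketch of the resulting contract (the function names below are hypothetical; only the ops and the idle callback signature come from these patches), a loadable PM module would hand its idle hook to the init op and drop it again on unload:

static int my_sram_idle(u32 wfi_flags)
{
    /* Low-level idle path; wake the wkup M3 when WFI_FLAG_WAKE_M3 is set */
    return 0;
}

static int my_pm_module_probe(struct am33xx_pm_platform_data *pm_ops)
{
    /* Register the module's idle hook with the built-in PM core */
    return pm_ops->init(my_sram_idle);
}

static void my_pm_module_remove(struct am33xx_pm_platform_data *pm_ops)
{
    /* Drop the hook so the built-in code never calls into unloaded text */
    if (pm_ops->deinit)
        pm_ops->deinit();
}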
Acked-by: Santosh Shilimkar Signed-off-by: Dave Gerlach Acked-by: Santosh Shilimkar Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/pm33xx-core.c | 20 +++++++++++++++----- drivers/soc/ti/pm33xx.c | 2 +- include/linux/platform_data/pm33xx.h | 3 ++- 3 files changed, 18 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c index b36654186c79..5455fc98c60e 100644 --- a/arch/arm/mach-omap2/pm33xx-core.c +++ b/arch/arm/mach-omap2/pm33xx-core.c @@ -79,7 +79,7 @@ static int am43xx_check_off_mode_enable(void) return 0; } -static int amx3_common_init(void) +static int amx3_common_init(int (*idle)(u32 wfi_flags)) { gfx_pwrdm = pwrdm_lookup("gfx_pwrdm"); per_pwrdm = pwrdm_lookup("per_pwrdm"); @@ -99,10 +99,12 @@ static int amx3_common_init(void) else omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF); + idle_fn = idle; + return 0; } -static int am33xx_suspend_init(void) +static int am33xx_suspend_init(int (*idle)(u32 wfi_flags)) { int ret; @@ -113,12 +115,12 @@ static int am33xx_suspend_init(void) return -ENODEV; } - ret = amx3_common_init(); + ret = amx3_common_init(idle); return ret; } -static int am43xx_suspend_init(void) +static int am43xx_suspend_init(int (*idle)(u32 wfi_flags)) { int ret = 0; @@ -128,11 +130,17 @@ static int am43xx_suspend_init(void) return ret; } - ret = amx3_common_init(); + ret = amx3_common_init(idle); return ret; } +static int amx3_suspend_deinit(void) +{ + idle_fn = NULL; + return 0; +} + static void amx3_pre_suspend_common(void) { omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF); @@ -301,6 +309,7 @@ static void am43xx_prepare_rtc_resume(void) static struct am33xx_pm_platform_data am33xx_ops = { .init = am33xx_suspend_init, + .deinit = amx3_suspend_deinit, .soc_suspend = am33xx_suspend, .cpu_suspend = am33xx_cpu_suspend, .begin_suspend = amx3_begin_suspend, @@ -316,6 +325,7 @@ static struct am33xx_pm_platform_data am33xx_ops = { static struct am33xx_pm_platform_data am43xx_ops = { .init = am43xx_suspend_init, + .deinit = amx3_suspend_deinit, .soc_suspend = am43xx_suspend, .cpu_suspend = am43xx_cpu_suspend, .begin_suspend = amx3_begin_suspend, diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c index ccc6d53fe788..19bdcaca1f21 100644 --- a/drivers/soc/ti/pm33xx.c +++ b/drivers/soc/ti/pm33xx.c @@ -503,7 +503,7 @@ static int am33xx_pm_probe(struct platform_device *pdev) suspend_wfi_flags |= WFI_FLAG_WAKE_M3; #endif /* CONFIG_SUSPEND */ - ret = pm_ops->init(); + ret = pm_ops->init(NULL); if (ret) { dev_err(dev, "Unable to call core pm init!\n"); ret = -ENODEV; diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h index 8e59f2db2adc..644af1d89cfa 100644 --- a/include/linux/platform_data/pm33xx.h +++ b/include/linux/platform_data/pm33xx.h @@ -46,7 +46,8 @@ struct am33xx_pm_sram_addr { }; struct am33xx_pm_platform_data { - int (*init)(void); + int (*init)(int (*idle)(u32 wfi_flags)); + int (*deinit)(void); int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long), unsigned long args); int (*cpu_suspend)(int (*fn)(unsigned long), unsigned long args); -- cgit v1.2.3-58-ga151 From 253d3194c2b58152fe830fd27c2fd83ebc6fe5ee Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 10 Feb 2020 13:00:13 +0000 Subject: random: add arch_get_random_*long_early() Some architectures (e.g. arm64) can have heterogeneous CPUs, and the boot CPU may be able to provide entropy while secondary CPUs cannot. 
On such systems, arch_get_random_long() and arch_get_random_seed_long() will fail unless support for RNG instructions has been detected on all CPUs. This prevents the boot CPU from being able to provide (potentially) trusted entropy when seeding the primary CRNG. To make it possible to seed the primary CRNG from the boot CPU without adversely affecting the runtime versions of arch_get_random_long() and arch_get_random_seed_long(), this patch adds new early versions of the functions used when initializing the primary CRNG. Default implementations are provided atop of the existing arch_get_random_long() and arch_get_random_seed_long() so that only architectures with such constraints need to provide the new helpers. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Mark Brown Cc: Theodore Ts'o Link: https://lore.kernel.org/r/20200210130015.17664-3-mark.rutland@arm.com Signed-off-by: Theodore Ts'o --- drivers/char/random.c | 20 +++++++++++++++++++- include/linux/random.h | 22 ++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index 62d32e62f2da..02a85b87b993 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -799,6 +799,24 @@ static bool crng_init_try_arch(struct crng_state *crng) return arch_init; } +static bool __init crng_init_try_arch_early(struct crng_state *crng) +{ + int i; + bool arch_init = true; + unsigned long rv; + + for (i = 4; i < 16; i++) { + if (!arch_get_random_seed_long_early(&rv) && + !arch_get_random_long_early(&rv)) { + rv = random_get_entropy(); + arch_init = false; + } + crng->state[i] ^= rv; + } + + return arch_init; +} + static void crng_initialize_secondary(struct crng_state *crng) { memcpy(&crng->state[0], "expand 32-byte k", 16); @@ -811,7 +829,7 @@ static void __init crng_initialize_primary(struct crng_state *crng) { memcpy(&crng->state[0], "expand 32-byte k", 16); _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); - if (crng_init_try_arch(crng) && trust_cpu) { + if (crng_init_try_arch_early(crng) && trust_cpu) { invalidate_batched_entropy(); numa_crng_init(); crng_init = 2; diff --git a/include/linux/random.h b/include/linux/random.h index d319f9a1e429..45e1f8fa742b 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -7,6 +7,8 @@ #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H +#include +#include #include #include @@ -185,6 +187,26 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v) } #endif +/* + * Called from the boot CPU during startup; not valid to call once + * secondary CPUs are up and preemption is possible. + */ +#ifndef arch_get_random_seed_long_early +static inline bool __init arch_get_random_seed_long_early(unsigned long *v) +{ + WARN_ON(system_state != SYSTEM_BOOTING); + return arch_get_random_seed_long(v); +} +#endif + +#ifndef arch_get_random_long_early +static inline bool __init arch_get_random_long_early(unsigned long *v) +{ + WARN_ON(system_state != SYSTEM_BOOTING); + return arch_get_random_long(v); +} +#endif + /* Pseudo random number generator from numerical recipes. */ static inline u32 next_pseudo_random32(u32 seed) { -- cgit v1.2.3-58-ga151 From 59a135f6fb669f4f79f43160c7b8c8d6bfb37f75 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:49 +0100 Subject: tee: remove linked list of struct tee_shm Removes list_shm from struct tee_context since the linked list isn't used any longer. 
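For background, shared memory objects are already tracked in a per-device IDR, which is what makes the per-context list redundant. A simplified sketch of an IDR-based lookup, loosely modelled on the existing tee_shm_get_from_id() with reference counting elided:

static struct tee_shm *shm_lookup(struct tee_context *ctx, int id)
{
    struct tee_device *teedev = ctx->teedev;
    struct tee_shm *shm;

    mutex_lock(&teedev->mutex);
    shm = idr_find(&teedev->idr, id);  /* per-device IDR, no list walk */
    if (!shm || shm->ctx != ctx)
        shm = ERR_PTR(-EINVAL);
    mutex_unlock(&teedev->mutex);

    return shm;
}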
Signed-off-by: Jens Wiklander --- drivers/tee/tee_core.c | 1 - drivers/tee/tee_shm.c | 12 +----------- include/linux/tee_drv.h | 3 --- 3 files changed, 1 insertion(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 37d22e39fd8d..6aec502c495c 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev) kref_init(&ctx->refcount); ctx->teedev = teedev; - INIT_LIST_HEAD(&ctx->list_shm); rc = teedev->desc->ops->open(ctx); if (rc) goto err; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 937ac5aaa6d8..99f1c890ca3d 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -17,8 +17,6 @@ static void tee_shm_release(struct tee_shm *shm) mutex_lock(&teedev->mutex); idr_remove(&teedev->idr, shm->id); - if (shm->ctx) - list_del(&shm->link); mutex_unlock(&teedev->mutex); if (shm->flags & TEE_SHM_POOL) { @@ -168,12 +166,8 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, } } - if (ctx) { + if (ctx) teedev_ctx_get(ctx); - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); - } return shm; err_rem: @@ -301,10 +295,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, } } - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); - return shm; err: if (shm) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 7a03f68fb982..cbddb883a7f8 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -49,7 +49,6 @@ struct tee_shm_pool; */ struct tee_context { struct tee_device *teedev; - struct list_head list_shm; void *data; struct kref refcount; bool releasing; @@ -170,7 +169,6 @@ void tee_device_unregister(struct tee_device *teedev); * struct tee_shm - shared memory object * @teedev: device used to allocate the object * @ctx: context using the object, if NULL the context is gone - * @link link element * @paddr: physical address of the shared memory * @kaddr: virtual address of the shared memory * @size: size of shared memory @@ -187,7 +185,6 @@ void tee_device_unregister(struct tee_device *teedev); struct tee_shm { struct tee_device *teedev; struct tee_context *ctx; - struct list_head link; phys_addr_t paddr; void *kaddr; size_t size; -- cgit v1.2.3-58-ga151 From c180f9bbe29a403459dd76422f435382aec6adaa Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:52 +0100 Subject: tee: remove unused tee_shm_priv_alloc() tee_shm_priv_alloc() isn't useful in the current state and it's also not used, so remove it.
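Client drivers are expected to allocate through the public helper instead; a minimal usage sketch of tee_shm_alloc() as it stands after this series (size and flags picked purely for illustration):

struct tee_shm *shm;

shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(shm))
    return PTR_ERR(shm);

/* ... hand the buffer to the TEE, then release it ... */
tee_shm_free(shm);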
Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 33 ++------------------------------- include/linux/tee_drv.h | 12 ------------ 2 files changed, 2 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 99f1c890ca3d..b666854c2491 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -89,20 +89,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = { .mmap = tee_shm_op_mmap, }; -static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, - struct tee_device *teedev, - size_t size, u32 flags) +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) { + struct tee_device *teedev = ctx->teedev; struct tee_shm_pool_mgr *poolm = NULL; struct tee_shm *shm; void *ret; int rc; - if (ctx && ctx->teedev != teedev) { - dev_err(teedev->dev.parent, "ctx and teedev mismatch\n"); - return ERR_PTR(-EINVAL); - } - if (!(flags & TEE_SHM_MAPPED)) { dev_err(teedev->dev.parent, "only mapped allocations supported\n"); @@ -182,31 +176,8 @@ err_dev_put: tee_device_put(teedev); return ret; } - -/** - * tee_shm_alloc() - Allocate shared memory - * @ctx: Context that allocates the shared memory - * @size: Requested size of shared memory - * @flags: Flags setting properties for the requested shared memory. - * - * Memory allocated as global shared memory is automatically freed when the - * TEE file pointer is closed. The @flags field uses the bits defined by - * TEE_SHM_* in . TEE_SHM_MAPPED must currently always be - * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and - * associated with a dma-buf handle, else driver private memory. - */ -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) -{ - return __tee_shm_alloc(ctx, ctx->teedev, size, flags); -} EXPORT_SYMBOL_GPL(tee_shm_alloc); -struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size) -{ - return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED); -} -EXPORT_SYMBOL_GPL(tee_shm_priv_alloc); - struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags) { diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index cbddb883a7f8..42687f6c546d 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -315,18 +315,6 @@ void *tee_get_drvdata(struct tee_device *teedev); */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); -/** - * tee_shm_priv_alloc() - Allocate shared memory privately - * @dev: Device that allocates the shared memory - * @size: Requested size of shared memory - * - * Allocates shared memory buffer that is not associated with any client - * context. Such buffers are owned by TEE driver and used for internal calls. - * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); - /** * tee_shm_register() - Register shared memory buffer * @ctx: Context that registers the shared memory -- cgit v1.2.3-58-ga151 From 5271b2011e448f1be7433554e4684e91951476fa Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Thu, 7 Nov 2019 11:42:59 +0100 Subject: tee: remove redundant teedev in struct tee_shm The ctx element in struct tee_shm is always valid. So remove the now redundant teedev element. 
Signed-off-by: Jens Wiklander --- drivers/tee/tee_shm.c | 7 ++----- include/linux/tee_drv.h | 4 +--- 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 02210f179ae3..b01d2b7eea71 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -13,7 +13,7 @@ static void tee_shm_release(struct tee_shm *shm) { - struct tee_device *teedev = shm->teedev; + struct tee_device *teedev = shm->ctx->teedev; if (shm->flags & TEE_SHM_DMA_BUF) { mutex_lock(&teedev->mutex); @@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm) kfree(shm->pages); } - if (shm->ctx) - teedev_ctx_put(shm->ctx); + teedev_ctx_put(shm->ctx); kfree(shm); @@ -126,7 +125,6 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) } shm->flags = flags | TEE_SHM_POOL; - shm->teedev = teedev; shm->ctx = ctx; if (flags & TEE_SHM_DMA_BUF) poolm = teedev->pool->dma_buf_mgr; @@ -215,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, } shm->flags = flags | TEE_SHM_REGISTER; - shm->teedev = teedev; shm->ctx = ctx; shm->id = -1; addr = untagged_addr(addr); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 42687f6c546d..1412e9cc79ce 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -167,8 +167,7 @@ void tee_device_unregister(struct tee_device *teedev); /** * struct tee_shm - shared memory object - * @teedev: device used to allocate the object - * @ctx: context using the object, if NULL the context is gone + * @ctx: context using the object * @paddr: physical address of the shared memory * @kaddr: virtual address of the shared memory * @size: size of shared memory @@ -183,7 +182,6 @@ void tee_device_unregister(struct tee_device *teedev); * subsystem and from drivers that implements their own shm pool manager. */ struct tee_shm { - struct tee_device *teedev; struct tee_context *ctx; phys_addr_t paddr; void *kaddr; -- cgit v1.2.3-58-ga151 From 098accf2da940189f4d62d3514d17f8bb05dc6e1 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 13 Feb 2020 14:00:21 +0000 Subject: iommu: Use C99 flexible array in fwspec Although the 1-element array was a typical pre-C99 way to implement variable-length structures, and indeed is a fundamental construct in the APIs of certain other popular platforms, there's no good reason for it here (and in particular the sizeof() trick is far too "clever" for its own good). We can just as easily implement iommu_fwspec's preallocation behaviour using a standard flexible array member, so let's make it look the way most readers would expect. Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 15 ++++++++------- include/linux/iommu.h | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3e3528436e0b..660eea8d1d2f 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2405,7 +2405,8 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, if (fwspec) return ops == fwspec->ops ? 
0 : -EINVAL; - fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); + /* Preallocate for the overwhelmingly common case of 1 ID */ + fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); if (!fwspec) return -ENOMEM; @@ -2432,15 +2433,15 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_free); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - size_t size; - int i; + int i, new_num; if (!fwspec) return -EINVAL; - size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]); - if (size > sizeof(*fwspec)) { - fwspec = krealloc(fwspec, size, GFP_KERNEL); + new_num = fwspec->num_ids + num_ids; + if (new_num > 1) { + fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), + GFP_KERNEL); if (!fwspec) return -ENOMEM; @@ -2450,7 +2451,7 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) for (i = 0; i < num_ids; i++) fwspec->ids[fwspec->num_ids + i] = ids[i]; - fwspec->num_ids += num_ids; + fwspec->num_ids = new_num; return 0; } EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index d1b5f4d98569..4d1ba76c9a64 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -592,7 +592,7 @@ struct iommu_fwspec { u32 flags; u32 num_pasid_bits; unsigned int num_ids; - u32 ids[1]; + u32 ids[]; }; /* ATS is supported */ -- cgit v1.2.3-58-ga151 From 88ac039cbed125bd9ed132d27ec9f689c6442748 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Feb 2020 12:18:38 +0200 Subject: dmaengine: Refactor dmaengine_check_align() to be bit operations only There is no need to have a branch and a temporary variable in the function. Simply convert it to a set of bit and arithmetic operations. Note that when align is zero the computed mask is also zero, so the expression still evaluates to true, matching the old early return. Signed-off-by: Andy Shevchenko Reviewed-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200226101842.29426-1-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 64461fc64e1b..9f3f5582816a 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1155,14 +1155,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc static inline bool dmaengine_check_align(enum dmaengine_alignment align, size_t off1, size_t off2, size_t len) { - size_t mask; - - if (!align) - return true; - mask = (1 << align) - 1; - if (mask & (off1 | off2 | len)) - return false; - return true; + return !(((1 << align) - 1) & (off1 | off2 | len)); } static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, -- cgit v1.2.3-58-ga151 From 3a92063be16873a10648a81be0b1be42a9d54ee9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Feb 2020 12:18:39 +0200 Subject: dmaengine: Use negative condition for better readability When a negated condition is used we may decrease the indentation level and make the main part of the logic better visible.
Signed-off-by: Andy Shevchenko Reviewed-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200226101842.29426-2-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 9f3f5582816a..ae56a91c2a05 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -618,10 +618,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) { - if (tx->unmap) { - dmaengine_unmap_put(tx->unmap); - tx->unmap = NULL; - } + if (!tx->unmap) + return; + + dmaengine_unmap_put(tx->unmap); + tx->unmap = NULL; } #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH @@ -1408,11 +1409,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, static inline void dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) { - if (st) { - st->last = last; - st->used = used; - st->residue = residue; - } + if (!st) + return; + + st->last = last; + st->used = used; + st->residue = residue; } #ifdef CONFIG_DMA_ENGINE @@ -1489,12 +1491,11 @@ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) if (ret) return ret; - if (caps.descriptor_reuse) { - tx->flags |= DMA_CTRL_REUSE; - return 0; - } else { + if (!caps.descriptor_reuse) return -EPERM; - } + + tx->flags |= DMA_CTRL_REUSE; + return 0; } static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) @@ -1510,10 +1511,10 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx) static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) { /* this is supported for reusable desc, so check that */ - if (dmaengine_desc_test_reuse(desc)) - return desc->desc_free(desc); - else + if (!dmaengine_desc_test_reuse(desc)) return -EPERM; + + return desc->desc_free(desc); } /* --- DMA device --- */ -- cgit v1.2.3-58-ga151 From 5f77dd850c0a32d4d5047d139077718ee7f1a8fe Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Feb 2020 12:18:40 +0200 Subject: dmaengine: Drop redundant 'else' keyword It's obvious that 'else' keyword is redundant in the code like if (foo) return bar; else if (baz) ... Drop it for good. 
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200226101842.29426-3-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ae56a91c2a05..1bb5477ef7ec 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1230,9 +1230,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) { if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) return dma_dev_to_maxpq(dma); - else if (dmaf_p_disabled_continue(flags)) + if (dmaf_p_disabled_continue(flags)) return dma_dev_to_maxpq(dma) - 1; - else if (dmaf_continue(flags)) + if (dmaf_continue(flags)) return dma_dev_to_maxpq(dma) - 3; BUG(); } @@ -1243,7 +1243,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, if (inc) { if (dir_icg) return dir_icg; - else if (sgl) + if (sgl) return icg; } -- cgit v1.2.3-58-ga151 From 1873300afa6147a1882aeba1e8bc9a13c5487571 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Feb 2020 12:18:41 +0200 Subject: dmaengine: consistently return string literal from switch-case There is no need to have a 'break;' statement in the default case, followed by a return of a certain string literal, when all other cases return string literals directly. So, refactor it accordingly. Signed-off-by: Andy Shevchenko Reviewed-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200226101842.29426-4-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1bb5477ef7ec..d3672f065a64 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1560,9 +1560,7 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir) case DMA_DEV_TO_DEV: return "DEV_TO_DEV"; default: - break; + return "invalid"; } - - return "invalid"; } #endif /* DMAENGINE_H */ -- cgit v1.2.3-58-ga151 From 89b74cac7834734d6b2733204c639917d3826083 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Mar 2020 20:24:50 +0900 Subject: tools/bootconfig: Show line and column in parse error Show line and column when we get a parse error in the bootconfig tool. Currently lib/bootconfig shows the parse error with a byte offset, but that is not human readable. This makes xbc_init() not show the error message itself, but pass the error message and position to the caller, so that the caller can decode them and show the error message with line number and column. With this patch, the bootconfig tool shows an error with line:column as below. $ cat samples/bad-dotword.bconf # do not start keyword with .
key { .word = 1 } $ ./bootconfig -a samples/bad-dotword.bconf initrd Parse Error: Invalid keyword at 3:3 Link: http://lkml.kernel.org/r/158323469002.10560.4023923847704522760.stgit@devnote2 Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/bootconfig.h | 3 ++- init/main.c | 14 ++++++++++---- lib/bootconfig.c | 35 ++++++++++++++++++++++++++--------- tools/bootconfig/main.c | 35 +++++++++++++++++++++++++++++++---- 4 files changed, 69 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index d11e183fcb54..9903088891fa 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -216,7 +216,8 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node, } /* XBC node initializer */ -int __init xbc_init(char *buf); +int __init xbc_init(char *buf, const char **emsg, int *epos); + /* XBC cleanup data structures */ void __init xbc_destroy_all(void); diff --git a/init/main.c b/init/main.c index ee4947af823f..e488213857e2 100644 --- a/init/main.c +++ b/init/main.c @@ -353,6 +353,8 @@ static int __init bootconfig_params(char *param, char *val, static void __init setup_boot_config(const char *cmdline) { static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata; + const char *msg; + int pos; u32 size, csum; char *data, *copy; u32 *hdr; @@ -400,10 +402,14 @@ static void __init setup_boot_config(const char *cmdline) memcpy(copy, data, size); copy[size] = '\0'; - ret = xbc_init(copy); - if (ret < 0) - pr_err("Failed to parse bootconfig\n"); - else { + ret = xbc_init(copy, &msg, &pos); + if (ret < 0) { + if (pos < 0) + pr_err("Failed to init bootconfig: %s.\n", msg); + else + pr_err("Failed to parse bootconfig: %s at %d.\n", + msg, pos); + } else { pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret); /* keys starting with "kernel." are passed via cmdline */ extra_command_line = xbc_make_cmdline("kernel"); diff --git a/lib/bootconfig.c b/lib/bootconfig.c index ec3ce7fd299f..912ef4921398 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -29,12 +29,14 @@ static int xbc_node_num __initdata; static char *xbc_data __initdata; static size_t xbc_data_size __initdata; static struct xbc_node *last_parent __initdata; +static const char *xbc_err_msg __initdata; +static int xbc_err_pos __initdata; static int __init xbc_parse_error(const char *msg, const char *p) { - int pos = p - xbc_data; + xbc_err_msg = msg; + xbc_err_pos = (int)(p - xbc_data); - pr_err("Parse error at pos %d: %s\n", pos, msg); return -EINVAL; } @@ -738,33 +740,44 @@ void __init xbc_destroy_all(void) /** * xbc_init() - Parse given XBC file and build XBC internal tree * @buf: boot config text + * @emsg: A pointer of const char * to store the error message + * @epos: A pointer of int to store the error position * * This parses the boot config text in @buf. @buf must be a * null terminated string and smaller than XBC_DATA_MAX. * Return the number of stored nodes (>0) if succeeded, or -errno * if there is any error. + * In error cases, @emsg will be updated with an error message and + * @epos will be updated with the error position which is the byte offset + * of @buf. If the error is not a parser error, @epos will be -1. 
*/ -int __init xbc_init(char *buf) +int __init xbc_init(char *buf, const char **emsg, int *epos) { char *p, *q; int ret, c; + if (epos) + *epos = -1; + if (xbc_data) { - pr_err("Error: bootconfig is already initialized.\n"); + if (emsg) + *emsg = "Bootconfig is already initialized"; return -EBUSY; } ret = strlen(buf); if (ret > XBC_DATA_MAX - 1 || ret == 0) { - pr_err("Error: Config data is %s.\n", - ret ? "too big" : "empty"); + if (emsg) + *emsg = ret ? "Config data is too big" : + "Config data is empty"; return -ERANGE; } xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX, SMP_CACHE_BYTES); if (!xbc_nodes) { - pr_err("Failed to allocate memory for bootconfig nodes.\n"); + if (emsg) + *emsg = "Failed to allocate bootconfig nodes"; return -ENOMEM; } memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX); @@ -814,9 +827,13 @@ int __init xbc_init(char *buf) if (!ret) ret = xbc_verify_tree(); - if (ret < 0) + if (ret < 0) { + if (epos) + *epos = xbc_err_pos; + if (emsg) + *emsg = xbc_err_msg; xbc_destroy_all(); - else + } else ret = xbc_node_num; return ret; diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c index a9b97814d1a9..16b9a420e6fd 100644 --- a/tools/bootconfig/main.c +++ b/tools/bootconfig/main.c @@ -130,6 +130,7 @@ int load_xbc_from_initrd(int fd, char **buf) int ret; u32 size = 0, csum = 0, rcsum; char magic[BOOTCONFIG_MAGIC_LEN]; + const char *msg; ret = fstat(fd, &stat); if (ret < 0) @@ -182,10 +183,12 @@ int load_xbc_from_initrd(int fd, char **buf) return -EINVAL; } - ret = xbc_init(*buf); + ret = xbc_init(*buf, &msg, NULL); /* Wrong data */ - if (ret < 0) + if (ret < 0) { + pr_err("parse error: %s.\n", msg); return ret; + } return size; } @@ -244,11 +247,34 @@ int delete_xbc(const char *path) return ret; } +static void show_xbc_error(const char *data, const char *msg, int pos) +{ + int lin = 1, col, i; + + if (pos < 0) { + pr_err("Error: %s.\n", msg); + return; + } + + /* Note that pos starts from 0 but lin and col should start from 1. */ + col = pos + 1; + for (i = 0; i < pos; i++) { + if (data[i] == '\n') { + lin++; + col = pos - i; + } + } + pr_err("Parse Error: %s at %d:%d\n", msg, lin, col); + +} + int apply_xbc(const char *path, const char *xbc_path) { u32 size, csum; char *buf, *data; int ret, fd; + const char *msg; + int pos; ret = load_xbc_file(xbc_path, &buf); if (ret < 0) { @@ -267,11 +293,12 @@ int apply_xbc(const char *path, const char *xbc_path) *(u32 *)(data + size + 4) = csum; /* Check the data format */ - ret = xbc_init(buf); + ret = xbc_init(buf, &msg, &pos); if (ret < 0) { - pr_err("Failed to parse %s: %d\n", xbc_path, ret); + show_xbc_error(data, msg, pos); free(data); free(buf); + return ret; } printf("Apply %s to %s\n", xbc_path, path); -- cgit v1.2.3-58-ga151 From b2745d92bb015cc4454d4195c4ce6e2852db397e Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Thu, 27 Feb 2020 16:28:34 -0600 Subject: bus: ti-sysc: Add support for PRUSS SYSC type The PRU-ICSS present on AM33xx/AM43xx/AM57xx has a very unique SYSCFG register. The register follows the OMAP4-style SYSC_TYPE3 for Master Standby and Slave Idle, but also has two additional unique fields - STANDBY_INIT and SUB_MWAIT. The STANDBY_INIT is a control bit that is used to initiate a Standby sequence (when set) and trigger a MStandby request to the SoC's PRCM module. This same bit is also used to enable the OCP master ports (when cleared) to allow the PRU cores to access any peripherals or memory beyond the PRU subsystem. 
The SUB_MWAIT is a ready status field for the external access. Add support for this SYSC type. The STANDBY_INIT has to be set during suspend, without which it results in a hang in the resume sequence on AM33xx/AM43xx boards and requires a board reset to come out of the hang. Any PRU applications requiring external access are supposed to clear the STANDBY_INIT bit. Note that the PRUSS context is lost during a suspend sequence because the PRUSS module is reset and/or disabled. Signed-off-by: Suman Anna Signed-off-by: Roger Quadros [tony@atomide.com: updated quirk define number and to use -ENODEV] Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 27 +++++++++++++++++++++++++++ include/linux/platform_data/ti-sysc.h | 2 ++ 2 files changed, 29 insertions(+) (limited to 'include/linux') diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index bc1c52f87046..86ac61fa5bc6 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1341,6 +1341,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT), + /* PRUSS on am3, am4 and am5 */ + SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000, + SYSC_MODULE_QUIRK_PRUSS), /* Watchdog on am3 and am4 */ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), @@ -1712,6 +1715,16 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata) dev_warn(ddata->dev, "wdt disable step2 failed\n"); } +/* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */ +static void sysc_module_disable_quirk_pruss(struct sysc *ddata) +{ + u32 reg; + + reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + reg |= SYSC_PRUSS_STANDBY_INIT; + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); +} + static void sysc_init_module_quirks(struct sysc *ddata) { if (ddata->legacy_mode || !ddata->name) @@ -1750,6 +1763,9 @@ static void sysc_init_module_quirks(struct sysc *ddata) ddata->reset_done_quirk = sysc_reset_done_quirk_wdt; ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; } + + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) + ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; } static int sysc_clockdomain_init(struct sysc *ddata) @@ -2555,6 +2571,16 @@ static const struct sysc_capabilities sysc_dra7_mcan = { .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED, }; +/* + * PRUSS found on some AM33xx, AM437x and AM57xx SoCs + */ +static const struct sysc_capabilities sysc_pruss = { + .type = TI_SYSC_PRUSS, + .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT, + .regbits = &sysc_regbits_omap4_simple, + .mod_quirks = SYSC_MODULE_QUIRK_PRUSS, +}; + static int sysc_init_pdata(struct sysc *ddata) { struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); @@ -2936,6 +2962,7 @@ static const struct of_device_id sysc_match[] = { { .compatible = "ti,sysc-usb-host-fs", .data = &sysc_omap4_usb_host_fs, }, { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, }, + { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, }, { }, }; MODULE_DEVICE_TABLE(of, sysc_match); diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index ecd3a979a14d..c59999ce044e 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -17,6 +17,7 @@ enum ti_sysc_module_type { TI_SYSC_OMAP4_MCASP, TI_SYSC_OMAP4_USB_HOST_FS, TI_SYSC_DRA7_MCAN, + 
TI_SYSC_PRUSS, }; struct ti_sysc_cookie { @@ -49,6 +50,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_PRUSS BIT(24) #define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) -- cgit v1.2.3-58-ga151 From 2333e829952fb437db915bbb17f4d8c43127d438 Mon Sep 17 00:00:00 2001 From: Yu Chen Date: Sun, 23 Feb 2020 15:28:52 +0800 Subject: workqueue: Make workqueue_init*() return void The return values of workqueue_init() and workqueue_init_early() are always 0, and there is no usage of their return value. So just make them return void. Signed-off-by: Yu Chen Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 4 ++-- kernel/workqueue.c | 8 ++------ 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4261d1c6e87b..c86a7691e13c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -649,7 +649,7 @@ int workqueue_online_cpu(unsigned int cpu); int workqueue_offline_cpu(unsigned int cpu); #endif -int __init workqueue_init_early(void); -int __init workqueue_init(void); +void __init workqueue_init_early(void); +void __init workqueue_init(void); #endif diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 301db4406bc3..5afa9ad45eba 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5896,7 +5896,7 @@ static void __init wq_numa_init(void) * items. Actual work item execution starts only after kthreads can be * created and scheduled right before early initcalls. */ -int __init workqueue_init_early(void) +void __init workqueue_init_early(void) { int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ; @@ -5963,8 +5963,6 @@ int __init workqueue_init_early(void) !system_unbound_wq || !system_freezable_wq || !system_power_efficient_wq || !system_freezable_power_efficient_wq); - - return 0; } /** @@ -5976,7 +5974,7 @@ int __init workqueue_init_early(void) * are no kworkers executing the work items yet. Populate the worker pools * with the initial workers and enable future kworker creations. */ -int __init workqueue_init(void) +void __init workqueue_init(void) { struct workqueue_struct *wq; struct worker_pool *pool; @@ -6023,6 +6021,4 @@ int __init workqueue_init(void) wq_online = true; wq_watchdog_init(); - - return 0; } -- cgit v1.2.3-58-ga151 From 780d2a9c86dc12594e263752cd8426a5794f1cc8 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 4 Mar 2020 15:09:19 +0100 Subject: include/bitmap.h: add missing parameter in docs bitmap_find_next_zero_area_off() has an additional parameter which was not specified in the list of functions. Add it.
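The extra mask_off argument shifts the alignment grid: the area is placed so that pos + mask_off, rather than pos itself, satisfies the alignment mask. A small usage sketch with invented values (align_mask 7, mask_off 2, so candidate positions are 6, 14, 22, ...):

unsigned long pos;

/* find 4 free bits such that (pos + 2) is a multiple of 8 */
pos = bitmap_find_next_zero_area_off(map, nbits, 0, 4, 7, 2);
if (pos < nbits)
    bitmap_set(map, pos, 4);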
Fixes: 5e19b013f55a ("lib: bitmap: add alignment offset for bitmap_find_next_zero_area()") Signed-off-by: Wolfram Sang Signed-off-by: Dennis Zhou --- include/linux/bitmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e52ceb1a73d3..804600f7dc35 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -50,7 +50,7 @@ * bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area - * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest -- cgit v1.2.3-58-ga151 From a392d26f32cdd87e09b1ea3849db79cfc4eae745 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 4 Mar 2020 15:09:20 +0100 Subject: include/bitmap.h: add new functions to documentation I found these functions only by chance, although I was looking for exactly something like them. So, add them to the list of functions to make them more visible. Fixes: e837dfde15a4 ("bitmap: genericize percpu bitmap region iterators") Signed-off-by: Wolfram Sang Signed-off-by: Dennis Zhou --- include/linux/bitmap.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 804600f7dc35..99058eb81042 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -51,6 +51,12 @@ * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above + * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region + * bitmap_next_set_region(map, &start, &end, nbits) Find next set region + * bitmap_for_each_clear_region(map, rs, re, start, end) + * Iterate over all clear regions + * bitmap_for_each_set_region(map, rs, re, start, end) + * Iterate over all set regions * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest -- cgit v1.2.3-58-ga151 From 241eaabc3c315cdfea505725a43de848f498527f Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 6 Mar 2020 10:34:10 +0800 Subject: power: supply: Allow charger manager to be built as a module Allow the charger manager to be built as a module like other charger drivers. Signed-off-by: Baolin Wang Signed-off-by: Sebastian Reichel --- drivers/power/supply/Kconfig | 2 +- include/linux/power/charger-manager.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 9a5591ab90d0..195bc0462d3e 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -480,7 +480,7 @@ config CHARGER_GPIO called gpio-charger.
config CHARGER_MANAGER - bool "Battery charger manager for multiple chargers" + tristate "Battery charger manager for multiple chargers" depends on REGULATOR select EXTCON help diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index ad19e68e1fc3..ae94dcebd936 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -248,7 +248,7 @@ struct charger_manager { u64 charging_end_time; }; -#ifdef CONFIG_CHARGER_MANAGER +#if IS_ENABLED(CONFIG_CHARGER_MANAGER) extern void cm_notify_event(struct power_supply *psy, enum cm_event_types type, char *msg); #else -- cgit v1.2.3-58-ga151 From f1541773af49ecd1edae29c8ac0775253a0b0760 Mon Sep 17 00:00:00 2001 From: Chuanhong Guo Date: Sat, 8 Feb 2020 15:43:50 +0800 Subject: mtd: spinand: rework detect procedure for different READ_ID operation Currently there are 3 different variants of read_id implementation: 1. opcode only. Found in GD5FxGQ4xF. 2. opcode + 1 addr byte. Found in GD5FxGQ4xA/E. 3. opcode + 1 dummy byte. Found in other currently supported chips. The original implementation was for variant 1 and let the detect functions of chips with variants 2 and 3 ignore the first byte. This isn't robust: 1. For chips of variant 2, if the SPI master doesn't keep MOSI low during the read, the chip will get a random ID offset, and the entire ID buffer will shift by that offset, causing detection failure. 2. For chips of variant 1, if the device ID happens to equal the manufacturer ID of a variant 2 or 3 chip, the chip will be incorrectly detected. This patch reworks the detect procedure to address the problems above. The new logic does detection for all variants separately, in 1-2-3 order. Since all current detect methods do exactly the same ID matching procedure, unify them into core.c and remove the detect method from manufacturer_ops. Tested on GD5F1GQ4UAYIG and W25N01GVZEIG. Signed-off-by: Chuanhong Guo Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200208074439.146296-1-gch981213@gmail.com --- drivers/mtd/nand/spi/core.c | 86 ++++++++++++++++++++++++++++----------- drivers/mtd/nand/spi/gigadevice.c | 45 ++++++-------------- drivers/mtd/nand/spi/macronix.c | 30 +++----------- drivers/mtd/nand/spi/micron.c | 26 ++---------- drivers/mtd/nand/spi/paragon.c | 28 +++---------- drivers/mtd/nand/spi/toshiba.c | 45 ++++++------------ drivers/mtd/nand/spi/winbond.c | 34 +++------------- include/linux/mtd/spinand.h | 66 ++++++++++++++++++++---------- 8 files changed, 157 insertions(+), 203 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 89f6beefb01c..a9e9cbad942f 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -370,10 +371,11 @@ out: return status & STATUS_BUSY ?
-ETIMEDOUT : 0; } -static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf) +static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, + u8 ndummy, u8 *buf) { - struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf, - SPINAND_MAX_ID_LEN); + struct spi_mem_op op = SPINAND_READID_OP( + naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); int ret; ret = spi_mem_exec_op(spinand->spimem, &op); @@ -762,24 +764,62 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = { &winbond_spinand_manufacturer, }; -static int spinand_manufacturer_detect(struct spinand_device *spinand) +static int spinand_manufacturer_match(struct spinand_device *spinand, + enum spinand_readid_method rdid_method) { + u8 *id = spinand->id.data; unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) { - ret = spinand_manufacturers[i]->ops->detect(spinand); - if (ret > 0) { - spinand->manufacturer = spinand_manufacturers[i]; - return 0; - } else if (ret < 0) { - return ret; - } - } + const struct spinand_manufacturer *manufacturer = + spinand_manufacturers[i]; + + if (id[0] != manufacturer->id) + continue; + ret = spinand_match_and_init(spinand, + manufacturer->chips, + manufacturer->nchips, + rdid_method); + if (ret < 0) + continue; + + spinand->manufacturer = manufacturer; + return 0; + } return -ENOTSUPP; } +static int spinand_id_detect(struct spinand_device *spinand) +{ + u8 *id = spinand->id.data; + int ret; + + ret = spinand_read_id_op(spinand, 0, 0, id); + if (ret) + return ret; + ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE); + if (!ret) + return 0; + + ret = spinand_read_id_op(spinand, 1, 0, id); + if (ret) + return ret; + ret = spinand_manufacturer_match(spinand, + SPINAND_READID_METHOD_OPCODE_ADDR); + if (!ret) + return 0; + + ret = spinand_read_id_op(spinand, 0, 1, id); + if (ret) + return ret; + ret = spinand_manufacturer_match(spinand, + SPINAND_READID_METHOD_OPCODE_DUMMY); + + return ret; +} + static int spinand_manufacturer_init(struct spinand_device *spinand) { if (spinand->manufacturer->ops->init) @@ -835,9 +875,9 @@ spinand_select_op_variant(struct spinand_device *spinand, * @spinand: SPI NAND object * @table: SPI NAND device description table * @table_size: size of the device description table + * @rdid_method: read id method to match * - * Should be used by SPI NAND manufacturer drivers when they want to find a - * match between a device ID retrieved through the READ_ID command and an + * Match between a device ID retrieved through the READ_ID command and an * entry in the SPI NAND description table. If a match is found, the spinand * object will be initialized with information provided by the matching * spinand_info entry. 
@@ -846,8 +886,10 @@ spinand_select_op_variant(struct spinand_device *spinand, */ int spinand_match_and_init(struct spinand_device *spinand, const struct spinand_info *table, - unsigned int table_size, u16 devid) + unsigned int table_size, + enum spinand_readid_method rdid_method) { + u8 *id = spinand->id.data; struct nand_device *nand = spinand_to_nand(spinand); unsigned int i; @@ -855,13 +897,17 @@ int spinand_match_and_init(struct spinand_device *spinand, const struct spinand_info *info = &table[i]; const struct spi_mem_op *op; - if (devid != info->devid) + if (rdid_method != info->devid.method) + continue; + + if (memcmp(id + 1, info->devid.id, info->devid.len)) continue; nand->memorg = table[i].memorg; nand->eccreq = table[i].eccreq; spinand->eccinfo = table[i].eccinfo; spinand->flags = table[i].flags; + spinand->id.len = 1 + table[i].devid.len; spinand->select_target = table[i].select_target; op = spinand_select_op_variant(spinand, @@ -898,13 +944,7 @@ static int spinand_detect(struct spinand_device *spinand) if (ret) return ret; - ret = spinand_read_id_op(spinand, spinand->id.data); - if (ret) - return ret; - - spinand->id.len = SPINAND_MAX_ID_LEN; - - ret = spinand_manufacturer_detect(spinand); + ret = spinand_id_detect(spinand); if (ret) { dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN, spinand->id.data); diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index e99d425aa93f..d219c970042a 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -195,7 +195,8 @@ static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand, } static const struct spinand_info gigadevice_spinand_table[] = { - SPINAND_INFO("GD5F1GQ4xA", 0xF1, + SPINAND_INFO("GD5F1GQ4xA", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -204,7 +205,8 @@ static const struct spinand_info gigadevice_spinand_table[] = { 0, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), - SPINAND_INFO("GD5F2GQ4xA", 0xF2, + SPINAND_INFO("GD5F2GQ4xA", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -213,7 +215,8 @@ static const struct spinand_info gigadevice_spinand_table[] = { 0, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), - SPINAND_INFO("GD5F4GQ4xA", 0xF4, + SPINAND_INFO("GD5F4GQ4xA", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4), NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -222,7 +225,8 @@ static const struct spinand_info gigadevice_spinand_table[] = { 0, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), - SPINAND_INFO("GD5F1GQ4UExxG", 0xd1, + SPINAND_INFO("GD5F1GQ4UExxG", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -231,7 +235,8 @@ static const struct spinand_info gigadevice_spinand_table[] = { 0, SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, gd5fxgq4uexxg_ecc_get_status)), - SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148, + SPINAND_INFO("GD5F1GQ4UFxxG", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, @@ -242,39 
+247,13 @@ static const struct spinand_info gigadevice_spinand_table[] = { gd5fxgq4ufxxg_ecc_get_status)), }; -static int gigadevice_spinand_detect(struct spinand_device *spinand) -{ - u8 *id = spinand->id.data; - u16 did; - int ret; - - /* - * Earlier GDF5-series devices (A,E) return [0][MID][DID] - * Later (F) devices return [MID][DID1][DID2] - */ - - if (id[0] == SPINAND_MFR_GIGADEVICE) - did = (id[1] << 8) + id[2]; - else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE) - did = id[2]; - else - return 0; - - ret = spinand_match_and_init(spinand, gigadevice_spinand_table, - ARRAY_SIZE(gigadevice_spinand_table), - did); - if (ret) - return ret; - - return 1; -} - static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { - .detect = gigadevice_spinand_detect, }; const struct spinand_manufacturer gigadevice_spinand_manufacturer = { .id = SPINAND_MFR_GIGADEVICE, .name = "GigaDevice", + .chips = gigadevice_spinand_table, + .nchips = ARRAY_SIZE(gigadevice_spinand_table), .ops = &gigadevice_spinand_manuf_ops, }; diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 21def3f8fb36..0f900f3aa21a 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -99,7 +99,8 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, } static const struct spinand_info macronix_spinand_table[] = { - SPINAND_INFO("MX35LF1GE4AB", 0x12, + SPINAND_INFO("MX35LF1GE4AB", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(4, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -108,7 +109,8 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, mx35lf1ge4ab_ecc_get_status)), - SPINAND_INFO("MX35LF2GE4AB", 0x22, + SPINAND_INFO("MX35LF2GE4AB", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(4, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -118,33 +120,13 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), }; -static int macronix_spinand_detect(struct spinand_device *spinand) -{ - u8 *id = spinand->id.data; - int ret; - - /* - * Macronix SPI NAND read ID needs a dummy byte, so the first byte in - * raw_id is garbage. 
- */ - if (id[1] != SPINAND_MFR_MACRONIX) - return 0; - - ret = spinand_match_and_init(spinand, macronix_spinand_table, - ARRAY_SIZE(macronix_spinand_table), - id[2]); - if (ret) - return ret; - - return 1; -} - static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = { - .detect = macronix_spinand_detect, }; const struct spinand_manufacturer macronix_spinand_manufacturer = { .id = SPINAND_MFR_MACRONIX, .name = "Macronix", + .chips = macronix_spinand_table, + .nchips = ARRAY_SIZE(macronix_spinand_table), .ops = ¯onix_spinand_manuf_ops, }; diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index 7d7b1f7fcf71..f56f81325e10 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -91,7 +91,8 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand, } static const struct spinand_info micron_spinand_table[] = { - SPINAND_INFO("MT29F2G01ABAGD", 0x24, + SPINAND_INFO("MT29F2G01ABAGD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -102,32 +103,13 @@ static const struct spinand_info micron_spinand_table[] = { mt29f2g01abagd_ecc_get_status)), }; -static int micron_spinand_detect(struct spinand_device *spinand) -{ - u8 *id = spinand->id.data; - int ret; - - /* - * Micron SPI NAND read ID need a dummy byte, - * so the first byte in raw_id is dummy. - */ - if (id[1] != SPINAND_MFR_MICRON) - return 0; - - ret = spinand_match_and_init(spinand, micron_spinand_table, - ARRAY_SIZE(micron_spinand_table), id[2]); - if (ret) - return ret; - - return 1; -} - static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = { - .detect = micron_spinand_detect, }; const struct spinand_manufacturer micron_spinand_manufacturer = { .id = SPINAND_MFR_MICRON, .name = "Micron", + .chips = micron_spinand_table, + .nchips = ARRAY_SIZE(micron_spinand_table), .ops = µn_spinand_manuf_ops, }; diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c index 52307681cbd0..519ade513c1f 100644 --- a/drivers/mtd/nand/spi/paragon.c +++ b/drivers/mtd/nand/spi/paragon.c @@ -97,7 +97,8 @@ static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = { static const struct spinand_info paragon_spinand_table[] = { - SPINAND_INFO("PN26G01A", 0xe1, + SPINAND_INFO("PN26G01A", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1), NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -106,7 +107,8 @@ static const struct spinand_info paragon_spinand_table[] = { 0, SPINAND_ECCINFO(&pn26g0xa_ooblayout, pn26g0xa_ecc_get_status)), - SPINAND_INFO("PN26G02A", 0xe2, + SPINAND_INFO("PN26G02A", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2), NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -117,31 +119,13 @@ static const struct spinand_info paragon_spinand_table[] = { pn26g0xa_ecc_get_status)), }; -static int paragon_spinand_detect(struct spinand_device *spinand) -{ - u8 *id = spinand->id.data; - int ret; - - /* Read ID returns [0][MID][DID] */ - - if (id[1] != SPINAND_MFR_PARAGON) - return 0; - - ret = spinand_match_and_init(spinand, paragon_spinand_table, - ARRAY_SIZE(paragon_spinand_table), - id[2]); - if (ret) - return ret; - - return 1; -} - static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = { - .detect = paragon_spinand_detect, }; const struct 
spinand_manufacturer paragon_spinand_manufacturer = { .id = SPINAND_MFR_PARAGON, .name = "Paragon", + .chips = paragon_spinand_table, + .nchips = ARRAY_SIZE(paragon_spinand_table), .ops = &paragon_spinand_manuf_ops, }; diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 833e8f64e0a0..d34773191700 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -96,7 +96,8 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand, static const struct spinand_info toshiba_spinand_table[] = { /* 3.3V 1Gb */ - SPINAND_INFO("TC58CVG0S3", 0xC2, + SPINAND_INFO("TC58CVG0S3", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -106,7 +107,8 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), /* 3.3V 2Gb */ - SPINAND_INFO("TC58CVG1S3", 0xCB, + SPINAND_INFO("TC58CVG1S3", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -116,7 +118,8 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), /* 3.3V 4Gb */ - SPINAND_INFO("TC58CVG2S0", 0xCD, + SPINAND_INFO("TC58CVG2S0", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -126,7 +129,8 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), /* 3.3V 4Gb */ - SPINAND_INFO("TC58CVG2S0", 0xED, + SPINAND_INFO("TC58CVG2S0", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -136,7 +140,8 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), /* 1.8V 1Gb */ - SPINAND_INFO("TC58CYG0S3", 0xB2, + SPINAND_INFO("TC58CYG0S3", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -146,7 +151,8 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), /* 1.8V 2Gb */ - SPINAND_INFO("TC58CYG1S3", 0xBB, + SPINAND_INFO("TC58CYG1S3", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -156,7 +162,8 @@ static const struct spinand_info toshiba_spinand_table[] = { SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), /* 1.8V 4Gb */ - SPINAND_INFO("TC58CYG2S0", 0xBD, + SPINAND_INFO("TC58CYG2S0", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -167,33 +174,13 @@ static const struct spinand_info toshiba_spinand_table[] = { tc58cxgxsx_ecc_get_status)), }; -static int toshiba_spinand_detect(struct spinand_device *spinand) -{ - u8 *id = spinand->id.data; - int ret; - - /* - * Toshiba SPI NAND read ID needs a dummy byte, - * so the first byte in id is garbage.
- */ - if (id[1] != SPINAND_MFR_TOSHIBA) - return 0; - - ret = spinand_match_and_init(spinand, toshiba_spinand_table, - ARRAY_SIZE(toshiba_spinand_table), - id[2]); - if (ret) - return ret; - - return 1; -} - static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = { - .detect = toshiba_spinand_detect, }; const struct spinand_manufacturer toshiba_spinand_manufacturer = { .id = SPINAND_MFR_TOSHIBA, .name = "Toshiba", + .chips = toshiba_spinand_table, + .nchips = ARRAY_SIZE(toshiba_spinand_table), .ops = &toshiba_spinand_manuf_ops, }; diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index a6c17e0cace8..76684428354e 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -75,7 +75,8 @@ static int w25m02gv_select_target(struct spinand_device *spinand, } static const struct spinand_info winbond_spinand_table[] = { - SPINAND_INFO("W25M02GV", 0xAB, + SPINAND_INFO("W25M02GV", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -84,7 +85,8 @@ static const struct spinand_info winbond_spinand_table[] = { 0, SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL), SPINAND_SELECT_TARGET(w25m02gv_select_target)), - SPINAND_INFO("W25N01GV", 0xAA, + SPINAND_INFO("W25N01GV", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, @@ -94,31 +96,6 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), }; -/** - * winbond_spinand_detect - initialize device related part in spinand_device - * struct if it is a Winbond device. - * @spinand: SPI NAND device structure - */ -static int winbond_spinand_detect(struct spinand_device *spinand) -{ - u8 *id = spinand->id.data; - int ret; - - /* - * Winbond SPI NAND read ID need a dummy byte, - * so the first byte in raw_id is dummy. - */ - if (id[1] != SPINAND_MFR_WINBOND) - return 0; - - ret = spinand_match_and_init(spinand, winbond_spinand_table, - ARRAY_SIZE(winbond_spinand_table), id[2]); - if (ret) - return ret; - - return 1; -} - static int winbond_spinand_init(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); @@ -138,12 +115,13 @@ static int winbond_spinand_init(struct spinand_device *spinand) } static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = { - .detect = winbond_spinand_detect, .init = winbond_spinand_init, }; const struct spinand_manufacturer winbond_spinand_manufacturer = { .id = SPINAND_MFR_WINBOND, .name = "Winbond", + .chips = winbond_spinand_table, + .nchips = ARRAY_SIZE(winbond_spinand_table), .ops = &winbond_spinand_manuf_ops, }; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 4ea558bd3c46..f4c4ae87181b 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -32,9 +32,9 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) -#define SPINAND_READID_OP(ndummy, buf, len) \ +#define SPINAND_READID_OP(naddr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \ - SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_ADDR(naddr, 0, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) @@ -176,37 +176,46 @@ struct spinand_device; * @data: buffer containing the id bytes. 
Currently 4 bytes large, but can * be extended if required * @len: ID length - * - * struct_spinand_id->data contains all bytes returned after a READ_ID command, - * including dummy bytes if the chip does not emit ID bytes right after the - * READ_ID command. The responsibility to extract real ID bytes is left to - * struct_manufacurer_ops->detect(). */ struct spinand_id { u8 data[SPINAND_MAX_ID_LEN]; int len; }; +enum spinand_readid_method { + SPINAND_READID_METHOD_OPCODE, + SPINAND_READID_METHOD_OPCODE_ADDR, + SPINAND_READID_METHOD_OPCODE_DUMMY, +}; + +/** + * struct spinand_devid - SPI NAND device id structure + * @id: device id of current chip + * @len: number of bytes in device id + * @method: method to read chip id + * There are 3 possible variants: + * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately + * after read_id opcode. + * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after + * read_id opcode + 1-byte address. + * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after + * read_id opcode + 1 dummy byte. + */ +struct spinand_devid { + const u8 *id; + const u8 len; + const enum spinand_readid_method method; +}; + /** * struct manufacurer_ops - SPI NAND manufacturer specific operations - * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed - * the core calls the struct_manufacurer_ops->detect() hook of each - * registered manufacturer until one of them return 1. Note that - * the first thing to check in this hook is that the manufacturer ID - * in struct_spinand_device->id matches the manufacturer whose - * ->detect() hook has been called. Should return 1 if there's a - * match, 0 if the manufacturer ID does not match and a negative - * error code otherwise. When true is returned, the core assumes - * that properties of the NAND chip (spinand->base.memorg and - * spinand->base.eccreq) have been filled * @init: initialize a SPI NAND device * @cleanup: cleanup a SPI NAND device * * Each SPI NAND manufacturer driver should implement this interface so that - * NAND chips coming from this vendor can be detected and initialized properly. + * NAND chips coming from this vendor can be initialized properly. */ struct spinand_manufacturer_ops { - int (*detect)(struct spinand_device *spinand); int (*init)(struct spinand_device *spinand); void (*cleanup)(struct spinand_device *spinand); }; @@ -215,11 +224,16 @@ struct spinand_manufacturer_ops { * struct spinand_manufacturer - SPI NAND manufacturer instance * @id: manufacturer ID * @name: manufacturer name + * @devid_len: number of bytes in device ID + * @chips: supported SPI NANDs under current manufacturer + * @nchips: number of SPI NANDs available in chips array * @ops: manufacturer operations */ struct spinand_manufacturer { u8 id; char *name; + const struct spinand_info *chips; + const size_t nchips; const struct spinand_manufacturer_ops *ops; }; @@ -291,7 +305,7 @@ struct spinand_ecc_info { */ struct spinand_info { const char *model; - u16 devid; + struct spinand_devid devid; u32 flags; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; @@ -305,6 +319,13 @@ struct spinand_info { unsigned int target); }; +#define SPINAND_ID(__method, ...) 
\ + { \ + .id = (const u8[]){ __VA_ARGS__ }, \ + .len = sizeof((u8[]){ __VA_ARGS__ }), \ + .method = __method, \ + } + #define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ { \ .read_cache = __read, \ @@ -451,9 +472,10 @@ static inline void spinand_set_of_node(struct spinand_device *spinand, nanddev_set_of_node(&spinand->base, np); } -int spinand_match_and_init(struct spinand_device *dev, +int spinand_match_and_init(struct spinand_device *spinand, const struct spinand_info *table, - unsigned int table_size, u16 devid); + unsigned int table_size, + enum spinand_readid_method rdid_method); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); int spinand_select_target(struct spinand_device *spinand, unsigned int target); -- cgit v1.2.3-58-ga151 From dceeb0f0e61071b1d990459dbd6a53f590cdaf77 Mon Sep 17 00:00:00 2001 From: Tejas Patel Date: Thu, 9 Jan 2020 11:06:03 -0800 Subject: include: linux: firmware: Correct config dependency of zynqmp_eemi_ops zynqmp_eemi_ops will be compiled only when CONFIG_ZYNQMP_FIRMWARE is enabled. So check for CONFIG_ZYNQMP_FIRMWARE instead of checking for CONFIG_ARCH_ZYNQMP. Signed-off-by: Tejas Patel Signed-off-by: Jolly Shah Signed-off-by: Michal Simek --- include/linux/firmware/xlnx-zynqmp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 2cd12ebd6826..ed1aace0cbbc 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -320,7 +320,7 @@ struct zynqmp_eemi_ops { int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 *ret_payload); -#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) +#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); #else static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) -- cgit v1.2.3-58-ga151 From ed680522268da2f6f2a67505dd144e718d726712 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 28 Feb 2020 18:12:20 +0100 Subject: i2c: convert SMBus alert setup function to return an ERRPTR Only a few drivers use this call, so drivers and I2C core are converted at once with this patch. By simply using i2c_new_client_device() instead of i2c_new_device(), we can easily return an ERRPTR for this function as well. To make out-of-tree users aware that something changed, the function is renamed to i2c_new_smbus_alert_device(). Signed-off-by: Wolfram Sang Reviewed-by: Luca Ceresoli Signed-off-by: Wolfram Sang --- Documentation/i2c/smbus-protocol.rst | 2 +- drivers/i2c/busses/i2c-parport.c | 12 ++++++++---- drivers/i2c/busses/i2c-thunderx-pcidrv.c | 11 ++++++++--- drivers/i2c/busses/i2c-xlp9xx.c | 10 +++++++--- drivers/i2c/i2c-core-smbus.c | 21 ++++++++------------- drivers/i2c/i2c-smbus.c | 2 +- include/linux/i2c-smbus.h | 4 ++-- 7 files changed, 35 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst index c122ed239f7f..c2e29633071e 100644 --- a/Documentation/i2c/smbus-protocol.rst +++ b/Documentation/i2c/smbus-protocol.rst @@ -274,7 +274,7 @@ to know which slave triggered the interrupt. This is implemented the following way in the Linux kernel: * I2C bus drivers which support SMBus alert should call - i2c_setup_smbus_alert() to setup SMBus alert support. + i2c_new_smbus_alert_device() to install SMBus alert support.
* I2C drivers for devices which can trigger SMBus alerts should implement the optional alert() callback. diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c index 81eb441b2387..a535889acca6 100644 --- a/drivers/i2c/busses/i2c-parport.c +++ b/drivers/i2c/busses/i2c-parport.c @@ -333,13 +333,17 @@ static void i2c_parport_attach(struct parport *port) /* Setup SMBus alert if supported */ if (adapter_parm[type].smbus_alert) { - adapter->ara = i2c_setup_smbus_alert(&adapter->adapter, - &adapter->alert_data); - if (adapter->ara) + struct i2c_client *ara; + + ara = i2c_new_smbus_alert_device(&adapter->adapter, + &adapter->alert_data); + if (!IS_ERR(ara)) { + adapter->ara = ara; parport_enable_irq(port); - else + } else { dev_warn(&adapter->pdev->dev, "Failed to register ARA client\n"); + } } /* Add the new adapter to the list */ diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c index 19f8eec38717..7d3b9d66ad36 100644 --- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c +++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c @@ -118,6 +118,8 @@ static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk) static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c, struct device_node *node) { + struct i2c_client *ara; + if (!node) return -EINVAL; @@ -125,9 +127,12 @@ static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c, if (!i2c->alert_data.irq) return -EINVAL; - i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data); - if (!i2c->ara) - return -ENODEV; + ara = i2c_new_smbus_alert_device(&i2c->adap, &i2c->alert_data); + if (IS_ERR(ara)) + return PTR_ERR(ara); + + i2c->ara = ara; + return 0; } diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c index 8a873975cf12..823945bc3249 100644 --- a/drivers/i2c/busses/i2c-xlp9xx.c +++ b/drivers/i2c/busses/i2c-xlp9xx.c @@ -491,12 +491,16 @@ static int xlp9xx_i2c_get_frequency(struct platform_device *pdev, static int xlp9xx_i2c_smbus_setup(struct xlp9xx_i2c_dev *priv, struct platform_device *pdev) { + struct i2c_client *ara; + if (!priv->alert_data.irq) return -EINVAL; - priv->ara = i2c_setup_smbus_alert(&priv->adapter, &priv->alert_data); - if (!priv->ara) - return -ENODEV; + ara = i2c_new_smbus_alert_device(&priv->adapter, &priv->alert_data); + if (IS_ERR(ara)) + return PTR_ERR(ara); + + priv->ara = ara; return 0; } diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 3ac426a8ab5a..fd2b961f113e 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -666,7 +666,7 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated); /** - * i2c_setup_smbus_alert - Setup SMBus alert support + * i2c_new_smbus_alert_device - get ara client for SMBus alert support * @adapter: the target adapter * @setup: setup data for the SMBus alert handler * Context: can sleep @@ -682,25 +682,24 @@ EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated); * should have said it's level triggered. * * This returns the ara client, which should be saved for later use with - * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL - * to indicate an error. + * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or an + * ERRPTR to indicate an error. 
*/ -struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, - struct i2c_smbus_alert_setup *setup) +struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup) { struct i2c_board_info ara_board_info = { I2C_BOARD_INFO("smbus_alert", 0x0c), .platform_data = setup, }; - return i2c_new_device(adapter, &ara_board_info); + return i2c_new_client_device(adapter, &ara_board_info); } -EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert); +EXPORT_SYMBOL_GPL(i2c_new_smbus_alert_device); #if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter) { - struct i2c_client *client; int irq; irq = of_property_match_string(adapter->dev.of_node, "interrupt-names", @@ -710,11 +709,7 @@ int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter) else if (irq < 0) return irq; - client = i2c_setup_smbus_alert(adapter, NULL); - if (!client) - return -ENODEV; - - return 0; + return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL)); } EXPORT_SYMBOL_GPL(of_i2c_setup_smbus_alert); #endif diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 7e2f5d0eacdb..809bcf8387d0 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -184,7 +184,7 @@ static struct i2c_driver smbalert_driver = { * corresponding I2C device driver's alert function. * * It is assumed that ara is a valid i2c client previously returned by - * i2c_setup_smbus_alert(). + * i2c_new_smbus_alert_device(). */ int i2c_handle_smbus_alert(struct i2c_client *ara) { diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 585ad6fc3847..802aac0d2010 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -31,8 +31,8 @@ struct i2c_smbus_alert_setup { int irq; }; -struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, - struct i2c_smbus_alert_setup *setup); +struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); #if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) -- cgit v1.2.3-58-ga151 From a47070aac935b9c0e5d0f99843e0c8784f455ea7 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 28 Feb 2020 18:12:21 +0100 Subject: i2c: smbus: remove outdated references to irq level triggers IRQ levels are now handled within the IRQ core. Remove the forgotten references from the documentation. Fixes: 9b9f2b8bc2ac ("i2c: i2c-smbus: Use threaded irq for smbalert") Signed-off-by: Wolfram Sang Reviewed-by: Luca Ceresoli Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-smbus.c | 5 ----- include/linux/i2c-smbus.h | 5 ----- 2 files changed, 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index fd2b961f113e..b34d2ff06931 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -676,11 +676,6 @@ EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated); * Handling can be done either through our IRQ handler, or by the * adapter (from its handler, periodic polling, or whatever). * - * NOTE that if we manage the IRQ, we *MUST* know if it's level or - * edge triggered in order to hand it to the workqueue correctly. - * If triggering the alert seems to wedge the system, you probably - * should have said it's level triggered. 
- * * This returns the ara client, which should be saved for later use with * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or an * ERRPTR to indicate an error. diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 802aac0d2010..8c5459034f92 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -15,17 +15,12 @@ /** * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client - * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0) - * triggered * @irq: IRQ number, if the smbus_alert driver should take care of interrupt * handling * * If irq is not specified, the smbus_alert driver doesn't take care of * interrupt handling. In that case it is up to the I2C bus driver to either * handle the interrupts or to poll for alerts. - * - * If irq is specified then it it crucial that alert_edge_triggered is - * properly set. */ struct i2c_smbus_alert_setup { int irq; -- cgit v1.2.3-58-ga151 From e56faff57f0b39661093c00e0262d4ab9088830e Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 28 Feb 2020 15:02:03 -0600 Subject: PCI: Add pci_speed_string() Add pci_speed_string() to return a text description of the supplied bus or link speed. The slot code previously used the private pci_bus_speed_strings[] array for this purpose, but adding this interface will enable us to consolidate similar code elsewhere. Export pcie_link_speed[] and pci_speed_string() so they can be used by modules. Signed-off-by: Bjorn Helgaas --- drivers/pci/pci.h | 1 + drivers/pci/probe.c | 40 ++++++++++++++++++++++++++++++++++++++++ drivers/pci/slot.c | 38 +------------------------------------- include/linux/pci.h | 2 +- 4 files changed, 43 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f65912e0f30d..809753b10fad 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -310,6 +310,7 @@ void pci_bus_put(struct pci_bus *bus); (speed) == PCIE_SPEED_2_5GT ? 
2500*8/10 : \ 0) +const char *pci_speed_string(enum pci_bus_speed speed); enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 512cb4312ddd..4fc41b71cc95 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -640,6 +640,7 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge) } EXPORT_SYMBOL(pci_free_host_bridge); +/* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */ static const unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ @@ -659,6 +660,7 @@ static const unsigned char pcix_bus_speed[] = { PCI_SPEED_133MHz_PCIX_533 /* F */ }; +/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */ const unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCIE_SPEED_2_5GT, /* 1 */ @@ -677,6 +679,44 @@ const unsigned char pcie_link_speed[] = { PCI_SPEED_UNKNOWN, /* E */ PCI_SPEED_UNKNOWN /* F */ }; +EXPORT_SYMBOL_GPL(pcie_link_speed); + +const char *pci_speed_string(enum pci_bus_speed speed) +{ + /* Indexed by the pci_bus_speed enum */ + static const char *speed_strings[] = { + "33 MHz PCI", /* 0x00 */ + "66 MHz PCI", /* 0x01 */ + "66 MHz PCI-X", /* 0x02 */ + "100 MHz PCI-X", /* 0x03 */ + "133 MHz PCI-X", /* 0x04 */ + NULL, /* 0x05 */ + NULL, /* 0x06 */ + NULL, /* 0x07 */ + NULL, /* 0x08 */ + "66 MHz PCI-X 266", /* 0x09 */ + "100 MHz PCI-X 266", /* 0x0a */ + "133 MHz PCI-X 266", /* 0x0b */ + "Unknown AGP", /* 0x0c */ + "1x AGP", /* 0x0d */ + "2x AGP", /* 0x0e */ + "4x AGP", /* 0x0f */ + "8x AGP", /* 0x10 */ + "66 MHz PCI-X 533", /* 0x11 */ + "100 MHz PCI-X 533", /* 0x12 */ + "133 MHz PCI-X 533", /* 0x13 */ + "2.5 GT/s PCIe", /* 0x14 */ + "5.0 GT/s PCIe", /* 0x15 */ + "8.0 GT/s PCIe", /* 0x16 */ + "16.0 GT/s PCIe", /* 0x17 */ + "32.0 GT/s PCIe", /* 0x18 */ + }; + + if (speed < ARRAY_SIZE(speed_strings)) + return speed_strings[speed]; + return "Unknown"; +} +EXPORT_SYMBOL_GPL(pci_speed_string); void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) { diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index ae4aa0e1f2f4..cc386ef2fa12 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -49,45 +49,9 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) slot->number); } -/* these strings match up with the values in pci_bus_speed */ -static const char *pci_bus_speed_strings[] = { - "33 MHz PCI", /* 0x00 */ - "66 MHz PCI", /* 0x01 */ - "66 MHz PCI-X", /* 0x02 */ - "100 MHz PCI-X", /* 0x03 */ - "133 MHz PCI-X", /* 0x04 */ - NULL, /* 0x05 */ - NULL, /* 0x06 */ - NULL, /* 0x07 */ - NULL, /* 0x08 */ - "66 MHz PCI-X 266", /* 0x09 */ - "100 MHz PCI-X 266", /* 0x0a */ - "133 MHz PCI-X 266", /* 0x0b */ - "Unknown AGP", /* 0x0c */ - "1x AGP", /* 0x0d */ - "2x AGP", /* 0x0e */ - "4x AGP", /* 0x0f */ - "8x AGP", /* 0x10 */ - "66 MHz PCI-X 533", /* 0x11 */ - "100 MHz PCI-X 533", /* 0x12 */ - "133 MHz PCI-X 533", /* 0x13 */ - "2.5 GT/s PCIe", /* 0x14 */ - "5.0 GT/s PCIe", /* 0x15 */ - "8.0 GT/s PCIe", /* 0x16 */ - "16.0 GT/s PCIe", /* 0x17 */ - "32.0 GT/s PCIe", /* 0x18 */ -}; - static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) { - const char *speed_string; - - if (speed < ARRAY_SIZE(pci_bus_speed_strings)) - speed_string = pci_bus_speed_strings[speed]; - else - speed_string = "Unknown"; - - return sprintf(buf, "%s\n", speed_string); + return sprintf(buf, "%s\n", pci_speed_string(speed)); 
} static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) diff --git a/include/linux/pci.h b/include/linux/pci.h index 3840a541a9de..76f4806a154c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -236,7 +236,7 @@ enum pcie_link_width { PCIE_LNK_WIDTH_UNKNOWN = 0xff, }; -/* Based on the PCI Hotplug Spec, but some values are made up by us */ +/* See matching string table in pci_speed_string() */ enum pci_bus_speed { PCI_SPEED_33MHz = 0x00, PCI_SPEED_66MHz = 0x01, -- cgit v1.2.3-58-ga151 From e937cc1dd7966df33a478943817302502a164e25 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 6 Mar 2020 16:28:37 +0200 Subject: dmaengine: Add basic debugfs support Via the /sys/kernel/debug/dmaengine/summary file, users can get information about the DMA devices and the used channels. Example output on am654-evm with audio using two channels and after running dmatest on 4 channels: dma0 (285c0000.dma-controller): number of channels: 96 dma1 (31150000.dma-controller): number of channels: 267 dma1chan0 | 2b00000.mcasp:tx dma1chan1 | 2b00000.mcasp:rx dma1chan2 | in-use dma1chan3 | in-use dma1chan4 | in-use dma1chan5 | in-use For slave channels we can show the device and the channel name with which a given channel was requested. For non-slave devices the only information we know is that the channel is in use. DMA drivers can implement the optional dbg_summary_show callback to provide controller-specific information instead of the generic one. It is easy to extend the generic dmaengine_summary_show() to print additional information about the used channels. I have taken the idea from the gpiolib and clk subsystems. Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200306142839.17910-2-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++- include/linux/dmaengine.h | 13 +++++++- 2 files changed, 87 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index c3b1283b6d31..509abc8e8378 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -58,6 +58,65 @@ static DEFINE_IDA(dma_ida); static LIST_HEAD(dma_device_list); static long dmaengine_ref_count; +/* --- debugfs implementation --- */ +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> + +static void dmaengine_dbg_summary_show(struct seq_file *s, + struct dma_device *dma_dev) +{ + struct dma_chan *chan; + + list_for_each_entry(chan, &dma_dev->channels, device_node) { + if (chan->client_count) { + seq_printf(s, " %-13s| %s", dma_chan_name(chan), + chan->dbg_client_name ?: "in-use"); + + if (chan->router) + seq_printf(s, " (via router: %s)\n", + dev_name(chan->router->dev)); + else + seq_puts(s, "\n"); + } + } +} + +static int dmaengine_summary_show(struct seq_file *s, void *data) +{ + struct dma_device *dma_dev = NULL; + + mutex_lock(&dma_list_mutex); + list_for_each_entry(dma_dev, &dma_device_list, global_node) { + seq_printf(s, "dma%d (%s): number of channels: %u\n", + dma_dev->dev_id, dev_name(dma_dev->dev), + dma_dev->chancnt); + + if (dma_dev->dbg_summary_show) + dma_dev->dbg_summary_show(s, dma_dev); + else + dmaengine_dbg_summary_show(s, dma_dev); + + if (!list_is_last(&dma_dev->global_node, &dma_device_list)) + seq_puts(s, "\n"); + } + mutex_unlock(&dma_list_mutex); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dmaengine_summary); + +static void __init dmaengine_debugfs_init(void) +{ + struct dentry *rootdir = debugfs_create_dir("dmaengine", NULL); + + /* /sys/kernel/debug/dmaengine/summary */ +
debugfs_create_file("summary", 0444, rootdir, NULL, + &dmaengine_summary_fops); +} +#else +static inline void dmaengine_debugfs_init(void) { } +#endif /* DEBUG_FS */ + /* --- sysfs implementation --- */ #define DMA_SLAVE_NAME "slave" @@ -760,6 +819,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) return chan ? chan : ERR_PTR(-EPROBE_DEFER); found: +#ifdef CONFIG_DEBUG_FS + chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), + name); +#endif + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); if (!chan->name) return chan; @@ -837,6 +901,11 @@ void dma_release_channel(struct dma_chan *chan) chan->name = NULL; chan->slave = NULL; } + +#ifdef CONFIG_DEBUG_FS + kfree(chan->dbg_client_name); + chan->dbg_client_name = NULL; +#endif mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); @@ -1559,6 +1628,11 @@ static int __init dma_bus_init(void) if (err) return err; - return class_register(&dma_devclass); + + err = class_register(&dma_devclass); + if (!err) + dmaengine_debugfs_init(); + + return err; } arch_initcall(dma_bus_init); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d3672f065a64..72920b5cf2d7 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -300,6 +300,8 @@ struct dma_router { * @chan_id: channel ID for sysfs * @dev: class device for sysfs * @name: backlink name for sysfs + * @dbg_client_name: slave name for debugfs in format: + * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx" * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel @@ -318,6 +320,9 @@ struct dma_chan { int chan_id; struct dma_chan_dev *dev; const char *name; +#ifdef CONFIG_DEBUG_FS + char *dbg_client_name; +#endif struct list_head device_node; struct dma_chan_percpu __percpu *local; @@ -806,7 +811,9 @@ struct dma_filter { * called and there are no further references to this structure. This * must be implemented to free resources however many existing drivers * do not and are therefore not safe to unbind while in use. - * + * @dbg_summary_show: optional routine to show contents in debugfs; default code + * will be used when this is omitted, but custom code can show extra, + * controller specific information. */ struct dma_device { struct kref ref; @@ -892,6 +899,10 @@ struct dma_device { struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); void (*device_release)(struct dma_device *dev); + /* debugfs support */ +#ifdef CONFIG_DEBUG_FS + void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); +#endif }; static inline int dmaengine_slave_config(struct dma_chan *chan, -- cgit v1.2.3-58-ga151 From 26cf132de6f79c06025706ddc61e045d591d404d Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 6 Mar 2020 16:28:39 +0200 Subject: dmaengine: Create debug directories for DMA devices Create a placeholder directory for each registered DMA device. DMA drivers can use the dmaengine_get_debugfs_root() call to get their debugfs root and can populate with custom files to aim debugging. 
Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200306142839.17910-4-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 28 +++++++++++++++++++++++++++- drivers/dma/dmaengine.h | 16 ++++++++++++++++ include/linux/dmaengine.h | 1 + 3 files changed, 44 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 509abc8e8378..5a442752e07d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -62,6 +62,22 @@ static long dmaengine_ref_count; #ifdef CONFIG_DEBUG_FS #include +static struct dentry *rootdir; + +static void dmaengine_debug_register(struct dma_device *dma_dev) +{ + dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev), + rootdir); + if (IS_ERR(dma_dev->dbg_dev_root)) + dma_dev->dbg_dev_root = NULL; +} + +static void dmaengine_debug_unregister(struct dma_device *dma_dev) +{ + debugfs_remove_recursive(dma_dev->dbg_dev_root); + dma_dev->dbg_dev_root = NULL; +} + static void dmaengine_dbg_summary_show(struct seq_file *s, struct dma_device *dma_dev) { @@ -107,7 +123,7 @@ DEFINE_SHOW_ATTRIBUTE(dmaengine_summary); static void __init dmaengine_debugfs_init(void) { - struct dentry *rootdir = debugfs_create_dir("dmaengine", NULL); + rootdir = debugfs_create_dir("dmaengine", NULL); /* /sys/kernel/debug/dmaengine/summary */ debugfs_create_file("summary", 0444, rootdir, NULL, @@ -115,6 +131,12 @@ static void __init dmaengine_debugfs_init(void) } #else static inline void dmaengine_debugfs_init(void) { } +static inline int dmaengine_debug_register(struct dma_device *dma_dev) +{ + return 0; +} + +static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { } #endif /* DEBUG_FS */ /* --- sysfs implementation --- */ @@ -1265,6 +1287,8 @@ int dma_async_device_register(struct dma_device *device) dma_channel_rebalance(); mutex_unlock(&dma_list_mutex); + dmaengine_debug_register(device); + return 0; err_out: @@ -1298,6 +1322,8 @@ void dma_async_device_unregister(struct dma_device *device) { struct dma_chan *chan, *n; + dmaengine_debug_unregister(device); + list_for_each_entry_safe(chan, n, &device->channels, device_node) __dma_async_device_channel_unregister(device, chan); diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index e8a320c9e57c..1bfbd64b1371 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -182,4 +182,20 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); +#ifdef CONFIG_DEBUG_FS +#include + +static inline struct dentry * +dmaengine_get_debugfs_root(struct dma_device *dma_dev) { + return dma_dev->dbg_dev_root; +} +#else +struct dentry; +static inline struct dentry * +dmaengine_get_debugfs_root(struct dma_device *dma_dev) +{ + return NULL; +} +#endif /* CONFIG_DEBUG_FS */ + #endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 72920b5cf2d7..21065c04c4ac 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -902,6 +902,7 @@ struct dma_device { /* debugfs support */ #ifdef CONFIG_DEBUG_FS void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); + struct dentry *dbg_dev_root; #endif }; -- cgit v1.2.3-58-ga151 From 46b5889cc2c54bac7d7e727a44d28a298df23cef Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 14 Jan 2020 10:09:52 +0100 Subject: mtd: implement proper partition handling Instead of collecting 
partitions in a flat list, create a hierarchy within the mtd_info structure: use a partitions list to keep track of the partitions of an MTD device (which might itself be a partition of another MTD device), and a pointer to the parent device (NULL when the MTD device is the root one, not a partition). By also saving directly in mtd_info the offset of the partition, we can get rid of the mtd_part structure. While at it, be consistent in the naming of the mtd_info structures to ease the understanding of the new hierarchy: these structures are usually called 'mtd', unless there are multiple instances of the same structure. In this case, there is usually a parent/child bond, so we will call them 'parent' and 'child'. Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200114090952.11232-1-miquel.raynal@bootlin.com --- drivers/mtd/mtdchar.c | 12 +- drivers/mtd/mtdcore.c | 250 ++++++++++----- drivers/mtd/mtdpart.c | 695 +++++++++++------------------------------ include/linux/mtd/mtd.h | 125 +++++++- include/linux/mtd/partitions.h | 1 - 5 files changed, 478 insertions(+), 605 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index b841008a9eb7..c5935b2f9cd1 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -349,6 +349,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, uint32_t __user *retp) { + struct mtd_info *master = mtd_get_master(mtd); struct mtd_file_info *mfi = file->private_data; struct mtd_oob_ops ops = {}; uint32_t retlen; @@ -360,7 +361,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, if (length > 4096) return -EINVAL; - if (!mtd->_write_oob) + if (!master->_write_oob) return -EOPNOTSUPP; ops.ooblen = length; @@ -586,6 +587,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, static int mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) { + struct mtd_info *master = mtd_get_master(mtd); struct mtd_write_req req; struct mtd_oob_ops ops = {}; const void __user *usr_data, *usr_oob; @@ -597,9 +599,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd, usr_data = (const void __user *)(uintptr_t)req.usr_data; usr_oob = (const void __user *)(uintptr_t)req.usr_oob; - if (!mtd->_write_oob) + if (!master->_write_oob) return -EOPNOTSUPP; - ops.mode = req.mode; ops.len = (size_t)req.len; ops.ooblen = (size_t)req.ooblen; @@ -635,6 +636,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; + struct mtd_info *master = mtd_get_master(mtd); void __user *argp = (void __user *)arg; int ret = 0; struct mtd_info_user info; @@ -824,7 +826,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct nand_oobinfo oi; - if (!mtd->ooblayout) + if (!master->ooblayout) return -EOPNOTSUPP; ret = get_oobinfo(mtd, &oi); @@ -918,7 +920,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct nand_ecclayout_user *usrlay; - if (!mtd->ooblayout) + if (!master->ooblayout) return -EOPNOTSUPP; usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL); diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5fac4355b9c2..2916674208b3 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -456,13 +456,14 @@ static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state, int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, struct
mtd_pairing_info *info) { - int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd); + struct mtd_info *master = mtd_get_master(mtd); + int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master); if (wunit < 0 || wunit >= npairs) return -EINVAL; - if (mtd->pairing && mtd->pairing->get_info) - return mtd->pairing->get_info(mtd, wunit, info); + if (master->pairing && master->pairing->get_info) + return master->pairing->get_info(master, wunit, info); info->group = 0; info->pair = wunit; @@ -498,15 +499,16 @@ EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info); int mtd_pairing_info_to_wunit(struct mtd_info *mtd, const struct mtd_pairing_info *info) { - int ngroups = mtd_pairing_groups(mtd); - int npairs = mtd_wunit_per_eb(mtd) / ngroups; + struct mtd_info *master = mtd_get_master(mtd); + int ngroups = mtd_pairing_groups(master); + int npairs = mtd_wunit_per_eb(master) / ngroups; if (!info || info->pair < 0 || info->pair >= npairs || info->group < 0 || info->group >= ngroups) return -EINVAL; - if (mtd->pairing && mtd->pairing->get_wunit) - return mtd->pairing->get_wunit(mtd, info); + if (master->pairing && master->pairing->get_wunit) + return mtd->pairing->get_wunit(master, info); return info->pair; } @@ -524,10 +526,12 @@ EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit); */ int mtd_pairing_groups(struct mtd_info *mtd) { - if (!mtd->pairing || !mtd->pairing->ngroups) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->pairing || !master->pairing->ngroups) return 1; - return mtd->pairing->ngroups; + return master->pairing->ngroups; } EXPORT_SYMBOL_GPL(mtd_pairing_groups); @@ -587,6 +591,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) int add_mtd_device(struct mtd_info *mtd) { + struct mtd_info *master = mtd_get_master(mtd); struct mtd_notifier *not; int i, error; @@ -608,7 +613,7 @@ int add_mtd_device(struct mtd_info *mtd) (mtd->_read && mtd->_read_oob))) return -EINVAL; - if (WARN_ON((!mtd->erasesize || !mtd->_erase) && + if (WARN_ON((!mtd->erasesize || !master->_erase) && !(mtd->flags & MTD_NO_ERASE))) return -EINVAL; @@ -765,7 +770,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd) pr_debug("mtd device won't show a device symlink in sysfs\n"); } - mtd->orig_flags = mtd->flags; + INIT_LIST_HEAD(&mtd->partitions); + mutex_init(&mtd->master.partitions_lock); } /** @@ -971,20 +977,26 @@ EXPORT_SYMBOL_GPL(get_mtd_device); int __get_mtd_device(struct mtd_info *mtd) { + struct mtd_info *master = mtd_get_master(mtd); int err; - if (!try_module_get(mtd->owner)) + if (!try_module_get(master->owner)) return -ENODEV; - if (mtd->_get_device) { - err = mtd->_get_device(mtd); + if (master->_get_device) { + err = master->_get_device(mtd); if (err) { - module_put(mtd->owner); + module_put(master->owner); return err; } } - mtd->usecount++; + + while (mtd->parent) { + mtd->usecount++; + mtd = mtd->parent; + } + return 0; } EXPORT_SYMBOL_GPL(__get_mtd_device); @@ -1038,13 +1050,18 @@ EXPORT_SYMBOL_GPL(put_mtd_device); void __put_mtd_device(struct mtd_info *mtd) { - --mtd->usecount; - BUG_ON(mtd->usecount < 0); + struct mtd_info *master = mtd_get_master(mtd); - if (mtd->_put_device) - mtd->_put_device(mtd); + while (mtd->parent) { + --mtd->usecount; + BUG_ON(mtd->usecount < 0); + mtd = mtd->parent; + } + + if (master->_put_device) + master->_put_device(master); - module_put(mtd->owner); + module_put(master->owner); } EXPORT_SYMBOL_GPL(__put_mtd_device); @@ -1055,9 +1072,13 @@ EXPORT_SYMBOL_GPL(__put_mtd_device); */ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) { + 
struct mtd_info *master = mtd_get_master(mtd); + u64 mst_ofs = mtd_get_master_ofs(mtd, 0); + int ret; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; - if (!mtd->erasesize || !mtd->_erase) + if (!mtd->erasesize || !master->_erase) return -ENOTSUPP; if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr) @@ -1069,7 +1090,14 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) return 0; ledtrig_mtd_activity(); - return mtd->_erase(mtd, instr); + + instr->addr += mst_ofs; + ret = master->_erase(master, instr); + if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) + instr->fail_addr -= mst_ofs; + + instr->addr -= mst_ofs; + return ret; } EXPORT_SYMBOL_GPL(mtd_erase); @@ -1079,30 +1107,36 @@ EXPORT_SYMBOL_GPL(mtd_erase); int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys) { + struct mtd_info *master = mtd_get_master(mtd); + *retlen = 0; *virt = NULL; if (phys) *phys = 0; - if (!mtd->_point) + if (!master->_point) return -EOPNOTSUPP; if (from < 0 || from >= mtd->size || len > mtd->size - from) return -EINVAL; if (!len) return 0; - return mtd->_point(mtd, from, len, retlen, virt, phys); + + from = mtd_get_master_ofs(mtd, from); + return master->_point(master, from, len, retlen, virt, phys); } EXPORT_SYMBOL_GPL(mtd_point); /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { - if (!mtd->_unpoint) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_unpoint) return -EOPNOTSUPP; if (from < 0 || from >= mtd->size || len > mtd->size - from) return -EINVAL; if (!len) return 0; - return mtd->_unpoint(mtd, from, len); + return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len); } EXPORT_SYMBOL_GPL(mtd_unpoint); @@ -1129,6 +1163,25 @@ unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, } EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); +static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master, + const struct mtd_ecc_stats *old_stats) +{ + struct mtd_ecc_stats diff; + + if (master == mtd) + return; + + diff = master->ecc_stats; + diff.failed -= old_stats->failed; + diff.corrected -= old_stats->corrected; + + while (mtd->parent) { + mtd->ecc_stats.failed += diff.failed; + mtd->ecc_stats.corrected += diff.corrected; + mtd = mtd->parent; + } +} + int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { @@ -1171,8 +1224,10 @@ EXPORT_SYMBOL_GPL(mtd_write); int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { + struct mtd_info *master = mtd_get_master(mtd); + *retlen = 0; - if (!mtd->_panic_write) + if (!master->_panic_write) return -EOPNOTSUPP; if (to < 0 || to >= mtd->size || len > mtd->size - to) return -EINVAL; @@ -1183,7 +1238,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, if (!mtd->oops_panic_write) mtd->oops_panic_write = true; - return mtd->_panic_write(mtd, to, len, retlen, buf); + return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len, + retlen, buf); } EXPORT_SYMBOL_GPL(mtd_panic_write); @@ -1222,7 +1278,10 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { + struct mtd_info *master = mtd_get_master(mtd); + struct mtd_ecc_stats old_stats = master->ecc_stats; int ret_code; + ops->retlen = ops->oobretlen = 0; ret_code = mtd_check_oob_ops(mtd, from, ops); @@ 
-1232,14 +1291,17 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) ledtrig_mtd_activity(); /* Check the validity of a potential fallback on mtd->_read */ - if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf)) + if (!master->_read_oob && (!master->_read || ops->oobbuf)) return -EOPNOTSUPP; - if (mtd->_read_oob) - ret_code = mtd->_read_oob(mtd, from, ops); + from = mtd_get_master_ofs(mtd, from); + if (master->_read_oob) + ret_code = master->_read_oob(master, from, ops); else - ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen, - ops->datbuf); + ret_code = master->_read(master, from, ops->len, &ops->retlen, + ops->datbuf); + + mtd_update_ecc_stats(mtd, master, &old_stats); /* * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics @@ -1258,6 +1320,7 @@ EXPORT_SYMBOL_GPL(mtd_read_oob); int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { + struct mtd_info *master = mtd_get_master(mtd); int ret; ops->retlen = ops->oobretlen = 0; @@ -1272,14 +1335,16 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to, ledtrig_mtd_activity(); /* Check the validity of a potential fallback on mtd->_write */ - if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf)) + if (!master->_write_oob && (!master->_write || ops->oobbuf)) return -EOPNOTSUPP; - if (mtd->_write_oob) - return mtd->_write_oob(mtd, to, ops); + to = mtd_get_master_ofs(mtd, to); + + if (master->_write_oob) + return master->_write_oob(master, to, ops); else - return mtd->_write(mtd, to, ops->len, &ops->retlen, - ops->datbuf); + return master->_write(master, to, ops->len, &ops->retlen, + ops->datbuf); } EXPORT_SYMBOL_GPL(mtd_write_oob); @@ -1302,15 +1367,17 @@ EXPORT_SYMBOL_GPL(mtd_write_oob); int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc) { + struct mtd_info *master = mtd_get_master(mtd); + memset(oobecc, 0, sizeof(*oobecc)); - if (!mtd || section < 0) + if (!master || section < 0) return -EINVAL; - if (!mtd->ooblayout || !mtd->ooblayout->ecc) + if (!master->ooblayout || !master->ooblayout->ecc) return -ENOTSUPP; - return mtd->ooblayout->ecc(mtd, section, oobecc); + return master->ooblayout->ecc(master, section, oobecc); } EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); @@ -1334,15 +1401,17 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); int mtd_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobfree) { + struct mtd_info *master = mtd_get_master(mtd); + memset(oobfree, 0, sizeof(*oobfree)); - if (!mtd || section < 0) + if (!master || section < 0) return -EINVAL; - if (!mtd->ooblayout || !mtd->ooblayout->free) + if (!master->ooblayout || !master->ooblayout->free) return -ENOTSUPP; - return mtd->ooblayout->free(mtd, section, oobfree); + return master->ooblayout->free(master, section, oobfree); } EXPORT_SYMBOL_GPL(mtd_ooblayout_free); @@ -1651,60 +1720,69 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes); int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf) { - if (!mtd->_get_fact_prot_info) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_get_fact_prot_info) return -EOPNOTSUPP; if (!len) return 0; - return mtd->_get_fact_prot_info(mtd, len, retlen, buf); + return master->_get_fact_prot_info(master, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { + struct mtd_info *master = mtd_get_master(mtd); + *retlen = 0; - if (!mtd->_read_fact_prot_reg) + if 
(!master->_read_fact_prot_reg) return -EOPNOTSUPP; if (!len) return 0; - return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf); + return master->_read_fact_prot_reg(master, from, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf) { - if (!mtd->_get_user_prot_info) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_get_user_prot_info) return -EOPNOTSUPP; if (!len) return 0; - return mtd->_get_user_prot_info(mtd, len, retlen, buf); + return master->_get_user_prot_info(master, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { + struct mtd_info *master = mtd_get_master(mtd); + *retlen = 0; - if (!mtd->_read_user_prot_reg) + if (!master->_read_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; - return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf); + return master->_read_user_prot_reg(master, from, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, u_char *buf) { + struct mtd_info *master = mtd_get_master(mtd); int ret; *retlen = 0; - if (!mtd->_write_user_prot_reg) + if (!master->_write_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; - ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); + ret = master->_write_user_prot_reg(master, to, len, retlen, buf); if (ret) return ret; @@ -1718,80 +1796,105 @@ EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) { - if (!mtd->_lock_user_prot_reg) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_lock_user_prot_reg) return -EOPNOTSUPP; if (!len) return 0; - return mtd->_lock_user_prot_reg(mtd, from, len); + return master->_lock_user_prot_reg(master, from, len); } EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); /* Chip-supported device locking */ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { - if (!mtd->_lock) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_lock) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) return -EINVAL; if (!len) return 0; - return mtd->_lock(mtd, ofs, len); + return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_lock); int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { - if (!mtd->_unlock) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_unlock) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) return -EINVAL; if (!len) return 0; - return mtd->_unlock(mtd, ofs, len); + return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_unlock); int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) { - if (!mtd->_is_locked) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_is_locked) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) return -EINVAL; if (!len) return 0; - return mtd->_is_locked(mtd, ofs, len); + return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_is_locked); int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) { + struct mtd_info *master = mtd_get_master(mtd); + if (ofs < 0 || ofs >= mtd->size) return -EINVAL; - if (!mtd->_block_isreserved) + if (!master->_block_isreserved) return 0; - 
return mtd->_block_isreserved(mtd, ofs); + return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isreserved); int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) { + struct mtd_info *master = mtd_get_master(mtd); + if (ofs < 0 || ofs >= mtd->size) return -EINVAL; - if (!mtd->_block_isbad) + if (!master->_block_isbad) return 0; - return mtd->_block_isbad(mtd, ofs); + return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isbad); int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) { - if (!mtd->_block_markbad) + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + if (!master->_block_markbad) return -EOPNOTSUPP; if (ofs < 0 || ofs >= mtd->size) return -EINVAL; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; - return mtd->_block_markbad(mtd, ofs); + + ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); + if (ret) + return ret; + + while (mtd->parent) { + mtd->ecc_stats.badblocks++; + mtd = mtd->parent; + } + + return 0; } EXPORT_SYMBOL_GPL(mtd_block_markbad); @@ -1841,12 +1944,17 @@ static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { + struct mtd_info *master = mtd_get_master(mtd); + *retlen = 0; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; - if (!mtd->_writev) + + if (!master->_writev) return default_mtd_writev(mtd, vecs, count, to, retlen); - return mtd->_writev(mtd, vecs, count, to, retlen); + + return master->_writev(master, vecs, count, + mtd_get_master_ofs(mtd, to), retlen); } EXPORT_SYMBOL_GPL(mtd_writev); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 7328c066c5ba..3f6025684f58 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -20,339 +20,52 @@ #include "mtdcore.h" -/* Our partition linked list */ -static LIST_HEAD(mtd_partitions); -static DEFINE_MUTEX(mtd_partitions_mutex); - -/** - * struct mtd_part - our partition node structure - * - * @mtd: struct holding partition details - * @parent: parent mtd - flash device or another partition - * @offset: partition offset relative to the *flash device* - */ -struct mtd_part { - struct mtd_info mtd; - struct mtd_info *parent; - uint64_t offset; - struct list_head list; -}; - -/* - * Given a pointer to the MTD object in the mtd_part structure, we can retrieve - * the pointer to that structure. - */ -static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd) -{ - return container_of(mtd, struct mtd_part, mtd); -} - -static u64 part_absolute_offset(struct mtd_info *mtd) -{ - struct mtd_part *part = mtd_to_part(mtd); - - if (!mtd_is_partition(mtd)) - return 0; - - return part_absolute_offset(part->parent) + part->offset; -} - /* * MTD methods which simply translate the effective address and pass through * to the _real_ device. 
*/ -static int part_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - struct mtd_ecc_stats stats; - int res; - - stats = part->parent->ecc_stats; - res = part->parent->_read(part->parent, from + part->offset, len, - retlen, buf); - if (unlikely(mtd_is_eccerr(res))) - mtd->ecc_stats.failed += - part->parent->ecc_stats.failed - stats.failed; - else - mtd->ecc_stats.corrected += - part->parent->ecc_stats.corrected - stats.corrected; - return res; -} - -static int part_point(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, void **virt, resource_size_t *phys) -{ - struct mtd_part *part = mtd_to_part(mtd); - - return part->parent->_point(part->parent, from + part->offset, len, - retlen, virt, phys); -} - -static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) -{ - struct mtd_part *part = mtd_to_part(mtd); - - return part->parent->_unpoint(part->parent, from + part->offset, len); -} - -static int part_read_oob(struct mtd_info *mtd, loff_t from, - struct mtd_oob_ops *ops) -{ - struct mtd_part *part = mtd_to_part(mtd); - struct mtd_ecc_stats stats; - int res; - - stats = part->parent->ecc_stats; - res = part->parent->_read_oob(part->parent, from + part->offset, ops); - if (unlikely(mtd_is_eccerr(res))) - mtd->ecc_stats.failed += - part->parent->ecc_stats.failed - stats.failed; - else - mtd->ecc_stats.corrected += - part->parent->ecc_stats.corrected - stats.corrected; - return res; -} - -static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, - size_t len, size_t *retlen, u_char *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_read_user_prot_reg(part->parent, from, len, - retlen, buf); -} - -static int part_get_user_prot_info(struct mtd_info *mtd, size_t len, - size_t *retlen, struct otp_info *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_get_user_prot_info(part->parent, len, retlen, - buf); -} - -static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, - size_t len, size_t *retlen, u_char *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_read_fact_prot_reg(part->parent, from, len, - retlen, buf); -} - -static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len, - size_t *retlen, struct otp_info *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_get_fact_prot_info(part->parent, len, retlen, - buf); -} - -static int part_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_write(part->parent, to + part->offset, len, - retlen, buf); -} - -static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_panic_write(part->parent, to + part->offset, len, - retlen, buf); -} - -static int part_write_oob(struct mtd_info *mtd, loff_t to, - struct mtd_oob_ops *ops) -{ - struct mtd_part *part = mtd_to_part(mtd); - - return part->parent->_write_oob(part->parent, to + part->offset, ops); -} - -static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, - size_t len, size_t *retlen, u_char *buf) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_write_user_prot_reg(part->parent, from, len, - retlen, buf); -} - -static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, - size_t len) -{ - struct 
mtd_part *part = mtd_to_part(mtd); - return part->parent->_lock_user_prot_reg(part->parent, from, len); -} - -static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, - unsigned long count, loff_t to, size_t *retlen) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_writev(part->parent, vecs, count, - to + part->offset, retlen); -} - -static int part_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - struct mtd_part *part = mtd_to_part(mtd); - int ret; - - instr->addr += part->offset; - ret = part->parent->_erase(part->parent, instr); - if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) - instr->fail_addr -= part->offset; - instr->addr -= part->offset; - - return ret; -} - -static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_lock(part->parent, ofs + part->offset, len); -} - -static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_unlock(part->parent, ofs + part->offset, len); -} - -static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_is_locked(part->parent, ofs + part->offset, len); -} - -static void part_sync(struct mtd_info *mtd) -{ - struct mtd_part *part = mtd_to_part(mtd); - part->parent->_sync(part->parent); -} - -static int part_suspend(struct mtd_info *mtd) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_suspend(part->parent); -} - -static void part_resume(struct mtd_info *mtd) -{ - struct mtd_part *part = mtd_to_part(mtd); - part->parent->_resume(part->parent); -} - -static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs) -{ - struct mtd_part *part = mtd_to_part(mtd); - ofs += part->offset; - return part->parent->_block_isreserved(part->parent, ofs); -} - -static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) -{ - struct mtd_part *part = mtd_to_part(mtd); - ofs += part->offset; - return part->parent->_block_isbad(part->parent, ofs); -} - -static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) -{ - struct mtd_part *part = mtd_to_part(mtd); - int res; - - ofs += part->offset; - res = part->parent->_block_markbad(part->parent, ofs); - if (!res) - mtd->ecc_stats.badblocks++; - return res; -} - -static int part_get_device(struct mtd_info *mtd) -{ - struct mtd_part *part = mtd_to_part(mtd); - return part->parent->_get_device(part->parent); -} - -static void part_put_device(struct mtd_info *mtd) -{ - struct mtd_part *part = mtd_to_part(mtd); - part->parent->_put_device(part->parent); -} - -static int part_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) -{ - struct mtd_part *part = mtd_to_part(mtd); - - return mtd_ooblayout_ecc(part->parent, section, oobregion); -} - -static int part_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) -{ - struct mtd_part *part = mtd_to_part(mtd); - - return mtd_ooblayout_free(part->parent, section, oobregion); -} - -static const struct mtd_ooblayout_ops part_ooblayout_ops = { - .ecc = part_ooblayout_ecc, - .free = part_ooblayout_free, -}; - -static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) -{ - struct mtd_part *part = mtd_to_part(mtd); - - return part->parent->_max_bad_blocks(part->parent, - ofs + part->offset, len); -} - -static inline void free_partition(struct mtd_part *p) +static inline void 
free_partition(struct mtd_info *mtd) { - kfree(p->mtd.name); - kfree(p); + kfree(mtd->name); + kfree(mtd); } -static struct mtd_part *allocate_partition(struct mtd_info *parent, - const struct mtd_partition *part, int partno, - uint64_t cur_offset) +static struct mtd_info *allocate_partition(struct mtd_info *parent, + const struct mtd_partition *part, + int partno, uint64_t cur_offset) { int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize : parent->erasesize; - struct mtd_part *slave; + struct mtd_info *child, *master = mtd_get_master(parent); u32 remainder; char *name; u64 tmp; /* allocate the partition structure */ - slave = kzalloc(sizeof(*slave), GFP_KERNEL); + child = kzalloc(sizeof(*child), GFP_KERNEL); name = kstrdup(part->name, GFP_KERNEL); - if (!name || !slave) { + if (!name || !child) { printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", parent->name); kfree(name); - kfree(slave); + kfree(child); return ERR_PTR(-ENOMEM); } /* set up the MTD object for this partition */ - slave->mtd.type = parent->type; - slave->mtd.flags = parent->orig_flags & ~part->mask_flags; - slave->mtd.orig_flags = slave->mtd.flags; - slave->mtd.size = part->size; - slave->mtd.writesize = parent->writesize; - slave->mtd.writebufsize = parent->writebufsize; - slave->mtd.oobsize = parent->oobsize; - slave->mtd.oobavail = parent->oobavail; - slave->mtd.subpage_sft = parent->subpage_sft; - slave->mtd.pairing = parent->pairing; - - slave->mtd.name = name; - slave->mtd.owner = parent->owner; + child->type = parent->type; + child->part.flags = parent->flags & ~part->mask_flags; + child->flags = child->part.flags; + child->size = part->size; + child->writesize = parent->writesize; + child->writebufsize = parent->writebufsize; + child->oobsize = parent->oobsize; + child->oobavail = parent->oobavail; + child->subpage_sft = parent->subpage_sft; + + child->name = name; + child->owner = parent->owner; /* NOTE: Historically, we didn't arrange MTDs as a tree out of * concern for showing the same data in multiple partitions. @@ -360,134 +73,76 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, * so the MTD_PARTITIONED_MASTER option allows that. The master * will have device nodes etc only if this is set, so make the * parent conditional on that option. Note, this is a way to - * distinguish between the master and the partition in sysfs. + * distinguish between the parent and its partitions in sysfs. */ - slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? 
- &parent->dev : - parent->dev.parent; - slave->mtd.dev.of_node = part->of_node; - - if (parent->_read) - slave->mtd._read = part_read; - if (parent->_write) - slave->mtd._write = part_write; - - if (parent->_panic_write) - slave->mtd._panic_write = part_panic_write; - - if (parent->_point && parent->_unpoint) { - slave->mtd._point = part_point; - slave->mtd._unpoint = part_unpoint; - } - - if (parent->_read_oob) - slave->mtd._read_oob = part_read_oob; - if (parent->_write_oob) - slave->mtd._write_oob = part_write_oob; - if (parent->_read_user_prot_reg) - slave->mtd._read_user_prot_reg = part_read_user_prot_reg; - if (parent->_read_fact_prot_reg) - slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; - if (parent->_write_user_prot_reg) - slave->mtd._write_user_prot_reg = part_write_user_prot_reg; - if (parent->_lock_user_prot_reg) - slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; - if (parent->_get_user_prot_info) - slave->mtd._get_user_prot_info = part_get_user_prot_info; - if (parent->_get_fact_prot_info) - slave->mtd._get_fact_prot_info = part_get_fact_prot_info; - if (parent->_sync) - slave->mtd._sync = part_sync; - if (!partno && !parent->dev.class && parent->_suspend && - parent->_resume) { - slave->mtd._suspend = part_suspend; - slave->mtd._resume = part_resume; - } - if (parent->_writev) - slave->mtd._writev = part_writev; - if (parent->_lock) - slave->mtd._lock = part_lock; - if (parent->_unlock) - slave->mtd._unlock = part_unlock; - if (parent->_is_locked) - slave->mtd._is_locked = part_is_locked; - if (parent->_block_isreserved) - slave->mtd._block_isreserved = part_block_isreserved; - if (parent->_block_isbad) - slave->mtd._block_isbad = part_block_isbad; - if (parent->_block_markbad) - slave->mtd._block_markbad = part_block_markbad; - if (parent->_max_bad_blocks) - slave->mtd._max_bad_blocks = part_max_bad_blocks; - - if (parent->_get_device) - slave->mtd._get_device = part_get_device; - if (parent->_put_device) - slave->mtd._put_device = part_put_device; - - slave->mtd._erase = part_erase; - slave->parent = parent; - slave->offset = part->offset; - - if (slave->offset == MTDPART_OFS_APPEND) - slave->offset = cur_offset; - if (slave->offset == MTDPART_OFS_NXTBLK) { + child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? 
+ &parent->dev : parent->dev.parent; + child->dev.of_node = part->of_node; + child->parent = parent; + child->part.offset = part->offset; + INIT_LIST_HEAD(&child->partitions); + + if (child->part.offset == MTDPART_OFS_APPEND) + child->part.offset = cur_offset; + if (child->part.offset == MTDPART_OFS_NXTBLK) { tmp = cur_offset; - slave->offset = cur_offset; + child->part.offset = cur_offset; remainder = do_div(tmp, wr_alignment); if (remainder) { - slave->offset += wr_alignment - remainder; + child->part.offset += wr_alignment - remainder; printk(KERN_NOTICE "Moving partition %d: " "0x%012llx -> 0x%012llx\n", partno, - (unsigned long long)cur_offset, (unsigned long long)slave->offset); + (unsigned long long)cur_offset, + child->part.offset); } } - if (slave->offset == MTDPART_OFS_RETAIN) { - slave->offset = cur_offset; - if (parent->size - slave->offset >= slave->mtd.size) { - slave->mtd.size = parent->size - slave->offset - - slave->mtd.size; + if (child->part.offset == MTDPART_OFS_RETAIN) { + child->part.offset = cur_offset; + if (parent->size - child->part.offset >= child->size) { + child->size = parent->size - child->part.offset - + child->size; } else { printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", - part->name, parent->size - slave->offset, - slave->mtd.size); + part->name, parent->size - child->part.offset, + child->size); /* register to preserve ordering */ goto out_register; } } - if (slave->mtd.size == MTDPART_SIZ_FULL) - slave->mtd.size = parent->size - slave->offset; + if (child->size == MTDPART_SIZ_FULL) + child->size = parent->size - child->part.offset; - printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, - (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); + printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", + child->part.offset, child->part.offset + child->size, + child->name); /* let's do some sanity checks */ - if (slave->offset >= parent->size) { + if (child->part.offset >= parent->size) { /* let's register it anyway to preserve ordering */ - slave->offset = 0; - slave->mtd.size = 0; + child->part.offset = 0; + child->size = 0; /* Initialize ->erasesize to make add_mtd_device() happy. */ - slave->mtd.erasesize = parent->erasesize; - + child->erasesize = parent->erasesize; printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", part->name); goto out_register; } - if (slave->offset + slave->mtd.size > parent->size) { - slave->mtd.size = parent->size - slave->offset; + if (child->part.offset + child->size > parent->size) { + child->size = parent->size - child->part.offset; printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", - part->name, parent->name, (unsigned long long)slave->mtd.size); + part->name, parent->name, child->size); } if (parent->numeraseregions > 1) { /* Deal with variable erase size stuff */ int i, max = parent->numeraseregions; - u64 end = slave->offset + slave->mtd.size; + u64 end = child->part.offset + child->size; struct mtd_erase_region_info *regions = parent->eraseregions; /* Find the first erase regions which is part of this * partition. 
*/ - for (i = 0; i < max && regions[i].offset <= slave->offset; i++) + for (i = 0; i < max && regions[i].offset <= child->part.offset; + i++) ; /* The loop searched for the region _behind_ the first one */ if (i > 0) @@ -495,70 +150,68 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, /* Pick biggest erasesize */ for (; i < max && regions[i].offset < end; i++) { - if (slave->mtd.erasesize < regions[i].erasesize) { - slave->mtd.erasesize = regions[i].erasesize; - } + if (child->erasesize < regions[i].erasesize) + child->erasesize = regions[i].erasesize; } - BUG_ON(slave->mtd.erasesize == 0); + BUG_ON(child->erasesize == 0); } else { /* Single erase size */ - slave->mtd.erasesize = parent->erasesize; + child->erasesize = parent->erasesize; } /* - * Slave erasesize might differ from the master one if the master + * Child erasesize might differ from the parent one if the parent * exposes several regions with different erasesize. Adjust * wr_alignment accordingly. */ - if (!(slave->mtd.flags & MTD_NO_ERASE)) - wr_alignment = slave->mtd.erasesize; + if (!(child->flags & MTD_NO_ERASE)) + wr_alignment = child->erasesize; - tmp = part_absolute_offset(parent) + slave->offset; + tmp = mtd_get_master_ofs(child, 0); remainder = do_div(tmp, wr_alignment); - if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { + if ((child->flags & MTD_WRITEABLE) && remainder) { /* Doesn't start on a boundary of major erase size */ /* FIXME: Let it be writable if it is on a boundary of * _minor_ erase size though */ - slave->mtd.flags &= ~MTD_WRITEABLE; + child->flags &= ~MTD_WRITEABLE; printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", part->name); } - tmp = part_absolute_offset(parent) + slave->mtd.size; + tmp = mtd_get_master_ofs(child, 0) + child->size; remainder = do_div(tmp, wr_alignment); - if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { - slave->mtd.flags &= ~MTD_WRITEABLE; + if ((child->flags & MTD_WRITEABLE) && remainder) { + child->flags &= ~MTD_WRITEABLE; printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", part->name); } - mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops); - slave->mtd.ecc_step_size = parent->ecc_step_size; - slave->mtd.ecc_strength = parent->ecc_strength; - slave->mtd.bitflip_threshold = parent->bitflip_threshold; + child->ecc_step_size = parent->ecc_step_size; + child->ecc_strength = parent->ecc_strength; + child->bitflip_threshold = parent->bitflip_threshold; - if (parent->_block_isbad) { + if (master->_block_isbad) { uint64_t offs = 0; - while (offs < slave->mtd.size) { - if (mtd_block_isreserved(parent, offs + slave->offset)) - slave->mtd.ecc_stats.bbtblocks++; - else if (mtd_block_isbad(parent, offs + slave->offset)) - slave->mtd.ecc_stats.badblocks++; - offs += slave->mtd.erasesize; + while (offs < child->size) { + if (mtd_block_isreserved(child, offs)) + child->ecc_stats.bbtblocks++; + else if (mtd_block_isbad(child, offs)) + child->ecc_stats.badblocks++; + offs += child->erasesize; } } out_register: - return slave; + return child; } static ssize_t mtd_partition_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - struct mtd_part *part = mtd_to_part(mtd); - return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset); + + return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset); } static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL); @@ -568,9 +221,9 @@ static 
const struct attribute *mtd_partition_attrs[] = { NULL }; -static int mtd_add_partition_attrs(struct mtd_part *new) +static int mtd_add_partition_attrs(struct mtd_info *new) { - int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs); + int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs); if (ret) printk(KERN_WARNING "mtd: failed to create partition attrs, err=%d\n", ret); @@ -580,8 +233,9 @@ static int mtd_add_partition_attrs(struct mtd_part *new) int mtd_add_partition(struct mtd_info *parent, const char *name, long long offset, long long length) { + struct mtd_info *master = mtd_get_master(parent); struct mtd_partition part; - struct mtd_part *new; + struct mtd_info *child; int ret = 0; /* the direct offset is expected */ @@ -600,28 +254,28 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, part.size = length; part.offset = offset; - new = allocate_partition(parent, &part, -1, offset); - if (IS_ERR(new)) - return PTR_ERR(new); + child = allocate_partition(parent, &part, -1, offset); + if (IS_ERR(child)) + return PTR_ERR(child); - mutex_lock(&mtd_partitions_mutex); - list_add(&new->list, &mtd_partitions); - mutex_unlock(&mtd_partitions_mutex); + mutex_lock(&master->master.partitions_lock); + list_add_tail(&child->part.node, &parent->partitions); + mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(&new->mtd); + ret = add_mtd_device(child); if (ret) goto err_remove_part; - mtd_add_partition_attrs(new); + mtd_add_partition_attrs(child); return 0; err_remove_part: - mutex_lock(&mtd_partitions_mutex); - list_del(&new->list); - mutex_unlock(&mtd_partitions_mutex); + mutex_lock(&master->master.partitions_lock); + list_del(&child->part.node); + mutex_unlock(&master->master.partitions_lock); - free_partition(new); + free_partition(child); return ret; } @@ -630,119 +284,142 @@ EXPORT_SYMBOL_GPL(mtd_add_partition); /** * __mtd_del_partition - delete MTD partition * - * @priv: internal MTD struct for partition to be deleted + * @mtd: MTD structure to be deleted * * This function must be called with the partitions mutex locked. */ -static int __mtd_del_partition(struct mtd_part *priv) +static int __mtd_del_partition(struct mtd_info *mtd) { - struct mtd_part *child, *next; + struct mtd_info *child, *next; int err; - list_for_each_entry_safe(child, next, &mtd_partitions, list) { - if (child->parent == &priv->mtd) { - err = __mtd_del_partition(child); - if (err) - return err; - } + list_for_each_entry_safe(child, next, &mtd->partitions, part.node) { + err = __mtd_del_partition(child); + if (err) + return err; } - sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs); + sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs); - err = del_mtd_device(&priv->mtd); + err = del_mtd_device(mtd); if (err) return err; - list_del(&priv->list); - free_partition(priv); + list_del(&mtd->part.node); + free_partition(mtd); return 0; } /* * This function unregisters and destroys all slave MTD objects which are - * attached to the given MTD object. + * attached to the given MTD object, recursively. 
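* Locking is handled by the caller: del_mtd_partitions() takes the * master's partitions_lock before recursing through the partition tree.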
*/ -int del_mtd_partitions(struct mtd_info *mtd) +static int __del_mtd_partitions(struct mtd_info *mtd) { - struct mtd_part *slave, *next; + struct mtd_info *child, *next; + LIST_HEAD(tmp_list); int ret, err = 0; - mutex_lock(&mtd_partitions_mutex); - list_for_each_entry_safe(slave, next, &mtd_partitions, list) - if (slave->parent == mtd) { - ret = __mtd_del_partition(slave); - if (ret < 0) - err = ret; + list_for_each_entry_safe(child, next, &mtd->partitions, part.node) { + if (mtd_has_partitions(child)) + del_mtd_partitions(child); + + pr_info("Deleting %s MTD partition\n", child->name); + ret = del_mtd_device(child); + if (ret < 0) { + pr_err("Error when deleting partition \"%s\" (%d)\n", + child->name, ret); + err = ret; + continue; } - mutex_unlock(&mtd_partitions_mutex); + + list_del(&child->part.node); + free_partition(child); + } return err; } +int del_mtd_partitions(struct mtd_info *mtd) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name); + + mutex_lock(&master->master.partitions_lock); + ret = __del_mtd_partitions(mtd); + mutex_unlock(&master->master.partitions_lock); + + return ret; +} + int mtd_del_partition(struct mtd_info *mtd, int partno) { - struct mtd_part *slave, *next; + struct mtd_info *child, *master = mtd_get_master(mtd); int ret = -EINVAL; - mutex_lock(&mtd_partitions_mutex); - list_for_each_entry_safe(slave, next, &mtd_partitions, list) - if ((slave->parent == mtd) && - (slave->mtd.index == partno)) { - ret = __mtd_del_partition(slave); + mutex_lock(&master->master.partitions_lock); + list_for_each_entry(child, &mtd->partitions, part.node) { + if (child->index == partno) { + ret = __mtd_del_partition(child); break; } - mutex_unlock(&mtd_partitions_mutex); + } + mutex_unlock(&master->master.partitions_lock); return ret; } EXPORT_SYMBOL_GPL(mtd_del_partition); /* - * This function, given a master MTD object and a partition table, creates - * and registers slave MTD objects which are bound to the master according to - * the partition definitions. + * This function, given a parent MTD object and a partition table, creates + * and registers the child MTD objects which are bound to the parent according + * to the partition definitions. * - * For historical reasons, this function's caller only registers the master + * For historical reasons, this function's caller only registers the parent * if the MTD_PARTITIONED_MASTER config option is set. 
*/ -int add_mtd_partitions(struct mtd_info *master, +int add_mtd_partitions(struct mtd_info *parent, const struct mtd_partition *parts, int nbparts) { - struct mtd_part *slave; + struct mtd_info *child, *master = mtd_get_master(parent); uint64_t cur_offset = 0; int i, ret; - printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); + printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", + nbparts, parent->name); for (i = 0; i < nbparts; i++) { - slave = allocate_partition(master, parts + i, i, cur_offset); - if (IS_ERR(slave)) { - ret = PTR_ERR(slave); + child = allocate_partition(parent, parts + i, i, cur_offset); + if (IS_ERR(child)) { + ret = PTR_ERR(child); goto err_del_partitions; } - mutex_lock(&mtd_partitions_mutex); - list_add(&slave->list, &mtd_partitions); - mutex_unlock(&mtd_partitions_mutex); + mutex_lock(&master->master.partitions_lock); + list_add_tail(&child->part.node, &parent->partitions); + mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(&slave->mtd); + ret = add_mtd_device(child); if (ret) { - mutex_lock(&mtd_partitions_mutex); - list_del(&slave->list); - mutex_unlock(&mtd_partitions_mutex); + mutex_lock(&master->master.partitions_lock); + list_del(&child->part.node); + mutex_unlock(&master->master.partitions_lock); - free_partition(slave); + free_partition(child); goto err_del_partitions; } - mtd_add_partition_attrs(slave); + mtd_add_partition_attrs(child); + /* Look for subpartitions */ - parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); + parse_mtd_partitions(child, parts[i].types, NULL); - cur_offset = slave->offset + slave->mtd.size; + cur_offset = child->part.offset + child->size; } return 0; @@ -1023,29 +700,11 @@ void mtd_part_parser_cleanup(struct mtd_partitions *parts) } } -int mtd_is_partition(const struct mtd_info *mtd) -{ - struct mtd_part *part; - int ispart = 0; - - mutex_lock(&mtd_partitions_mutex); - list_for_each_entry(part, &mtd_partitions, list) - if (&part->mtd == mtd) { - ispart = 1; - break; - } - mutex_unlock(&mtd_partitions_mutex); - - return ispart; -} -EXPORT_SYMBOL_GPL(mtd_is_partition); - /* Returns the size of the entire flash chip */ uint64_t mtd_get_device_size(const struct mtd_info *mtd) { - if (!mtd_is_partition(mtd)) - return mtd->size; + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); - return mtd_get_device_size(mtd_to_part(mtd)->parent); + return master->size; } EXPORT_SYMBOL_GPL(mtd_get_device_size); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 249e8d9bfbcd..2d1f4a61f4ac 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -194,10 +195,43 @@ struct mtd_debug_info { const char *partid; }; +/** + * struct mtd_part - MTD partition specific fields + * + * @node: list node used to add an MTD partition to the parent partition list + * @offset: offset of the partition relatively to the parent offset + * @flags: original flags (before the mtdpart logic decided to tweak them based + * on flash constraints, like eraseblock/pagesize alignment) + * + * This struct is embedded in mtd_info and contains partition-specific + * properties/fields. + */ +struct mtd_part { + struct list_head node; + u64 offset; + u32 flags; +}; + +/** + * struct mtd_master - MTD master specific fields + * + * @partitions_lock: lock protecting accesses to the partition list. Protects + * not only the master partition list, but also all + * sub-partitions. 
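+ * Only the master's instance of the lock exists (partitions carry + * the mtd_part side of the union instead), so every locker resolves + * it through mtd_get_master() first.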
+ * @suspended: set to 1 when the device is suspended, 0 otherwise + * + * This struct is embedded in mtd_info and contains master-specific + * properties/fields. The master is the root MTD device from the MTD partition + * point of view. + */ +struct mtd_master { + struct mutex partitions_lock; + unsigned int suspended : 1; +}; + struct mtd_info { u_char type; uint32_t flags; - uint32_t orig_flags; /* Flags as before running mtd checks */ uint64_t size; // Total size of the MTD /* "Major" erase size for the device. Naïve users may take this @@ -339,8 +373,52 @@ struct mtd_info { int usecount; struct mtd_debug_info dbg; struct nvmem_device *nvmem; + + /* + * Parent device from the MTD partition point of view. + * + * MTD masters do not have any parent, MTD partitions do. The parent + * MTD device can itself be a partition. + */ + struct mtd_info *parent; + + /* List of partitions attached to this MTD device */ + struct list_head partitions; + + union { + struct mtd_part part; + struct mtd_master master; + }; }; +static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd) +{ + while (mtd->parent) + mtd = mtd->parent; + + return mtd; +} + +static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs) +{ + while (mtd->parent) { + ofs += mtd->part.offset; + mtd = mtd->parent; + } + + return ofs; +} + +static inline bool mtd_is_partition(const struct mtd_info *mtd) +{ + return mtd->parent; +} + +static inline bool mtd_has_partitions(const struct mtd_info *mtd) +{ + return !list_empty(&mtd->partitions); +} + int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc); int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, @@ -392,13 +470,16 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) static inline int mtd_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) { - if (!mtd->_max_bad_blocks) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_max_bad_blocks) return -ENOTSUPP; if (mtd->size < (len + ofs) || ofs < 0) return -EINVAL; - return mtd->_max_bad_blocks(mtd, ofs, len); + return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs), + len); } int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, @@ -439,8 +520,10 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, static inline void mtd_sync(struct mtd_info *mtd) { - if (mtd->_sync) - mtd->_sync(mtd); + struct mtd_info *master = mtd_get_master(mtd); + + if (master->_sync) + master->_sync(master); } int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); @@ -452,13 +535,31 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); static inline int mtd_suspend(struct mtd_info *mtd) { - return mtd->_suspend ? mtd->_suspend(mtd) : 0; + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + if (master->master.suspended) + return 0; + + ret = master->_suspend ? 
master->_suspend(master) : 0; + if (ret) + return ret; + + master->master.suspended = 1; + return 0; } static inline void mtd_resume(struct mtd_info *mtd) { - if (mtd->_resume) - mtd->_resume(mtd); + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->master.suspended) + return; + + if (master->_resume) + master->_resume(master); + + master->master.suspended = 0; } static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) @@ -538,7 +639,9 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base, static inline int mtd_has_oob(const struct mtd_info *mtd) { - return mtd->_read_oob && mtd->_write_oob; + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); + + return master->_read_oob && master->_write_oob; } static inline int mtd_type_is_nand(const struct mtd_info *mtd) @@ -548,7 +651,9 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd) static inline int mtd_can_have_bb(const struct mtd_info *mtd) { - return !!mtd->_block_isbad; + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); + + return !!master->_block_isbad; } /* Kernel-side ioctl definitions */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 11cb0c50cd84..e545c050d3e8 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -105,7 +105,6 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser); module_driver(__mtd_part_parser, register_mtd_parser, \ deregister_mtd_parser) -int mtd_is_partition(const struct mtd_info *mtd); int mtd_add_partition(struct mtd_info *master, const char *name, long long offset, long long length); int mtd_del_partition(struct mtd_info *master, int partno); -- cgit v1.2.3-58-ga151 From c6fbcb70132ffc66696a94dd3d8e6215c750254f Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Sun, 23 Feb 2020 19:06:33 +0100 Subject: mtd: rawnand: Fix a typo ("manufecturer") MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonathan Neuschäfer Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200223180634.8736-1-j.neuschaefer@gmx.net --- include/linux/mtd/rawnand.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 4ab9bccfcde0..3c7c15aadcee 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1215,7 +1215,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * struct nand_flash_dev - NAND Flash Device ID Structure * @name: a human-readable name of the NAND chip * @dev_id: the device ID (the second byte of the full chip ID array) - * @mfr_id: manufecturer ID part of the full chip ID array (refers the same + * @mfr_id: manufacturer ID part of the full chip ID array (refers the same * memory address as ``id[0]``) * @dev_id: device ID part of the full chip ID array (refers the same memory * address as ``id[1]``) -- cgit v1.2.3-58-ga151 From 92270086b7e5ada7ab381c06cc3da2e95ed17088 Mon Sep 17 00:00:00 2001 From: Mason Yang Date: Tue, 3 Mar 2020 15:21:21 +0800 Subject: mtd: rawnand: Add support for manufacturer specific lock/unlock operation Add nand_lock() & nand_unlock() for manufacturer specific lock & unlock operation when the device supports the Block Protection function. 
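The partition rework above replaces the stack of per-partition wrappers with two small walks up the partition tree. As a rough, self-contained illustration of the arithmetic behind mtd_get_master_ofs() (the structure layout and the offsets below are invented for the example; this is not kernel code):

#include <stdio.h>
#include <stdint.h>

struct mtd_info {
	struct mtd_info *parent;   /* NULL for the master device */
	uint64_t part_offset;      /* stands in for mtd->part.offset */
};

/* Same walk as mtd_get_master_ofs() in the hunk above: climb to the
 * root, accumulating each level's partition offset on the way up. */
static uint64_t get_master_ofs(const struct mtd_info *mtd, uint64_t ofs)
{
	while (mtd->parent) {
		ofs += mtd->part_offset;
		mtd = mtd->parent;
	}
	return ofs;
}

int main(void)
{
	struct mtd_info master  = { NULL,    0 };
	struct mtd_info part    = { &master, 0x100000 }; /* 1 MiB in */
	struct mtd_info subpart = { &part,   0x20000 };  /* 128 KiB in */

	/* offset 0x1000 inside the sub-partition is 0x121000 on the chip */
	printf("0x%llx\n",
	       (unsigned long long)get_master_ofs(&subpart, 0x1000));
	return 0;
}

Every mtdcore wrapper above follows the same pattern: resolve the master once, translate the offset once, then call the master's _ops.

The lock/unlock commit that follows only adds the plumbing from mtd_lock()/mtd_unlock() down to the new chip hooks; a manufacturer driver still has to provide them. A hypothetical sketch (the foo_* names and the block-protection register step are assumptions; only the lock_area/unlock_area members come from the patch):

#include <linux/mtd/rawnand.h>

static int foo_lock_area(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	/* would program the vendor's block-protection bits for the range */
	return 0;
}

static int foo_unlock_area(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	/* would clear the same block-protection bits */
	return 0;
}

static void foo_nand_init(struct nand_chip *chip)
{
	chip->lock_area = foo_lock_area;
	chip->unlock_area = foo_unlock_area;
}

With the hooks populated, mtd_lock()/mtd_unlock() reach them through the nand_lock()/nand_unlock() entry points installed in nand_scan_tail() below.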
Signed-off-by: Mason Yang Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1583220084-10890-2-git-send-email-masonccyang@mxic.com.tw --- drivers/mtd/nand/raw/nand_base.c | 36 ++++++++++++++++++++++++++++++++++-- include/linux/mtd/rawnand.h | 5 +++++ 2 files changed, 39 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index a3ed6c54963e..a13b91aa3780 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -4365,6 +4365,38 @@ static void nand_shutdown(struct mtd_info *mtd) nand_suspend(mtd); } +/** + * nand_lock - [MTD Interface] Lock the NAND flash + * @mtd: MTD device structure + * @ofs: offset byte address + * @len: number of bytes to lock (must be a multiple of block/page size) + */ +static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (!chip->lock_area) + return -ENOTSUPP; + + return chip->lock_area(chip, ofs, len); +} + +/** + * nand_unlock - [MTD Interface] Unlock the NAND flash + * @mtd: MTD device structure + * @ofs: offset byte address + * @len: number of bytes to unlock (must be a multiple of block/page size) + */ +static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (!chip->unlock_area) + return -ENOTSUPP; + + return chip->unlock_area(chip, ofs, len); +} + /* Set default functions */ static void nand_set_defaults(struct nand_chip *chip) { @@ -5791,8 +5823,8 @@ static int nand_scan_tail(struct nand_chip *chip) mtd->_read_oob = nand_read_oob; mtd->_write_oob = nand_write_oob; mtd->_sync = nand_sync; - mtd->_lock = NULL; - mtd->_unlock = NULL; + mtd->_lock = nand_lock; + mtd->_unlock = nand_unlock; mtd->_suspend = nand_suspend; mtd->_resume = nand_resume; mtd->_reboot = nand_shutdown; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 3c7c15aadcee..49ed50fb44ab 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1077,6 +1077,8 @@ struct nand_legacy { * @manufacturer: [INTERN] Contains manufacturer information * @manufacturer.desc: [INTERN] Contains manufacturer's description * @manufacturer.priv: [INTERN] Contains manufacturer private information + * @lock_area: [REPLACEABLE] specific NAND chip lock operation + * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation */ struct nand_chip { @@ -1136,6 +1138,9 @@ struct nand_chip { const struct nand_manufacturer *desc; void *priv; } manufacturer; + + int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3-58-ga151 From 34471abfc8fedcebf7c45a5e37ef383b8bb16de3 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Sat, 22 Feb 2020 08:08:49 +0800 Subject: thermal: of-thermal: add API for getting sensor ID from DT This patch adds new API thermal_zone_of_get_sensor_id() to provide the feature of getting sensor ID from DT thermal zone's node. It's useful for thermal driver to register the specific thermal zone devices from DT in a common way. 
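As a sketch of the "common way" described above, a sensor driver probe might walk the DT thermal zones and ask which sensor ID each zone wants; the foo_* driver and the dev_info() placeholder are invented, and only the thermal_zone_of_get_sensor_id() call matches the API added below:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

static int foo_thermal_probe(struct platform_device *pdev)
{
	struct device_node *np, *child;
	u32 id;
	int ret;

	np = of_find_node_by_name(NULL, "thermal-zones");
	if (!np)
		return -ENODEV;

	for_each_available_child_of_node(np, child) {
		/* skips zones whose phandle points at another sensor */
		ret = thermal_zone_of_get_sensor_id(child,
						    pdev->dev.of_node, &id);
		if (ret)
			continue;

		/* "id" selects the sensor within this device; a real
		 * driver would register a thermal zone for it here */
		dev_info(&pdev->dev, "zone %pOFn uses sensor %u\n",
			 child, id);
	}

	of_node_put(np);
	return 0;
}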
Signed-off-by: Anson Huang Reviewed-by: Dong Aisheng Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/1582330132-13461-2-git-send-email-Anson.Huang@nxp.com --- drivers/thermal/of-thermal.c | 62 ++++++++++++++++++++++++++++++++------------ include/linux/thermal.h | 10 +++++++ 2 files changed, 56 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index ef0baa954ff0..874a47d6923f 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -448,6 +448,50 @@ thermal_zone_of_add_sensor(struct device_node *zone, return tzd; } +/** + * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone + * @tz_np: a valid thermal zone device node. + * @sensor_np: a sensor node of a valid sensor device. + * @id: the sensor ID returned if success. + * + * This function will get sensor ID from a given thermal zone node and + * the sensor node must match the temperature provider @sensor_np. + * + * Return: 0 on success, proper error code otherwise. + */ + +int thermal_zone_of_get_sensor_id(struct device_node *tz_np, + struct device_node *sensor_np, + u32 *id) +{ + struct of_phandle_args sensor_specs; + int ret; + + ret = of_parse_phandle_with_args(tz_np, + "thermal-sensors", + "#thermal-sensor-cells", + 0, + &sensor_specs); + if (ret) + return ret; + + if (sensor_specs.np != sensor_np) { + of_node_put(sensor_specs.np); + return -ENODEV; + } + + if (sensor_specs.args_count > 1) + pr_warn("%pOFn: too many cells in sensor specifier %d\n", + sensor_specs.np, sensor_specs.args_count); + + *id = sensor_specs.args_count ? sensor_specs.args[0] : 0; + + of_node_put(sensor_specs.np); + + return 0; +} +EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id); + /** * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone * @dev: a valid struct device pointer of a sensor device. 
Must contain @@ -499,36 +543,22 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, sensor_np = of_node_get(dev->of_node); for_each_available_child_of_node(np, child) { - struct of_phandle_args sensor_specs; int ret, id; /* For now, thermal framework supports only 1 sensor per zone */ - ret = of_parse_phandle_with_args(child, "thermal-sensors", - "#thermal-sensor-cells", - 0, &sensor_specs); + ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id); if (ret) continue; - if (sensor_specs.args_count >= 1) { - id = sensor_specs.args[0]; - WARN(sensor_specs.args_count > 1, - "%pOFn: too many cells in sensor specifier %d\n", - sensor_specs.np, sensor_specs.args_count); - } else { - id = 0; - } - - if (sensor_specs.np == sensor_np && id == sensor_id) { + if (id == sensor_id) { tzd = thermal_zone_of_add_sensor(child, sensor_np, data, ops); if (!IS_ERR(tzd)) tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED); - of_node_put(sensor_specs.np); of_node_put(child); goto exit; } - of_node_put(sensor_specs.np); } exit: of_node_put(sensor_np); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 126913c6a53b..53e6f677761f 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -364,6 +364,9 @@ struct thermal_trip { /* Function declarations */ #ifdef CONFIG_THERMAL_OF +int thermal_zone_of_get_sensor_id(struct device_node *tz_np, + struct device_node *sensor_np, + u32 *id); struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops); @@ -375,6 +378,13 @@ struct thermal_zone_device *devm_thermal_zone_of_sensor_register( void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz); #else + +static int thermal_zone_of_get_sensor_id(struct device_node *tz_np, + struct device_node *sensor_np, + u32 *id) +{ + return -ENOENT; +} static inline struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops) -- cgit v1.2.3-58-ga151 From 15a26319c41962b7cf87603bc51d667f43d2ca67 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Tue, 3 Mar 2020 16:04:43 +0800 Subject: thermal: Fix build warning of !defined(CONFIG_THERMAL_OF) Add "inline" to thermal_zone_of_get_sensor_id() function to avoid below build warning of !defined(CONFIG_THERMAL_OF). 
In file included from drivers/hwmon/hwmon.c:22: include/linux/thermal.h:382:12: warning: 'thermal_zone_of_get_sensor_id' defined but not used [-Wunused-function] 382 | static int thermal_zone_of_get_sensor_id(struct device_node *tz_np, Signed-off-by: Anson Huang Reported-by: Stephen Rothwell Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/1583222684-10229-1-git-send-email-Anson.Huang@nxp.com --- include/linux/thermal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 53e6f677761f..c91b1e344d56 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -379,7 +379,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz); #else -static int thermal_zone_of_get_sensor_id(struct device_node *tz_np, +static inline int thermal_zone_of_get_sensor_id(struct device_node *tz_np, struct device_node *sensor_np, u32 *id) { -- cgit v1.2.3-58-ga151 From 0bc68af9137dc3f30b161de4ce546c7799f88d1e Mon Sep 17 00:00:00 2001 From: Shivamurthy Shastri Date: Wed, 11 Mar 2020 18:57:33 +0100 Subject: mtd: spinand: micron: identify SPI NAND device with Continuous Read mode Add SPINAND_HAS_CR_FEAT_BIT flag to identify the SPI NAND device with the Continuous Read mode. Some of the Micron SPI NAND devices have the "Continuous Read" feature enabled by default, which does not fit the subsystem needs. In this mode, the READ CACHE command doesn't require the starting column address. The device always output the data starting from the first column of the cache register, and once the end of the cache register reached, the data output continues through the next page. With the continuous read mode, it is possible to read out the entire block using a single READ command, and once the end of the block reached, the output pins become High-Z state. However, during this mode the read command doesn't output the OOB area. Hence, we disable the feature at probe time. Signed-off-by: Shivamurthy Shastri Reviewed-by: Boris Brezillon Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200311175735.2007-5-sshivamurthy@micron.com --- drivers/mtd/nand/spi/micron.c | 16 ++++++++++++++++ include/linux/mtd/spinand.h | 1 + 2 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index 26925714a9fb..956f7710aca2 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -18,6 +18,8 @@ #define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) #define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4) +#define MICRON_CFG_CR BIT(0) + static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -137,7 +139,21 @@ static const struct spinand_info micron_spinand_table[] = { micron_8_ecc_get_status)), }; +static int micron_spinand_init(struct spinand_device *spinand) +{ + /* + * M70A device series enable Continuous Read feature at Power-up, + * which is not supported. Disable this bit to avoid any possible + * failure. 
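+ * In this mode a single READ CACHE command streams page after page + * starting from column 0 and never returns the OOB area, which does + * not fit the subsystem's page-oriented accessors.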
+ */ + if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT) + return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0); + + return 0; +} + static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = { + .init = micron_spinand_init, }; const struct spinand_manufacturer micron_spinand_manufacturer = { diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index f4c4ae87181b..1077c45721ff 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -284,6 +284,7 @@ struct spinand_ecc_info { }; #define SPINAND_HAS_QE_BIT BIT(0) +#define SPINAND_HAS_CR_FEAT_BIT BIT(1) /** * struct spinand_info - Structure used to describe SPI NAND chips -- cgit v1.2.3-58-ga151 From fbe639b44a82755d639df1c5d147c93f02ac5a0f Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Thu, 12 Mar 2020 17:38:40 +0530 Subject: soc: qcom: Introduce Protection Domain Restart helpers Qualcomm SoCs (starting with MSM8998) allow for multiple protection domains to run on the same Q6 sub-system. This allows for services like ATH10K WLAN FW to have their own separate address space and crash/recover without disrupting the modem and other PDs running on the same sub-system. The PDR helpers introduces an abstraction that allows for tracking/controlling the life cycle of protection domains running on various Q6 sub-systems. Signed-off-by: Sibi Sankar Link: https://lore.kernel.org/r/20200312120842.21991-2-sibis@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/Kconfig | 4 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/pdr_interface.c | 757 +++++++++++++++++++++++++++++++++++++++ drivers/soc/qcom/pdr_internal.h | 379 ++++++++++++++++++++ include/linux/soc/qcom/pdr.h | 29 ++ include/linux/soc/qcom/qmi.h | 1 + 6 files changed, 1171 insertions(+) create mode 100644 drivers/soc/qcom/pdr_interface.c create mode 100644 drivers/soc/qcom/pdr_internal.h create mode 100644 include/linux/soc/qcom/pdr.h (limited to 'include/linux') diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 80aa8b6c56e0..48501f0245b0 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -76,6 +76,10 @@ config QCOM_OCMEM requirements. This is typically used by the GPU, camera/video, and audio components on some Snapdragon SoCs. +config QCOM_PDR_HELPERS + tristate + select QCOM_QMI_HELPERS + config QCOM_PM bool "Qualcomm Power Management" depends on ARCH_QCOM && !ARM64 diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 9fb35c8a495e..5d6b83dc58e8 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_OCMEM) += ocmem.o +obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o obj-$(CONFIG_QCOM_PM) += spm.o obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o qmi_helpers-y += qmi_encdec.o qmi_interface.o diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c new file mode 100644 index 000000000000..7ee088b9cc7c --- /dev/null +++ b/drivers/soc/qcom/pdr_interface.c @@ -0,0 +1,757 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 The Linux Foundation. All rights reserved. 
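+ * + * Overall flow: one QMI handle (locator_hdl) asks the servreg locator + * to translate a service path into a notifier service instance, while + * a second handle (notifier_hdl) registers with that notifier, then + * receives and acks its state-update indications from workqueues.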
+ */ + +#include +#include +#include +#include + +#include "pdr_internal.h" + +struct pdr_service { + char service_name[SERVREG_NAME_LENGTH + 1]; + char service_path[SERVREG_NAME_LENGTH + 1]; + + struct sockaddr_qrtr addr; + + unsigned int instance; + unsigned int service; + u8 service_data_valid; + u32 service_data; + int state; + + bool need_notifier_register; + bool need_notifier_remove; + bool need_locator_lookup; + bool service_connected; + + struct list_head node; +}; + +struct pdr_handle { + struct qmi_handle locator_hdl; + struct qmi_handle notifier_hdl; + + struct sockaddr_qrtr locator_addr; + + struct list_head lookups; + struct list_head indack_list; + + /* control access to pdr lookup/indack lists */ + struct mutex list_lock; + + /* serialize pd status invocation */ + struct mutex status_lock; + + /* control access to the locator state */ + struct mutex lock; + + bool locator_init_complete; + + struct work_struct locator_work; + struct work_struct notifier_work; + struct work_struct indack_work; + + struct workqueue_struct *notifier_wq; + struct workqueue_struct *indack_wq; + + void (*status)(int state, char *service_path, void *priv); + void *priv; +}; + +struct pdr_list_node { + enum servreg_service_state curr_state; + u16 transaction_id; + struct pdr_service *pds; + struct list_head node; +}; + +static int pdr_locator_new_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + locator_hdl); + struct pdr_service *pds; + + /* Create a local client port for QMI communication */ + pdr->locator_addr.sq_family = AF_QIPCRTR; + pdr->locator_addr.sq_node = svc->node; + pdr->locator_addr.sq_port = svc->port; + + mutex_lock(&pdr->lock); + pdr->locator_init_complete = true; + mutex_unlock(&pdr->lock); + + /* Service pending lookup requests */ + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->need_locator_lookup) + schedule_work(&pdr->locator_work); + } + mutex_unlock(&pdr->list_lock); + + return 0; +} + +static void pdr_locator_del_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + locator_hdl); + + mutex_lock(&pdr->lock); + pdr->locator_init_complete = false; + mutex_unlock(&pdr->lock); + + pdr->locator_addr.sq_node = 0; + pdr->locator_addr.sq_port = 0; +} + +static struct qmi_ops pdr_locator_ops = { + .new_server = pdr_locator_new_server, + .del_server = pdr_locator_del_server, +}; + +static int pdr_register_listener(struct pdr_handle *pdr, + struct pdr_service *pds, + bool enable) +{ + struct servreg_register_listener_resp resp; + struct servreg_register_listener_req req; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, + servreg_register_listener_resp_ei, + &resp); + if (ret < 0) + return ret; + + req.enable = enable; + strcpy(req.service_path, pds->service_path); + + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, + &txn, SERVREG_REGISTER_LISTENER_REQ, + SERVREG_REGISTER_LISTENER_REQ_LEN, + servreg_register_listener_req_ei, + &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s register listener txn wait failed: %d\n", + pds->service_path, ret); + return ret; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s register listener failed: 0x%x\n", + pds->service_path, resp.resp.error); + return ret; + } + + if ((int)resp.curr_state < INT_MIN || 
(int)resp.curr_state > INT_MAX) + pr_err("PDR: %s notification state invalid: 0x%x\n", + pds->service_path, resp.curr_state); + + pds->state = resp.curr_state; + + return 0; +} + +static void pdr_notifier_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + notifier_work); + struct pdr_service *pds; + int ret; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service_connected) { + if (!pds->need_notifier_register) + continue; + + pds->need_notifier_register = false; + ret = pdr_register_listener(pdr, pds, true); + if (ret < 0) + pds->state = SERVREG_SERVICE_STATE_DOWN; + } else { + if (!pds->need_notifier_remove) + continue; + + pds->need_notifier_remove = false; + pds->state = SERVREG_SERVICE_STATE_DOWN; + } + + mutex_lock(&pdr->status_lock); + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + } + mutex_unlock(&pdr->list_lock); +} + +static int pdr_notifier_new_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + struct pdr_service *pds; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service == svc->service && + pds->instance == svc->instance) { + pds->service_connected = true; + pds->need_notifier_register = true; + pds->addr.sq_family = AF_QIPCRTR; + pds->addr.sq_node = svc->node; + pds->addr.sq_port = svc->port; + queue_work(pdr->notifier_wq, &pdr->notifier_work); + } + } + mutex_unlock(&pdr->list_lock); + + return 0; +} + +static void pdr_notifier_del_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + struct pdr_service *pds; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service == svc->service && + pds->instance == svc->instance) { + pds->service_connected = false; + pds->need_notifier_remove = true; + pds->addr.sq_node = 0; + pds->addr.sq_port = 0; + queue_work(pdr->notifier_wq, &pdr->notifier_work); + } + } + mutex_unlock(&pdr->list_lock); +} + +static struct qmi_ops pdr_notifier_ops = { + .new_server = pdr_notifier_new_server, + .del_server = pdr_notifier_del_server, +}; + +static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds, + u16 tid) +{ + struct servreg_set_ack_resp resp; + struct servreg_set_ack_req req; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei, + &resp); + if (ret < 0) + return ret; + + req.transaction_id = tid; + strcpy(req.service_path, pds->service_path); + + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, + &txn, SERVREG_SET_ACK_REQ, + SERVREG_SET_ACK_REQ_LEN, + servreg_set_ack_req_ei, + &req); + + /* Skip waiting for response */ + qmi_txn_cancel(&txn); + return ret; +} + +static void pdr_indack_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + indack_work); + struct pdr_list_node *ind, *tmp; + struct pdr_service *pds; + + list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) { + pds = ind->pds; + pdr_send_indack_msg(pdr, pds, ind->transaction_id); + + mutex_lock(&pdr->status_lock); + pds->state = ind->curr_state; + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + + mutex_lock(&pdr->list_lock); + list_del(&ind->node); + mutex_unlock(&pdr->list_lock); + + kfree(ind); + } 
+} + +static void pdr_indication_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + const struct servreg_state_updated_ind *ind_msg = data; + struct pdr_list_node *ind; + struct pdr_service *pds; + bool found; + + if (!ind_msg || !ind_msg->service_path[0] || + strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH) + return; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (strcmp(pds->service_path, ind_msg->service_path)) + continue; + + found = true; + break; + } + mutex_unlock(&pdr->list_lock); + + if (!found) + return; + + pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n", + ind_msg->service_path, ind_msg->curr_state, + ind_msg->transaction_id); + + ind = kzalloc(sizeof(*ind), GFP_KERNEL); + if (!ind) + return; + + ind->transaction_id = ind_msg->transaction_id; + ind->curr_state = ind_msg->curr_state; + ind->pds = pds; + + mutex_lock(&pdr->list_lock); + list_add_tail(&ind->node, &pdr->indack_list); + mutex_unlock(&pdr->list_lock); + + queue_work(pdr->indack_wq, &pdr->indack_work); +} + +static struct qmi_msg_handler qmi_indication_handler[] = { + { + .type = QMI_INDICATION, + .msg_id = SERVREG_STATE_UPDATED_IND_ID, + .ei = servreg_state_updated_ind_ei, + .decoded_size = sizeof(struct servreg_state_updated_ind), + .fn = pdr_indication_cb, + }, + {} +}; + +static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, + struct servreg_get_domain_list_resp *resp, + struct pdr_handle *pdr) +{ + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->locator_hdl, &txn, + servreg_get_domain_list_resp_ei, resp); + if (ret < 0) + return ret; + + ret = qmi_send_request(&pdr->locator_hdl, + &pdr->locator_addr, + &txn, SERVREG_GET_DOMAIN_LIST_REQ, + SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, + servreg_get_domain_list_req_ei, + req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s get domain list txn wait failed: %d\n", + req->service_name, ret); + return ret; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s get domain list failed: 0x%x\n", + req->service_name, resp->resp.error); + return -EREMOTEIO; + } + + return 0; +} + +static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) +{ + struct servreg_get_domain_list_resp *resp; + struct servreg_get_domain_list_req req; + struct servreg_location_entry *entry; + int domains_read = 0; + int ret, i; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + /* Prepare req message */ + strcpy(req.service_name, pds->service_name); + req.domain_offset_valid = true; + req.domain_offset = 0; + + do { + req.domain_offset = domains_read; + ret = pdr_get_domain_list(&req, resp, pdr); + if (ret < 0) + goto out; + + for (i = domains_read; i < resp->domain_list_len; i++) { + entry = &resp->domain_list[i]; + + if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) + continue; + + if (!strcmp(entry->name, pds->service_path)) { + pds->service_data_valid = entry->service_data_valid; + pds->service_data = entry->service_data; + pds->instance = entry->instance; + goto out; + } + } + + /* Update ret to indicate that the service is not yet found */ + ret = -ENXIO; + + /* Always read total_domains from the response msg */ + if (resp->domain_list_len > resp->total_domains) + resp->domain_list_len = 
resp->total_domains; + + domains_read += resp->domain_list_len; + } while (domains_read < resp->total_domains); +out: + kfree(resp); + return ret; +} + +static void pdr_notify_lookup_failure(struct pdr_handle *pdr, + struct pdr_service *pds, + int err) +{ + pr_err("PDR: service lookup for %s failed: %d\n", + pds->service_name, err); + + if (err == -ENXIO) + return; + + list_del(&pds->node); + pds->state = SERVREG_LOCATOR_ERR; + mutex_lock(&pdr->status_lock); + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + kfree(pds); +} + +static void pdr_locator_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + locator_work); + struct pdr_service *pds, *tmp; + int ret = 0; + + /* Bail out early if the SERVREG LOCATOR QMI service is not up */ + mutex_lock(&pdr->lock); + if (!pdr->locator_init_complete) { + mutex_unlock(&pdr->lock); + pr_debug("PDR: SERVICE LOCATOR service not available\n"); + return; + } + mutex_unlock(&pdr->lock); + + mutex_lock(&pdr->list_lock); + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { + if (!pds->need_locator_lookup) + continue; + + ret = pdr_locate_service(pdr, pds); + if (ret < 0) { + pdr_notify_lookup_failure(pdr, pds, ret); + continue; + } + + ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1, + pds->instance); + if (ret < 0) { + pdr_notify_lookup_failure(pdr, pds, ret); + continue; + } + + pds->need_locator_lookup = false; + } + mutex_unlock(&pdr->list_lock); +} + +/** + * pdr_add_lookup() - register a tracking request for a PD + * @pdr: PDR client handle + * @service_name: service name of the tracking request + * @service_path: service path of the tracking request + * + * Registering a pdr lookup allows for tracking the life cycle of the PD. + * + * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is + * returned if a lookup is already in progress for the given service path. + */ +struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, + const char *service_name, + const char *service_path) +{ + struct pdr_service *pds, *tmp; + int ret; + + if (IS_ERR_OR_NULL(pdr)) + return ERR_PTR(-EINVAL); + + if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH || + !service_path || strlen(service_path) > SERVREG_NAME_LENGTH) + return ERR_PTR(-EINVAL); + + pds = kzalloc(sizeof(*pds), GFP_KERNEL); + if (!pds) + return ERR_PTR(-ENOMEM); + + pds->service = SERVREG_NOTIFIER_SERVICE; + strcpy(pds->service_name, service_name); + strcpy(pds->service_path, service_path); + pds->need_locator_lookup = true; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(tmp, &pdr->lookups, node) { + if (strcmp(tmp->service_path, service_path)) + continue; + + mutex_unlock(&pdr->list_lock); + ret = -EALREADY; + goto err; + } + + list_add(&pds->node, &pdr->lookups); + mutex_unlock(&pdr->list_lock); + + schedule_work(&pdr->locator_work); + + return pds; +err: + kfree(pds); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pdr_add_lookup); + +/** + * pdr_restart_pd() - restart PD + * @pdr: PDR client handle + * @pds: PD service handle + * + * Restarts the PD tracked by the PDR client handle for a given service path. + * + * Return: 0 on success, negative errno on failure. 
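+ *
+ * Note: the PD must previously have been registered with pdr_add_lookup()
+ * and its notifier service must currently be connected; otherwise no
+ * request is sent and -EINVAL is returned.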
+ */ +int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds) +{ + struct servreg_restart_pd_resp resp; + struct servreg_restart_pd_req req = { 0 }; + struct sockaddr_qrtr addr; + struct pdr_service *tmp; + struct qmi_txn txn; + int ret; + + if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds)) + return -EINVAL; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(tmp, &pdr->lookups, node) { + if (tmp != pds) + continue; + + if (!pds->service_connected) + break; + + /* Prepare req message */ + strcpy(req.service_path, pds->service_path); + addr = pds->addr; + break; + } + mutex_unlock(&pdr->list_lock); + + if (!req.service_path[0]) + return -EINVAL; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, + servreg_restart_pd_resp_ei, + &resp); + if (ret < 0) + return ret; + + ret = qmi_send_request(&pdr->notifier_hdl, &addr, + &txn, SERVREG_RESTART_PD_REQ, + SERVREG_RESTART_PD_REQ_MAX_LEN, + servreg_restart_pd_req_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s PD restart txn wait failed: %d\n", + req.service_path, ret); + return ret; + } + + /* Check response if PDR is disabled */ + if (resp.resp.result == QMI_RESULT_FAILURE_V01 && + resp.resp.error == QMI_ERR_DISABLED_V01) { + pr_err("PDR: %s PD restart is disabled: 0x%x\n", + req.service_path, resp.resp.error); + return -EOPNOTSUPP; + } + + /* Check the response for other error cases */ + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s request for PD restart failed: 0x%x\n", + req.service_path, resp.resp.error); + return -EREMOTEIO; + } + + return 0; +} +EXPORT_SYMBOL(pdr_restart_pd); + +/** + * pdr_handle_alloc() - initialize the PDR client handle + * @status: function to be called on PD state change + * @priv: handle for client's use + * + * Initializes the PDR client handle to allow for tracking/restart of PDs. + * + * Return: pdr_handle object on success, ERR_PTR on failure.
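+ *
+ * Note: @status is invoked from workqueue context, with the handle's
+ * status_lock held, whenever the state of a tracked service changes.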
+ */ +struct pdr_handle *pdr_handle_alloc(void (*status)(int state, + char *service_path, + void *priv), void *priv) +{ + struct pdr_handle *pdr; + int ret; + + if (!status) + return ERR_PTR(-EINVAL); + + pdr = kzalloc(sizeof(*pdr), GFP_KERNEL); + if (!pdr) + return ERR_PTR(-ENOMEM); + + pdr->status = status; + pdr->priv = priv; + + mutex_init(&pdr->status_lock); + mutex_init(&pdr->list_lock); + mutex_init(&pdr->lock); + + INIT_LIST_HEAD(&pdr->lookups); + INIT_LIST_HEAD(&pdr->indack_list); + + INIT_WORK(&pdr->locator_work, pdr_locator_work); + INIT_WORK(&pdr->notifier_work, pdr_notifier_work); + INIT_WORK(&pdr->indack_work, pdr_indack_work); + + pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq"); + if (!pdr->notifier_wq) { + ret = -ENOMEM; + goto free_pdr_handle; + } + + pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI); + if (!pdr->indack_wq) { + ret = -ENOMEM; + goto destroy_notifier; + } + + ret = qmi_handle_init(&pdr->locator_hdl, + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, + &pdr_locator_ops, NULL); + if (ret < 0) + goto destroy_indack; + + ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1); + if (ret < 0) + goto release_qmi_handle; + + ret = qmi_handle_init(&pdr->notifier_hdl, + SERVREG_STATE_UPDATED_IND_MAX_LEN, + &pdr_notifier_ops, + qmi_indication_handler); + if (ret < 0) + goto release_qmi_handle; + + return pdr; + +release_qmi_handle: + qmi_handle_release(&pdr->locator_hdl); +destroy_indack: + destroy_workqueue(pdr->indack_wq); +destroy_notifier: + destroy_workqueue(pdr->notifier_wq); +free_pdr_handle: + kfree(pdr); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pdr_handle_alloc); + +/** + * pdr_handle_release() - release the PDR client handle + * @pdr: PDR client handle + * + * Cleans up pending tracking requests and releases the underlying qmi handles. 
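+ *
+ * Note: all outstanding work is cancelled and the handle itself is
+ * freed, so @pdr must not be used after this function returns.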
+ */ +void pdr_handle_release(struct pdr_handle *pdr) +{ + struct pdr_service *pds, *tmp; + + if (IS_ERR_OR_NULL(pdr)) + return; + + mutex_lock(&pdr->list_lock); + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { + list_del(&pds->node); + kfree(pds); + } + mutex_unlock(&pdr->list_lock); + + cancel_work_sync(&pdr->locator_work); + cancel_work_sync(&pdr->notifier_work); + cancel_work_sync(&pdr->indack_work); + + destroy_workqueue(pdr->notifier_wq); + destroy_workqueue(pdr->indack_wq); + + qmi_handle_release(&pdr->locator_hdl); + qmi_handle_release(&pdr->notifier_hdl); + + kfree(pdr); +} +EXPORT_SYMBOL(pdr_handle_release); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers"); diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h new file mode 100644 index 000000000000..15b5002e4127 --- /dev/null +++ b/drivers/soc/qcom/pdr_internal.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PDR_HELPER_INTERNAL__ +#define __QCOM_PDR_HELPER_INTERNAL__ + +#include + +#define SERVREG_LOCATOR_SERVICE 0x40 +#define SERVREG_NOTIFIER_SERVICE 0x42 + +#define SERVREG_REGISTER_LISTENER_REQ 0x20 +#define SERVREG_GET_DOMAIN_LIST_REQ 0x21 +#define SERVREG_STATE_UPDATED_IND_ID 0x22 +#define SERVREG_SET_ACK_REQ 0x23 +#define SERVREG_RESTART_PD_REQ 0x24 + +#define SERVREG_DOMAIN_LIST_LENGTH 32 +#define SERVREG_RESTART_PD_REQ_MAX_LEN 67 +#define SERVREG_REGISTER_LISTENER_REQ_LEN 71 +#define SERVREG_SET_ACK_REQ_LEN 72 +#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 +#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 +#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 + +struct servreg_location_entry { + char name[SERVREG_NAME_LENGTH + 1]; + u8 service_data_valid; + u32 service_data; + u32 instance; +}; + +struct qmi_elem_info servreg_location_entry_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + name), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + instance), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data), + }, + {} +}; + +struct servreg_get_domain_list_req { + char service_name[SERVREG_NAME_LENGTH + 1]; + u8 domain_offset_valid; + u32 domain_offset; +}; + +struct qmi_elem_info servreg_get_domain_list_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_get_domain_list_req, + service_name), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset), + }, + {} +}; + +struct 
servreg_get_domain_list_resp { + struct qmi_response_type_v01 resp; + u8 total_domains_valid; + u16 total_domains; + u8 db_rev_count_valid; + u16 db_rev_count; + u8 domain_list_valid; + u32 domain_list_len; + struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; +}; + +struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_get_domain_list_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, + .elem_size = sizeof(struct servreg_location_entry), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list), + .ei_array = servreg_location_entry_ei, + }, + {} +}; + +struct servreg_register_listener_req { + u8 enable; + char service_path[SERVREG_NAME_LENGTH + 1]; +}; + +struct qmi_elem_info servreg_register_listener_req_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_register_listener_req, + enable), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_req, + service_path), + }, + {} +}; + +struct servreg_register_listener_resp { + struct qmi_response_type_v01 resp; + u8 curr_state_valid; + enum servreg_service_state curr_state; +}; + +struct qmi_elem_info servreg_register_listener_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum 
servreg_service_state), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state), + }, + {} +}; + +struct servreg_restart_pd_req { + char service_path[SERVREG_NAME_LENGTH + 1]; +}; + +struct qmi_elem_info servreg_restart_pd_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_restart_pd_req, + service_path), + }, + {} +}; + +struct servreg_restart_pd_resp { + struct qmi_response_type_v01 resp; +}; + +struct qmi_elem_info servreg_restart_pd_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_restart_pd_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +struct servreg_state_updated_ind { + enum servreg_service_state curr_state; + char service_path[SERVREG_NAME_LENGTH + 1]; + u16 transaction_id; +}; + +struct qmi_elem_info servreg_state_updated_ind_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_state_updated_ind, + curr_state), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_state_updated_ind, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct servreg_state_updated_ind, + transaction_id), + }, + {} +}; + +struct servreg_set_ack_req { + char service_path[SERVREG_NAME_LENGTH + 1]; + u16 transaction_id; +}; + +struct qmi_elem_info servreg_set_ack_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_set_ack_req, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_req, + transaction_id), + }, + {} +}; + +struct servreg_set_ack_resp { + struct qmi_response_type_v01 resp; +}; + +struct qmi_elem_info servreg_set_ack_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +#endif diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h new file mode 100644 index 000000000000..83a8ea612e69 --- /dev/null +++ b/include/linux/soc/qcom/pdr.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PDR_HELPER__ +#define __QCOM_PDR_HELPER__ + +#include + +#define SERVREG_NAME_LENGTH 64 + +struct pdr_service; +struct pdr_handle; + +enum servreg_service_state { + SERVREG_LOCATOR_ERR = 0x1, + SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF, + SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF, + SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF, + SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF, +}; + +struct pdr_handle *pdr_handle_alloc(void (*status)(int state, + char *service_path, + void *priv), void *priv); +struct pdr_service *pdr_add_lookup(struct 
pdr_handle *pdr, + const char *service_name, + const char *service_path); +int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds); +void pdr_handle_release(struct pdr_handle *pdr); + +#endif diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index 5efa2b67fa55..e712f94b89fc 100644 --- a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -88,6 +88,7 @@ struct qmi_elem_info { #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 #define QMI_ERR_INVALID_ID_V01 41 #define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_DISABLED_V01 69 #define QMI_ERR_INCOMPATIBLE_STATE_V01 90 #define QMI_ERR_NOT_SUPPORTED_V01 94 -- cgit v1.2.3-58-ga151 From 83473566260288c560e5443ea4cc40a458aa9e6a Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Thu, 12 Mar 2020 17:38:42 +0530 Subject: soc: qcom: apr: Add avs/audio tracking functionality Use PDR helper functions to track the protection domains that the apr services are dependent upon on SDM845 SoC, specifically the "avs/audio" service running on ADSP Q6. Reviewed-by: Bjorn Andersson Signed-off-by: Sibi Sankar Link: https://lore.kernel.org/r/20200312120842.21991-4-sibis@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/Kconfig | 1 + drivers/soc/qcom/apr.c | 123 +++++++++++++++++++++++++++++++++++++++---- include/linux/soc/qcom/apr.h | 1 + 3 files changed, 116 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 48501f0245b0..9ac6b0072e8c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -200,6 +200,7 @@ config QCOM_APR tristate "Qualcomm APR Bus (Asynchronous Packet Router)" depends on ARCH_QCOM || COMPILE_TEST depends on RPMSG + select QCOM_PDR_HELPERS help Enable APR IPC protocol support between application processor and QDSP6. 
APR is diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c index 4fcc32420c47..1f35b097c635 100644 --- a/drivers/soc/qcom/apr.c +++ b/drivers/soc/qcom/apr.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -21,6 +22,7 @@ struct apr { spinlock_t rx_lock; struct idr svcs_idr; int dest_domain_id; + struct pdr_handle *pdr; struct workqueue_struct *rxwq; struct work_struct rx_work; struct list_head rx_list; @@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np, id->svc_id + 1, GFP_ATOMIC); spin_unlock(&apr->svcs_lock); + of_property_read_string_index(np, "qcom,protection-domain", + 1, &adev->service_path); + dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev)); ret = device_register(&adev->dev); @@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np, return ret; } -static void of_register_apr_devices(struct device *dev) +static int of_apr_add_pd_lookups(struct device *dev) +{ + const char *service_name, *service_path; + struct apr *apr = dev_get_drvdata(dev); + struct device_node *node; + struct pdr_service *pds; + int ret; + + for_each_child_of_node(dev->of_node, node) { + ret = of_property_read_string_index(node, "qcom,protection-domain", + 0, &service_name); + if (ret < 0) + continue; + + ret = of_property_read_string_index(node, "qcom,protection-domain", + 1, &service_path); + if (ret < 0) { + dev_err(dev, "pdr service path missing: %d\n", ret); + return ret; + } + + pds = pdr_add_lookup(apr->pdr, service_name, service_path); + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { + dev_err(dev, "pdr add lookup failed: %d\n", ret); + return PTR_ERR(pds); + } + } + + return 0; +} + +static void of_register_apr_devices(struct device *dev, const char *svc_path) { struct apr *apr = dev_get_drvdata(dev); struct device_node *node; + const char *service_path; + int ret; for_each_child_of_node(dev->of_node, node) { struct apr_device_id id = { {0} }; + /* + * This function is called with svc_path NULL during + * apr_probe(), in which case we register any apr devices + * without a qcom,protection-domain specified. + * + * Then as the protection domains becomes available + * (if applicable) this function is again called, but with + * svc_path representing the service becoming available. In + * this case we register any apr devices with a matching + * qcom,protection-domain. 
+ */ + + ret = of_property_read_string_index(node, "qcom,protection-domain", + 1, &service_path); + if (svc_path) { + /* skip APR services that are PD independent */ + if (ret) + continue; + + /* skip APR services whose PD paths don't match */ + if (strcmp(service_path, svc_path)) + continue; + } else { + /* skip APR services whose PD lookups are registered */ + if (ret == 0) + continue; + } + if (of_property_read_u32(node, "reg", &id.svc_id)) continue; @@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev) } } +static int apr_remove_device(struct device *dev, void *svc_path) +{ + struct apr_device *adev = to_apr_device(dev); + + if (svc_path && adev->service_path) { + if (!strcmp(adev->service_path, (char *)svc_path)) + device_unregister(&adev->dev); + } else { + device_unregister(&adev->dev); + } + + return 0; +} + +static void apr_pd_status(int state, char *svc_path, void *priv) +{ + struct apr *apr = (struct apr *)priv; + + switch (state) { + case SERVREG_SERVICE_STATE_UP: + of_register_apr_devices(apr->dev, svc_path); + break; + case SERVREG_SERVICE_STATE_DOWN: + device_for_each_child(apr->dev, svc_path, apr_remove_device); + break; + } +} + static int apr_probe(struct rpmsg_device *rpdev) { struct device *dev = &rpdev->dev; @@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev) return -ENOMEM; } INIT_WORK(&apr->rx_work, apr_rxwq); + + apr->pdr = pdr_handle_alloc(apr_pd_status, apr); + if (IS_ERR(apr->pdr)) { + dev_err(dev, "Failed to init PDR handle\n"); + ret = PTR_ERR(apr->pdr); + goto destroy_wq; + } + INIT_LIST_HEAD(&apr->rx_list); spin_lock_init(&apr->rx_lock); spin_lock_init(&apr->svcs_lock); idr_init(&apr->svcs_idr); - of_register_apr_devices(dev); - - return 0; -} -static int apr_remove_device(struct device *dev, void *null) -{ - struct apr_device *adev = to_apr_device(dev); + ret = of_apr_add_pd_lookups(dev); + if (ret) + goto handle_release; - device_unregister(&adev->dev); + of_register_apr_devices(dev, NULL); return 0; + +handle_release: + pdr_handle_release(apr->pdr); +destroy_wq: + destroy_workqueue(apr->rxwq); + return ret; } static void apr_remove(struct rpmsg_device *rpdev) { struct apr *apr = dev_get_drvdata(&rpdev->dev); + pdr_handle_release(apr->pdr); device_for_each_child(&rpdev->dev, NULL, apr_remove_device); flush_workqueue(apr->rxwq); destroy_workqueue(apr->rxwq); diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index c5d52e2cb275..7f0bc3cf4d61 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -85,6 +85,7 @@ struct apr_device { uint16_t domain_id; uint32_t version; char name[APR_NAME_SIZE]; + const char *service_path; spinlock_t lock; struct list_head node; }; -- cgit v1.2.3-58-ga151 From 4f8232bbf887123f78bcdca3dfd2b3dfa52a0112 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 21 Feb 2020 12:24:02 -0800 Subject: dma-direct: remove the cached_kernel_address hook dma-direct now finds the kernel address for coherent allocations based on the dma address, so the cached_kernel_address hooks is unused and can be removed entirely. 
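As a rough illustration of why the hook is redundant (a sketch only, not the literal kernel code: dma_direct_to_page() is an internal dma-direct helper, and the function name below is invented):

	/*
	 * Sketch: dma-direct can recover the cached kernel address of a
	 * coherent buffer from its dma address alone, so no per-arch
	 * cached_kernel_address() callback is needed.
	 */
	static void *dma_direct_cpu_addr(struct device *dev, dma_addr_t dma_addr)
	{
		struct page *page = dma_direct_to_page(dev, dma_addr);

		return page_address(page);
	}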
Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- arch/Kconfig | 2 +- arch/microblaze/mm/consistent.c | 7 ------- arch/mips/mm/dma-noncoherent.c | 5 ----- arch/nios2/mm/dma-mapping.c | 10 ---------- arch/xtensa/kernel/pci-dma.c | 10 ++-------- include/linux/dma-noncoherent.h | 1 - 6 files changed, 3 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 98de654b79b3..7994b239f155 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -249,7 +249,7 @@ config ARCH_HAS_SET_DIRECT_MAP # # Select if arch has an uncached kernel segment and provides the -# uncached_kernel_address / cached_kernel_address symbols to use it +# uncached_kernel_address symbol to use it # config ARCH_HAS_UNCACHED_SEGMENT select ARCH_HAS_DMA_PREP_COHERENT diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index 8c5f0c332d8b..cede7c5e8135 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -49,11 +49,4 @@ void *uncached_kernel_address(void *ptr) pr_warn("ERROR: Your cache coherent area is CACHED!!!\n"); return (void *)addr; } - -void *cached_kernel_address(void *ptr) -{ - unsigned long addr = (unsigned long)ptr; - - return (void *)(addr & ~UNCACHED_SHADOW_MASK); -} #endif /* CONFIG_MMU */ diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index dc42ffc83825..77dce28ad0a0 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -54,11 +54,6 @@ void *uncached_kernel_address(void *addr) return (void *)(__pa(addr) + UNCAC_BASE); } -void *cached_kernel_address(void *addr) -{ - return __va(addr) - UNCAC_BASE; -} - static inline void dma_sync_virt(void *addr, size_t size, enum dma_data_direction dir) { diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index 0ed711e37902..f30f2749257c 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -75,13 +75,3 @@ void *uncached_kernel_address(void *ptr) return (void *)ptr; } - -void *cached_kernel_address(void *ptr) -{ - unsigned long addr = (unsigned long)ptr; - - addr &= ~CONFIG_NIOS2_IO_REGION_BASE; - addr |= CONFIG_NIOS2_KERNEL_REGION_BASE; - - return (void *)ptr; -} diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 72b6222daa0b..6a685545d5c9 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -88,18 +88,12 @@ void arch_dma_prep_coherent(struct page *page, size_t size) /* * Memory caching is platform-dependent in noMMU xtensa configurations. - * The following two functions should be implemented in platform code - * in order to enable coherent DMA memory operations when CONFIG_MMU is not - * enabled. + * This function should be implemented in platform code in order to enable + * coherent DMA memory operations when CONFIG_MMU is not enabled. 
*/ #ifdef CONFIG_MMU void *uncached_kernel_address(void *p) { return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; } - -void *cached_kernel_address(void *p) -{ - return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; -} #endif /* CONFIG_MMU */ diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index ca9b5770caee..b6b72e19b0cd 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -109,6 +109,5 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size) #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ void *uncached_kernel_address(void *addr); -void *cached_kernel_address(void *addr); #endif /* _LINUX_DMA_NONCOHERENT_H */ -- cgit v1.2.3-58-ga151 From fa7e2247c5729f990c7456fe09f3af99c8f2571b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 21 Feb 2020 15:55:43 -0800 Subject: dma-direct: make uncached_kernel_address more general Rename the symbol to arch_dma_set_uncached, and pass a size to it as well as allow an error return. That will allow reusing this hook for in-place pagetable remapping. As the in-place remap doesn't always require an explicit cache flush, also detangle ARCH_HAS_DMA_PREP_COHERENT from ARCH_HAS_DMA_SET_UNCACHED. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- arch/Kconfig | 8 ++++---- arch/microblaze/Kconfig | 2 +- arch/microblaze/mm/consistent.c | 2 +- arch/mips/Kconfig | 3 ++- arch/mips/mm/dma-noncoherent.c | 2 +- arch/nios2/Kconfig | 3 ++- arch/nios2/mm/dma-mapping.c | 2 +- arch/xtensa/Kconfig | 2 +- arch/xtensa/kernel/pci-dma.c | 2 +- include/linux/dma-noncoherent.h | 2 +- kernel/dma/direct.c | 10 ++++++---- 11 files changed, 21 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 7994b239f155..090cfe0c82a7 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -248,11 +248,11 @@ config ARCH_HAS_SET_DIRECT_MAP bool # -# Select if arch has an uncached kernel segment and provides the -# uncached_kernel_address symbol to use it +# Select if the architecture provides the arch_dma_set_uncached symbol to +# either provide an uncached segement alias for a DMA allocation, or +# to remap the page tables in place. 
# -config ARCH_HAS_UNCACHED_SEGMENT - select ARCH_HAS_DMA_PREP_COHERENT +config ARCH_HAS_DMA_SET_UNCACHED bool # Select if arch init_task must go in the __init_task_data section diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 6a331bd57ea8..9606c244b5b8 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -8,7 +8,7 @@ config MICROBLAZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_UNCACHED_SEGMENT if !MMU + select ARCH_HAS_DMA_SET_UNCACHED if !MMU select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_TABLE_SORT diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index cede7c5e8135..e09b66e43cb6 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) #define UNCACHED_SHADOW_MASK 0 #endif /* CONFIG_XILINX_UNCACHED_SHADOW */ -void *uncached_kernel_address(void *ptr) +void *arch_dma_set_uncached(void *ptr, size_t size) { unsigned long addr = (unsigned long)ptr; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 797d7f1ad5fe..489185db501e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1187,8 +1187,9 @@ config DMA_NONCOHERENT # significant advantages. # select ARCH_HAS_DMA_WRITE_COMBINE + select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_UNCACHED_SEGMENT + select ARCH_HAS_DMA_SET_UNCACHED select DMA_NONCOHERENT_MMAP select DMA_NONCOHERENT_CACHE_SYNC select NEED_DMA_MAP_STATE diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 77dce28ad0a0..fcea92d95d86 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -49,7 +49,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) dma_cache_wback_inv((unsigned long)page_address(page), size); } -void *uncached_kernel_address(void *addr) +void *arch_dma_set_uncached(void *addr, size_t size) { return (void *)(__pa(addr) + UNCAC_BASE); } diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 44b5da37e8bd..2fc4ed210b5f 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -2,9 +2,10 @@ config NIOS2 def_bool y select ARCH_32BIT_OFF_T + select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_UNCACHED_SEGMENT + select ARCH_HAS_DMA_SET_UNCACHED select ARCH_NO_SWAP select TIMER_OF select GENERIC_ATOMIC64 diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index f30f2749257c..fd887d5f3f9a 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) flush_dcache_range(start, start + size); } -void *uncached_kernel_address(void *ptr) +void *arch_dma_set_uncached(void *ptr, size_t size) { unsigned long addr = (unsigned long)ptr; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 32ee759a3fda..de229424b659 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -6,7 +6,7 @@ config XTENSA select ARCH_HAS_DMA_PREP_COHERENT if MMU select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU - select ARCH_HAS_UNCACHED_SEGMENT if MMU + select ARCH_HAS_DMA_SET_UNCACHED if MMU select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_FRAME_POINTERS diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 
6a685545d5c9..17c4384f8495 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -92,7 +92,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) * coherent DMA memory operations when CONFIG_MMU is not enabled. */ #ifdef CONFIG_MMU -void *uncached_kernel_address(void *p) +void *arch_dma_set_uncached(void *p, size_t size) { return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; } diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index b6b72e19b0cd..1a4039506673 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -108,6 +108,6 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size) } #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ -void *uncached_kernel_address(void *addr); +void *arch_dma_set_uncached(void *addr, size_t size); #endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 650580fbbff3..baf4e93735c3 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -192,10 +192,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, memset(ret, 0, size); - if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) && + if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && dma_alloc_need_uncached(dev, attrs)) { arch_dma_prep_coherent(page, size); - ret = uncached_kernel_address(ret); + ret = arch_dma_set_uncached(ret, size); + if (IS_ERR(ret)) + goto out_free_pages; } done: if (force_dma_unencrypted(dev)) @@ -236,7 +238,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { - if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) && + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && dma_alloc_need_uncached(dev, attrs)) return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); @@ -246,7 +248,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) && + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && dma_alloc_need_uncached(dev, attrs)) arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); -- cgit v1.2.3-58-ga151 From 999a5d1203baa7cff00586361feae263ee3f23a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 21 Feb 2020 12:35:05 -0800 Subject: dma-direct: provide a arch_dma_clear_uncached hook This allows the arch code to reset the page tables to cached access when freeing a dma coherent allocation that was set to uncached using arch_dma_set_uncached. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- arch/Kconfig | 7 +++++++ include/linux/dma-noncoherent.h | 1 + kernel/dma/direct.c | 2 ++ 3 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 090cfe0c82a7..c26302f90c96 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -255,6 +255,13 @@ config ARCH_HAS_SET_DIRECT_MAP config ARCH_HAS_DMA_SET_UNCACHED bool +# +# Select if the architectures provides the arch_dma_clear_uncached symbol +# to undo an in-place page table remap for uncached access. 
+# +config ARCH_HAS_DMA_CLEAR_UNCACHED + bool + # Select if arch init_task must go in the __init_task_data section config ARCH_TASK_STRUCT_ON_STACK bool diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 1a4039506673..b59f1b6be3e9 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -109,5 +109,6 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size) #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ void *arch_dma_set_uncached(void *addr, size_t size); +void arch_dma_clear_uncached(void *addr, size_t size); #endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index baf4e93735c3..412f560dc69f 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -231,6 +231,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) vunmap(cpu_addr); + else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED)) + arch_dma_clear_uncached(cpu_addr, size); dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size); } -- cgit v1.2.3-58-ga151 From 7eac52648a4c24ad23a05f62db97867c92a5747b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 7 Feb 2020 19:11:12 -0500 Subject: SUNRPC: Add a flag to avoid reference counts on credentials Add a flag to signal to the RPC layer that the credential is already pinned for the duration of the RPC call. Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 1 + net/sunrpc/clnt.c | 5 +++-- net/sunrpc/sched.c | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index a6ef35184ef1..df696efdd675 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -132,6 +132,7 @@ struct rpc_task_setup { #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ #define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ #define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */ +#define RPC_TASK_CRED_NOREF 0x8000 /* No refcount on the credential */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 7324b21f923e..2345e563c2f4 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1099,8 +1099,9 @@ rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg) task->tk_msg.rpc_proc = msg->rpc_proc; task->tk_msg.rpc_argp = msg->rpc_argp; task->tk_msg.rpc_resp = msg->rpc_resp; - if (msg->rpc_cred != NULL) - task->tk_msg.rpc_cred = get_cred(msg->rpc_cred); + task->tk_msg.rpc_cred = msg->rpc_cred; + if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) + get_cred(task->tk_msg.rpc_cred); } } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 55e900255b0c..6eff14119a88 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -1162,7 +1162,8 @@ static void rpc_release_resources_task(struct rpc_task *task) { xprt_release(task); if (task->tk_msg.rpc_cred) { - put_cred(task->tk_msg.rpc_cred); + if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) + put_cred(task->tk_msg.rpc_cred); task->tk_msg.rpc_cred = NULL; } rpc_task_release_client(task); -- cgit v1.2.3-58-ga151 From 8d6bda7f23a9b3ef1d7e386f01924c37f18fe771 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 11 Mar 2020 11:21:12 -0400 Subject: SUNRPC: Remove xdr_buf_read_mic() Clean up: this function is no longer 
used. Signed-off-by: Chuck Lever Reviewed-by: Benjamin Coddington Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xdr.h | 1 - net/sunrpc/xdr.c | 55 ---------------------------------------------- 2 files changed, 56 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b41f34977995..8a6dd5bd6748 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -184,7 +184,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); -extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index e5497dc2475b..15b58c5144f9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1235,61 +1235,6 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) } EXPORT_SYMBOL_GPL(xdr_encode_word); -/** - * xdr_buf_read_mic() - obtain the address of the GSS mic from xdr buf - * @buf: pointer to buffer containing a mic - * @mic: on success, returns the address of the mic - * @offset: the offset in buf where mic may be found - * - * This function may modify the xdr buf if the mic is found to be straddling - * a boundary between head, pages, and tail. On success the mic can be read - * from the address returned. There is no need to free the mic. - * - * Return: Success returns 0, otherwise an integer error. - */ -int xdr_buf_read_mic(struct xdr_buf *buf, struct xdr_netobj *mic, unsigned int offset) -{ - struct xdr_buf subbuf; - unsigned int boundary; - - if (xdr_decode_word(buf, offset, &mic->len)) - return -EFAULT; - offset += 4; - - /* Is the mic partially in the head? */ - boundary = buf->head[0].iov_len; - if (offset < boundary && (offset + mic->len) > boundary) - xdr_shift_buf(buf, boundary - offset); - - /* Is the mic partially in the pages? */ - boundary += buf->page_len; - if (offset < boundary && (offset + mic->len) > boundary) - xdr_shrink_pagelen(buf, boundary - offset); - - if (xdr_buf_subsegment(buf, &subbuf, offset, mic->len)) - return -EFAULT; - - /* Is the mic contained entirely in the head? */ - mic->data = subbuf.head[0].iov_base; - if (subbuf.head[0].iov_len == mic->len) - return 0; - /* ..or is the mic contained entirely in the tail? */ - mic->data = subbuf.tail[0].iov_base; - if (subbuf.tail[0].iov_len == mic->len) - return 0; - - /* Find a contiguous area in @buf to hold all of @mic */ - if (mic->len > buf->buflen - buf->len) - return -ENOMEM; - if (buf->tail[0].iov_len != 0) - mic->data = buf->tail[0].iov_base + buf->tail[0].iov_len; - else - mic->data = buf->head[0].iov_base + buf->head[0].iov_len; - __read_bytes_from_xdr_buf(&subbuf, mic->data, mic->len); - return 0; -} -EXPORT_SYMBOL_GPL(xdr_buf_read_mic); - /* Returns 0 on success, or else a negative error code. */ static int xdr_xcode_array2(struct xdr_buf *buf, unsigned int base, -- cgit v1.2.3-58-ga151 From 469aef23aa4e49d5191050410a1422117db03e11 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Fri, 28 Feb 2020 07:23:23 -0600 Subject: sunrpc: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: J. Bruce Fields Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 1afe38eb33f7..7f0a83451bc0 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -380,7 +380,7 @@ struct svc_deferred_req { struct cache_deferred_req handle; size_t xprt_hlen; int argslen; - __be32 args[0]; + __be32 args[]; }; struct svc_process_info { -- cgit v1.2.3-58-ga151 From 412055398b9e67e07347a936fc4a6adddabe9cf4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 14:45:53 -0500 Subject: nfsd: Fix NFSv4 READ on RDMA when using readv svcrdma expects that the payload falls precisely into the xdr_buf page vector. This does not seem to be the case for nfsd4_encode_readv(). This code is called only when fops->splice_read is missing or when RQ_SPLICE_OK is clear, so it's not a noticeable problem in many common cases. Add new transport method: ->xpo_read_payload so that when a READ payload does not fit exactly in rq_res's page vector, the XDR encoder can inform the RPC transport exactly where that payload is, without the payload's XDR pad. That way, when a Write chunk is present, the transport knows what byte range in the Reply message is supposed to be matched with the chunk. Note that the Linux NFS server implementation of NFS/RDMA can currently handle only one Write chunk per RPC-over-RDMA message. This simplifies the implementation of this fix. 
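In outline, an XDR encoder reports the payload's byte range in rq_res and the transport's xpo_read_payload method records it; a sketch of the calling pattern this patch establishes (payload_offset and payload_len are illustrative variables):

	/*
	 * Sketch: after reserving space for the READ payload in rq_res,
	 * the encoder tells the transport where that payload lives, so
	 * svcrdma can match it against the client's Write chunk.
	 */
	ret = svc_encode_read_payload(rqstp, payload_offset, payload_len);
	if (ret)
		return nfserr_io;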
Fixes: b04209806384 ("nfsd4: allow exotic read compounds") Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=198053 Signed-off-by: Chuck Lever --- fs/nfsd/nfs4xdr.c | 20 +++++++++------- include/linux/sunrpc/svc.h | 3 +++ include/linux/sunrpc/svc_rdma.h | 8 ++++++- include/linux/sunrpc/svc_xprt.h | 2 ++ net/sunrpc/svc.c | 16 +++++++++++++ net/sunrpc/svcsock.c | 8 +++++++ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 1 + net/sunrpc/xprtrdma/svc_rdma_rw.c | 30 ++++++++++++++---------- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 40 +++++++++++++++++++++++++++++++- net/sunrpc/xprtrdma/svc_rdma_transport.c | 1 + 10 files changed, 106 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index ad38aed6f5c2..eedb62c411a5 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3594,17 +3594,17 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, u32 zzz = 0; int pad; + /* + * svcrdma requires every READ payload to start somewhere + * in xdr->pages. + */ + if (xdr->iov == xdr->buf->head) { + xdr->iov = NULL; + xdr->end = xdr->p; + } + len = maxcount; v = 0; - - thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p)); - p = xdr_reserve_space(xdr, (thislen+3)&~3); - WARN_ON_ONCE(!p); - resp->rqstp->rq_vec[v].iov_base = p; - resp->rqstp->rq_vec[v].iov_len = thislen; - v++; - len -= thislen; - while (len) { thislen = min_t(long, len, PAGE_SIZE); p = xdr_reserve_space(xdr, (thislen+3)&~3); @@ -3623,6 +3623,8 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, read->rd_length = maxcount; if (nfserr) return nfserr; + if (svc_encode_read_payload(resp->rqstp, starting_len + 8, maxcount)) + return nfserr_io; xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3)); tmp = htonl(eof); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7f0a83451bc0..fd390894a584 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +int svc_encode_read_payload(struct svc_rqst *rqstp, + unsigned int offset, + unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, struct kvec *first, size_t total); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 40f65888dd38..04e4a34d1c6a 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -137,6 +137,8 @@ struct svc_rdma_recv_ctxt { unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; + unsigned int rc_read_payload_offset; + unsigned int rc_read_payload_length; struct page *rc_pages[RPCSVC_MAXPAGES]; }; @@ -170,7 +172,9 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, __be32 *p); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, - __be32 *wr_ch, struct xdr_buf *xdr); + __be32 *wr_ch, struct xdr_buf *xdr, + unsigned int offset, + unsigned long length); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, bool writelist, struct xdr_buf *xdr); @@ -189,6 +193,8 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, struct xdr_buf *xdr, __be32 *wr_lst); extern int svc_rdma_sendto(struct svc_rqst *); +extern int svc_rdma_read_payload(struct 
svc_rqst *rqstp, unsigned int offset, + unsigned int length); /* svc_rdma_transport.c */ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ea6f46be9cb7..9e1e046de176 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -21,6 +21,8 @@ struct svc_xprt_ops { int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); + int (*xpo_read_payload)(struct svc_rqst *, unsigned int, + unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 187dd4e73d64..18676d36f490 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1636,6 +1636,22 @@ u32 svc_max_payload(const struct svc_rqst *rqstp) } EXPORT_SYMBOL_GPL(svc_max_payload); +/** + * svc_encode_read_payload - mark a range of bytes as a READ payload + * @rqstp: svc_rqst to operate on + * @offset: payload's byte offset in rqstp->rq_res + * @length: size of payload, in bytes + * + * Returns zero on success, or a negative errno if a permanent + * error occurred. + */ +int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length); +} +EXPORT_SYMBOL_GPL(svc_encode_read_payload); + /** * svc_fill_write_vector - Construct data argument for VFS write call * @rqstp: svc_rqst to operate on diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2934dd711715..758ab10690de 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -279,6 +279,12 @@ out: return len; } +static int svc_sock_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + return 0; +} + /* * Report socket names for nfsdfs */ @@ -653,6 +659,7 @@ static const struct svc_xprt_ops svc_udp_ops = { .xpo_create = svc_udp_create, .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, + .xpo_read_payload = svc_sock_read_payload, .xpo_release_rqst = svc_release_udp_skb, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, @@ -1171,6 +1178,7 @@ static const struct svc_xprt_ops svc_tcp_ops = { .xpo_create = svc_tcp_create, .xpo_recvfrom = svc_tcp_recvfrom, .xpo_sendto = svc_tcp_sendto, + .xpo_read_payload = svc_sock_read_payload, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_tcp_sock_detach, .xpo_free = svc_sock_free, diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 96bccd398469..71127d898562 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -193,6 +193,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma) out: ctxt->rc_page_count = 0; + ctxt->rc_read_payload_length = 0; return ctxt; out_empty: diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 003610ce00bc..927f98995356 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -482,18 +482,19 @@ static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info, vec->iov_len); } -/* Send an xdr_buf's page list by itself. A Write chunk is - * just the page list. a Reply chunk is the head, page list, - * and tail. This function is shared between the two types - * of chunk. +/* Send an xdr_buf's page list by itself. A Write chunk is just + * the page list. 
A Reply chunk is @xdr's head, page list, and + * tail. This function is shared between the two types of chunk. */ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, - struct xdr_buf *xdr) + struct xdr_buf *xdr, + unsigned int offset, + unsigned long length) { info->wi_xdr = xdr; - info->wi_next_off = 0; + info->wi_next_off = offset - xdr->head[0].iov_len; return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg, - xdr->page_len); + length); } /** @@ -501,6 +502,8 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, * @rdma: controlling RDMA transport * @wr_ch: Write chunk provided by client * @xdr: xdr_buf containing the data payload + * @offset: payload's byte offset in @xdr + * @length: size of payload, in bytes * * Returns a non-negative number of bytes the chunk consumed, or * %-E2BIG if the payload was larger than the Write chunk, @@ -510,19 +513,20 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, * %-EIO if rdma_rw initialization failed (DMA mapping, etc). */ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, - struct xdr_buf *xdr) + struct xdr_buf *xdr, + unsigned int offset, unsigned long length) { struct svc_rdma_write_info *info; int ret; - if (!xdr->page_len) + if (!length) return 0; info = svc_rdma_write_info_alloc(rdma, wr_ch); if (!info) return -ENOMEM; - ret = svc_rdma_send_xdr_pagelist(info, xdr); + ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length); if (ret < 0) goto out_err; @@ -531,7 +535,7 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, goto out_err; trace_svcrdma_encode_write(xdr->page_len); - return xdr->page_len; + return length; out_err: svc_rdma_write_info_free(info); @@ -571,7 +575,9 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, * client did not provide Write chunks. */ if (!writelist && xdr->page_len) { - ret = svc_rdma_send_xdr_pagelist(info, xdr); + ret = svc_rdma_send_xdr_pagelist(info, xdr, + xdr->head[0].iov_len, + xdr->page_len); if (ret < 0) goto out_err; consumed += xdr->page_len; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index f3f108090aa4..a11983c2056f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -858,7 +858,18 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) if (wr_lst) { /* XXX: Presume the client sent only one Write chunk */ - ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr); + unsigned long offset; + unsigned int length; + + if (rctxt->rc_read_payload_length) { + offset = rctxt->rc_read_payload_offset; + length = rctxt->rc_read_payload_length; + } else { + offset = xdr->head[0].iov_len; + length = xdr->page_len; + } + ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset, + length); if (ret < 0) goto err2; svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret); @@ -900,3 +911,30 @@ out: ret = -ENOTCONN; goto out; } + +/** + * svc_rdma_read_payload - special processing for a READ payload + * @rqstp: svc_rqst to operate on + * @offset: payload's byte offset in @xdr + * @length: size of payload, in bytes + * + * Returns zero on success. + * + * For the moment, just record the xdr_buf location of the READ + * payload. svc_rdma_sendto will use that location later when + * we actually send the payload. 
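+ *
+ * Since the transport currently supports only a single Write chunk per
+ * RPC, a subsequent call simply overwrites the recorded range.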
+ */ +int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; + + /* XXX: Just one READ payload slot for now, since our + * transport implementation currently supports only one + * Write chunk. + */ + rctxt->rc_read_payload_offset = offset; + rctxt->rc_read_payload_length = length; + + return 0; +} diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 145a3615c319..f6aad2798063 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -82,6 +82,7 @@ static const struct svc_xprt_ops svc_rdma_ops = { .xpo_create = svc_rdma_create, .xpo_recvfrom = svc_rdma_recvfrom, .xpo_sendto = svc_rdma_sendto, + .xpo_read_payload = svc_rdma_read_payload, .xpo_release_rqst = svc_rdma_release_rqst, .xpo_detach = svc_rdma_detach, .xpo_free = svc_rdma_free, -- cgit v1.2.3-58-ga151 From 96f194b715b61b11f0184c776a1283df8e152033 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:01:08 -0500 Subject: SUNRPC: Add xdr_pad_size() helper Introduce a helper function to compute the XDR pad size of a variable-length XDR object. Clean up: Replace open-coded calculation of XDR pad sizes. I'm sure I haven't found every instance of this calculation. Signed-off-by: Chuck Lever --- include/linux/sunrpc/xdr.h | 15 +++++++++++++++ net/sunrpc/auth_gss/auth_gss.c | 2 +- net/sunrpc/auth_gss/svcauth_gss.c | 5 +++-- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 9 ++------- 4 files changed, 21 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b41f34977995..83cd9f15c526 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -300,6 +300,21 @@ xdr_align_size(size_t n) return (n + mask) & ~mask; } +/** + * xdr_pad_size - Calculate size of an object's pad + * @n: Size of an object being XDR encoded (in bytes) + * + * This implementation avoids the need for conditional + * branches or modulo division. + * + * Return value: + * Size (in bytes) of the needed XDR pad + */ +static inline size_t xdr_pad_size(size_t n) +{ + return xdr_align_size(n) - n; +} + /** * xdr_stream_encode_u32 - Encode a 32-bit integer * @xdr: pointer to xdr_stream diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 24ca861815b1..ee060d57d216 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1877,7 +1877,7 @@ static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, else iov = snd_buf->head; p = iov->iov_base + iov->iov_len; - pad = 3 - ((snd_buf->len - offset - 1) & 3); + pad = xdr_pad_size(snd_buf->len - offset); memset(p, 0, pad); iov->iov_len += pad; snd_buf->len += pad; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 65b67b257302..fcf74621f008 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -961,7 +961,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs /* XXX: This is very inefficient. It would be better to either do * this while we encrypt, or maybe in the receive code, if we can peak * ahead and work out the service and mechanism there. 
*/ - offset = buf->head[0].iov_len % 4; + offset = xdr_pad_size(buf->head[0].iov_len); if (offset) { buf->buflen = RPCSVC_MAXPAYLOAD; xdr_shift_buf(buf, offset); @@ -1680,7 +1680,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp) goto out; integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base; integ_len = resbuf->len - integ_offset; - BUG_ON(integ_len % 4); + if (integ_len & 3) + goto out; *p++ = htonl(integ_len); *p++ = htonl(gc->gc_seq); if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 354c5619176a..4add875277f8 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -322,11 +322,6 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) return ret; } -static u32 xdr_padsize(u32 len) -{ - return (len & 3) ? (4 - (len & 3)) : 0; -} - /* Returns length of transport header, in bytes. */ static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp) @@ -595,7 +590,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, if (wr_lst) { u32 xdrpad; - xdrpad = xdr_padsize(xdr->page_len); + xdrpad = xdr_pad_size(xdr->page_len); if (taillen && xdrpad) { tailbase += xdrpad; taillen -= xdrpad; @@ -670,7 +665,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, if (wr_lst) { base = xdr->tail[0].iov_base; len = xdr->tail[0].iov_len; - xdr_pad = xdr_padsize(xdr->page_len); + xdr_pad = xdr_pad_size(xdr->page_len); if (len && xdr_pad) { base += xdr_pad; -- cgit v1.2.3-58-ga151 From e604aad2cac7357162f661e45f2f60e46faa7b17 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:01:08 -0500 Subject: svcrdma: Use struct xdr_stream to decode ingress transport headers The logic that checks incoming network headers has to be scrupulous. De-duplicate: replace open-coded buffer overflow checks with the use of xdr_stream helpers that are used most everywhere else XDR decoding is done. One minor change to the sanity checks: instead of checking the length of individual segments, cap the length of the whole chunk to be sure it can fit in the set of pages available in rq_pages. This should be a better test of whether the server can handle the chunks in each request. 
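To illustrate the idiom: the sketch below is not code from the patch. Only xdr_inline_decode() and rpcrdma_fixed_maxsz are real kernel names; the two functions are schematic before/after shapes of the same sanity check.

/* Open-coded style being removed: the caller carries an 'end'
 * pointer and must remember a bounds check after every advance.
 */
static __be32 *decode_fixed_old(__be32 *p, const __be32 *end)
{
	p += rpcrdma_fixed_maxsz;
	if (p > end)
		return NULL;
	return p;
}

/* xdr_stream style being adopted: xdr_inline_decode() performs
 * the bounds check itself and returns NULL when the requested
 * bytes would run past the end of the Receive buffer.
 */
static bool decode_fixed_new(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, rpcrdma_fixed_maxsz * sizeof(*p));
	return p != NULL;
}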
Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma.h | 3 +- include/linux/sunrpc/svc_rdma.h | 1 + include/trace/events/rpcrdma.h | 7 +- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 206 +++++++++++++++++++------------- 4 files changed, 130 insertions(+), 87 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 92d182fd8e3b..320c672d84de 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -58,7 +58,8 @@ enum { enum { rpcrdma_fixed_maxsz = 4, rpcrdma_segment_maxsz = 4, - rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, + rpcrdma_readseg_maxsz = 1 + rpcrdma_segment_maxsz, + rpcrdma_readchunk_maxsz = 1 + rpcrdma_readseg_maxsz, }; /* diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 04e4a34d1c6a..c790dbb0dd90 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -132,6 +132,7 @@ struct svc_rdma_recv_ctxt { struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; + struct xdr_stream rc_stream; bool rc_temp; u32 rc_byte_len; unsigned int rc_page_count; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 545fe936a0cc..814b73bd2cc7 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1469,7 +1469,7 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event, ); #define DEFINE_SEGMENT_EVENT(name) \ - DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\ + DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\ TP_PROTO( \ u32 handle, \ u32 length, \ @@ -1477,8 +1477,9 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event, ), \ TP_ARGS(handle, length, offset)) -DEFINE_SEGMENT_EVENT(rseg); -DEFINE_SEGMENT_EVENT(wseg); +DEFINE_SEGMENT_EVENT(decode_wseg); +DEFINE_SEGMENT_EVENT(encode_rseg); +DEFINE_SEGMENT_EVENT(encode_wseg); DECLARE_EVENT_CLASS(svcrdma_chunk_event, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 71127d898562..bd92ed611b4c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -358,15 +358,14 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp, arg->len = ctxt->rc_byte_len; } -/* This accommodates the largest possible Write chunk, - * in one segment. +/* This accommodates the largest possible Write chunk. */ -#define MAX_BYTES_WRITE_SEG ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT)) +#define MAX_BYTES_WRITE_CHUNK ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT)) /* This accommodates the largest possible Position-Zero - * Read chunk or Reply chunk, in one segment. + * Read chunk or Reply chunk. */ -#define MAX_BYTES_SPECIAL_SEG ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT)) +#define MAX_BYTES_SPECIAL_CHUNK ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT)) /* Sanity check the Read list. * @@ -374,7 +373,7 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp, * - This implementation supports only one Read chunk. * * Sanity checks: - * - Read list does not overflow buffer. + * - Read list does not overflow Receive buffer. * - Segment size limited by largest NFS data payload. * * The segment count is limited to how many segments can @@ -382,30 +381,44 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp, * buffer. That's about 40 Read segments for a 1KB inline * threshold. * - * Returns pointer to the following Write list. + * Return values: + * %true: Read list is valid. 
@rctxt's xdr_stream is updated + * to point to the first byte past the Read list. + * %false: Read list is corrupt. @rctxt's xdr_stream is left + * in an unknown state. */ -static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end) +static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt) { - u32 position; + u32 position, len; bool first; + __be32 *p; + + p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); + if (!p) + return false; + len = 0; first = true; - while (*p++ != xdr_zero) { + while (*p != xdr_zero) { + p = xdr_inline_decode(&rctxt->rc_stream, + rpcrdma_readseg_maxsz * sizeof(*p)); + if (!p) + return false; + if (first) { - position = be32_to_cpup(p++); + position = be32_to_cpup(p); first = false; - } else if (be32_to_cpup(p++) != position) { - return NULL; + } else if (be32_to_cpup(p) != position) { + return false; } - p++; /* handle */ - if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG) - return NULL; - p += 2; /* offset */ + p += 2; + len += be32_to_cpup(p); - if (p > end) - return NULL; + p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); + if (!p) + return false; } - return p; + return len <= MAX_BYTES_SPECIAL_CHUNK; } /* The segment count is limited to how many segments can @@ -413,67 +426,93 @@ static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end) * buffer. That's about 60 Write segments for a 1KB inline * threshold. */ -static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end, - u32 maxlen) +static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen) { - u32 i, segcount; + u32 i, segcount, total; + __be32 *p; + + p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); + if (!p) + return false; + segcount = be32_to_cpup(p); - segcount = be32_to_cpup(p++); + total = 0; for (i = 0; i < segcount; i++) { - p++; /* handle */ - if (be32_to_cpup(p++) > maxlen) - return NULL; - p += 2; /* offset */ + u32 handle, length; + u64 offset; - if (p > end) - return NULL; - } + p = xdr_inline_decode(&rctxt->rc_stream, + rpcrdma_segment_maxsz * sizeof(*p)); + if (!p) + return false; + + handle = be32_to_cpup(p++); + length = be32_to_cpup(p++); + xdr_decode_hyper(p, &offset); + trace_svcrdma_decode_wseg(handle, length, offset); - return p; + total += length; + } + return total <= maxlen; } /* Sanity check the Write list. * * Implementation limits: - * - This implementation supports only one Write chunk. + * - This implementation currently supports only one Write chunk. * * Sanity checks: - * - Write list does not overflow buffer. - * - Segment size limited by largest NFS data payload. - * - * Returns pointer to the following Reply chunk. + * - Write list does not overflow Receive buffer. + * - Chunk size limited by largest NFS data payload. + * + * Return values: + * %true: Write list is valid. @rctxt's xdr_stream is updated + * to point to the first byte past the Write list. + * %false: Write list is corrupt. @rctxt's xdr_stream is left + * in an unknown state. 
*/ -static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end) +static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt) { - u32 chcount; + u32 chcount = 0; + __be32 *p; - chcount = 0; - while (*p++ != xdr_zero) { - p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG); + p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); + if (!p) + return false; + while (*p != xdr_zero) { + if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK)) + return false; + ++chcount; + p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); if (!p) - return NULL; - if (chcount++ > 1) - return NULL; + return false; } - return p; + return chcount < 2; } /* Sanity check the Reply chunk. * * Sanity checks: - * - Reply chunk does not overflow buffer. - * - Segment size limited by largest NFS data payload. - * - * Returns pointer to the following RPC header. + * - Reply chunk does not overflow Receive buffer. + * - Chunk size limited by largest NFS data payload. + * + * Return values: + * %true: Reply chunk is valid. @rctxt's xdr_stream is updated + * to point to the first byte past the Reply chunk. + * %false: Reply chunk is corrupt. @rctxt's xdr_stream is left + * in an unknown state. */ -static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end) +static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt) { - if (*p++ != xdr_zero) { - p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG); - if (!p) - return NULL; - } - return p; + __be32 *p; + + p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); + if (!p) + return false; + if (*p != xdr_zero) + if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK)) + return false; + return true; } /* RPC-over-RDMA Version One private extension: Remote Invalidation. @@ -538,60 +577,61 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey); } -/* On entry, xdr->head[0].iov_base points to first byte in the - * RPC-over-RDMA header. +/** + * svc_rdma_xdr_decode_req - Decode the transport header + * @rq_arg: xdr_buf containing ingress RPC/RDMA message + * @rctxt: state of decoding + * + * On entry, xdr->head[0].iov_base points to first byte of the + * RPC-over-RDMA transport header. * * On successful exit, head[0] points to first byte past the * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message. + * * The length of the RPC-over-RDMA header is returned. * * Assumptions: * - The transport header is entirely contained in the head iovec. 
*/ -static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg) +static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg, + struct svc_rdma_recv_ctxt *rctxt) { - __be32 *p, *end, *rdma_argp; + __be32 *p, *rdma_argp; unsigned int hdr_len; - /* Verify that there's enough bytes for header + something */ - if (rq_arg->len <= RPCRDMA_HDRLEN_ERR) - goto out_short; - rdma_argp = rq_arg->head[0].iov_base; - if (*(rdma_argp + 1) != rpcrdma_version) - goto out_version; + xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL); - switch (*(rdma_argp + 3)) { + p = xdr_inline_decode(&rctxt->rc_stream, + rpcrdma_fixed_maxsz * sizeof(*p)); + if (unlikely(!p)) + goto out_short; + p++; + if (*p != rpcrdma_version) + goto out_version; + p += 2; + switch (*p) { case rdma_msg: break; case rdma_nomsg: break; - case rdma_done: goto out_drop; - case rdma_error: goto out_drop; - default: goto out_proc; } - end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len); - p = xdr_check_read_list(rdma_argp + 4, end); - if (!p) + if (!xdr_check_read_list(rctxt)) goto out_inval; - p = xdr_check_write_list(p, end); - if (!p) - goto out_inval; - p = xdr_check_reply_chunk(p, end); - if (!p) + if (!xdr_check_write_list(rctxt)) goto out_inval; - if (p > end) + if (!xdr_check_reply_chunk(rctxt)) goto out_inval; - rq_arg->head[0].iov_base = p; - hdr_len = (unsigned long)p - (unsigned long)rdma_argp; + rq_arg->head[0].iov_base = rctxt->rc_stream.p; + hdr_len = xdr_stream_pos(&rctxt->rc_stream); rq_arg->head[0].iov_len -= hdr_len; rq_arg->len -= hdr_len; trace_svcrdma_decode_rqst(rdma_argp, hdr_len); @@ -786,7 +826,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) rqstp->rq_next_page = rqstp->rq_respages; p = (__be32 *)rqstp->rq_arg.head[0].iov_base; - ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg); + ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt); if (ret < 0) goto out_err; if (ret == 0) -- cgit v1.2.3-58-ga151 From 2fe8c446338e083a1f3c0ccaaaa20e7d48e71ebc Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:01:08 -0500 Subject: svcrdma: De-duplicate code that locates Write and Reply chunks Cache the locations of the Requester-provided Write list and Reply chunk so that the Send path doesn't need to parse the Call header again. 
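Put another way, the Receive path now memoizes its parse results. A condensed sketch of the send-side consequence (the helper itself is illustrative; the rc_write_list and rc_reply_chunk fields are the ones added below):

/* svc_rdma_sendto() can read the cached chunk locations instead
 * of re-walking the Call's transport header.
 */
static void svc_rdma_locate_chunks(const struct svc_rdma_recv_ctxt *rctxt,
				   __be32 **wr_lst, __be32 **rp_ch)
{
	*wr_lst = rctxt->rc_write_list;		/* NULL: no Write list */
	*rp_ch = rctxt->rc_reply_chunk;		/* NULL: no Reply chunk */
}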
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 2 ++ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 9 +++++++- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 38 ++++----------------------------- 3 files changed, 14 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index c790dbb0dd90..e714e4d90ac5 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -138,6 +138,8 @@ struct svc_rdma_recv_ctxt { unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; + __be32 *rc_write_list; + __be32 *rc_reply_chunk; unsigned int rc_read_payload_offset; unsigned int rc_read_payload_length; struct page *rc_pages[RPCSVC_MAXPAGES]; diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index bd92ed611b4c..70129d7cc972 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -479,6 +479,7 @@ static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt) p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); if (!p) return false; + rctxt->rc_write_list = p; while (*p != xdr_zero) { if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK)) return false; @@ -487,6 +488,8 @@ static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt) if (!p) return false; } + if (!chcount) + rctxt->rc_write_list = NULL; return chcount < 2; } @@ -509,9 +512,13 @@ static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt) p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); if (!p) return false; - if (*p != xdr_zero) + rctxt->rc_reply_chunk = p; + if (*p != xdr_zero) { if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK)) return false; + } else { + rctxt->rc_reply_chunk = NULL; + } return true; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 4add875277f8..94895635c007 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -449,36 +449,6 @@ static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch, xdr_encode_write_chunk(p, rp_ch, consumed); } -/* Parse the RPC Call's transport header. - */ -static void svc_rdma_get_write_arrays(__be32 *rdma_argp, - __be32 **write, __be32 **reply) -{ - __be32 *p; - - p = rdma_argp + rpcrdma_fixed_maxsz; - - /* Read list */ - while (*p++ != xdr_zero) - p += 5; - - /* Write list */ - if (*p != xdr_zero) { - *write = p; - while (*p++ != xdr_zero) - p += 1 + be32_to_cpu(*p) * 4; - } else { - *write = NULL; - p++; - } - - /* Reply chunk */ - if (*p != xdr_zero) - *reply = p; - else - *reply = NULL; -} - static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, struct page *page, @@ -813,14 +783,14 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; - __be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch; + __be32 *rdma_argp = rctxt->rc_recv_buf; + __be32 *wr_lst = rctxt->rc_write_list; + __be32 *rp_ch = rctxt->rc_reply_chunk; struct xdr_buf *xdr = &rqstp->rq_res; struct svc_rdma_send_ctxt *sctxt; + __be32 *p, *rdma_resp; int ret; - rdma_argp = rctxt->rc_recv_buf; - svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch); - /* Create the RDMA response header. xprt->xpt_mutex, * acquired in svc_send(), serializes RPC replies. 
The * code path below that inserts the credit grant value -- cgit v1.2.3-58-ga151 From 6fa5785e78d39f03d9fa33dea4dad2e7caf21e1e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:02:19 -0500 Subject: svcrdma: Update synopsis of svc_rdma_send_reply_chunk() Preparing for subsequent patches, no behavior change expected. Pass the RPC Call's svc_rdma_recv_ctxt deeper into the sendto() path. This enables passing more information about Requester- provided Write and Reply chunks into the lower-level send functions. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 2 +- net/sunrpc/xprtrdma/svc_rdma_rw.c | 12 ++++++------ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index e714e4d90ac5..42b68126cc60 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -179,7 +179,7 @@ extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, unsigned int offset, unsigned long length); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, - __be32 *rp_ch, bool writelist, + const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr); /* svc_rdma_sendto.c */ diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 927f98995356..aee8ee2d01da 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -545,8 +545,7 @@ out_err: /** * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk * @rdma: controlling RDMA transport - * @rp_ch: Reply chunk provided by client - * @writelist: true if client provided a Write list + * @rctxt: Write and Reply chunks from client * @xdr: xdr_buf containing an RPC Reply * * Returns a non-negative number of bytes the chunk consumed, or @@ -556,13 +555,14 @@ out_err: * %-ENOTCONN if posting failed (connection is lost), * %-EIO if rdma_rw initialization failed (DMA mapping, etc). */ -int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, - bool writelist, struct xdr_buf *xdr) +int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, + const struct svc_rdma_recv_ctxt *rctxt, + struct xdr_buf *xdr) { struct svc_rdma_write_info *info; int consumed, ret; - info = svc_rdma_write_info_alloc(rdma, rp_ch); + info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk); if (!info) return -ENOMEM; @@ -574,7 +574,7 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, /* Send the page list in the Reply chunk only if the * client did not provide Write chunks. 
*/ - if (!writelist && xdr->page_len) { + if (!rctxt->rc_write_list && xdr->page_len) { ret = svc_rdma_send_xdr_pagelist(info, xdr, xdr->head[0].iov_len, xdr->page_len); diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 94895635c007..0b6ff55b1ab1 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -833,7 +833,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret); } if (rp_ch) { - ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr); + ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res); if (ret < 0) goto err2; svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); -- cgit v1.2.3-58-ga151 From 4554755ed81bb690d709168550aba5b46447f069 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:02:19 -0500 Subject: svcrdma: Update synopsis of svc_rdma_map_reply_msg() Preparing for subsequent patches, no behavior change expected. Pass the RPC Call's svc_rdma_recv_ctxt deeper into the sendto() path. This enables passing more information about Requester- provided Write and Reply chunks into those lower-level functions. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 5 +- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 82 ++++++++++++++++++------------ 3 files changed, 53 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 42b68126cc60..c506732886b3 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -193,8 +193,9 @@ extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, unsigned int len); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - struct xdr_buf *xdr, __be32 *wr_lst); + struct svc_rdma_send_ctxt *sctxt, + const struct svc_rdma_recv_ctxt *rctxt, + struct xdr_buf *xdr); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 908e78bb87c6..ce1a7a706f36 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -117,7 +117,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, { int ret; - ret = svc_rdma_map_reply_msg(rdma, ctxt, &rqst->rq_snd_buf, NULL); + ret = svc_rdma_map_reply_msg(rdma, ctxt, NULL, &rqst->rq_snd_buf); if (ret < 0) return -EIO; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 0b6ff55b1ab1..0301b8721868 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -502,13 +502,19 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, DMA_TO_DEVICE); } -/* If the xdr_buf has more elements than the device can - * transmit in a single RDMA Send, then the reply will - * have to be copied into a bounce buffer. 
+/** + * svc_rdma_pull_up_needed - Determine whether to use pull-up + * @rdma: controlling transport + * @rctxt: Write and Reply chunks provided by client + * @xdr: xdr_buf containing RPC message to transmit + * + * Returns: + * %true if pull-up must be used + * %false otherwise */ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, - struct xdr_buf *xdr, - __be32 *wr_lst) + const struct svc_rdma_recv_ctxt *rctxt, + struct xdr_buf *xdr) { int elements; @@ -516,7 +522,7 @@ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, elements = 1; /* xdr->pages */ - if (!wr_lst) { + if (!rctxt || !rctxt->rc_write_list) { unsigned int remaining; unsigned long pageoff; @@ -538,26 +544,35 @@ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, return elements >= rdma->sc_max_send_sges; } -/* The device is not capable of sending the reply directly. - * Assemble the elements of @xdr into the transport header - * buffer. +/** + * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer + * @rdma: controlling transport + * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared + * @rctxt: Write and Reply chunks provided by client + * @xdr: prepared xdr_buf containing RPC message + * + * The device is not capable of sending the reply directly. + * Assemble the elements of @xdr into the transport header buffer. + * + * Returns zero on success, or a negative errno on failure. */ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - struct xdr_buf *xdr, __be32 *wr_lst) + struct svc_rdma_send_ctxt *sctxt, + const struct svc_rdma_recv_ctxt *rctxt, + const struct xdr_buf *xdr) { unsigned char *dst, *tailbase; unsigned int taillen; - dst = ctxt->sc_xprt_buf; - dst += ctxt->sc_sges[0].length; + dst = sctxt->sc_xprt_buf; + dst += sctxt->sc_sges[0].length; memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); dst += xdr->head[0].iov_len; tailbase = xdr->tail[0].iov_base; taillen = xdr->tail[0].iov_len; - if (wr_lst) { + if (rctxt && rctxt->rc_write_list) { u32 xdrpad; xdrpad = xdr_pad_size(xdr->page_len); @@ -586,20 +601,20 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, if (taillen) memcpy(dst, tailbase, taillen); - ctxt->sc_sges[0].length += xdr->len; + sctxt->sc_sges[0].length += xdr->len; ib_dma_sync_single_for_device(rdma->sc_pd->device, - ctxt->sc_sges[0].addr, - ctxt->sc_sges[0].length, + sctxt->sc_sges[0].addr, + sctxt->sc_sges[0].length, DMA_TO_DEVICE); return 0; } -/* svc_rdma_map_reply_msg - Map the buffer holding RPC message +/* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message * @rdma: controlling transport - * @ctxt: send_ctxt for the Send WR + * @sctxt: send_ctxt for the Send WR + * @rctxt: Write and Reply chunks provided by client * @xdr: prepared xdr_buf containing RPC message - * @wr_lst: pointer to Call header's Write list, or NULL * * Load the xdr_buf into the ctxt's sge array, and DMA map each * element as it is added. @@ -607,8 +622,9 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, * Returns zero on success, or a negative errno on failure. 
*/ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - struct xdr_buf *xdr, __be32 *wr_lst) + struct svc_rdma_send_ctxt *sctxt, + const struct svc_rdma_recv_ctxt *rctxt, + struct xdr_buf *xdr) { unsigned int len, remaining; unsigned long page_off; @@ -617,11 +633,11 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, u32 xdr_pad; int ret; - if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst)) - return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst); + if (svc_rdma_pull_up_needed(rdma, rctxt, xdr)) + return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr); - ++ctxt->sc_cur_sge_no; - ret = svc_rdma_dma_map_buf(rdma, ctxt, + ++sctxt->sc_cur_sge_no; + ret = svc_rdma_dma_map_buf(rdma, sctxt, xdr->head[0].iov_base, xdr->head[0].iov_len); if (ret < 0) @@ -632,7 +648,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, * have added XDR padding in the tail buffer, and that * should not be included inline. */ - if (wr_lst) { + if (rctxt && rctxt->rc_write_list) { base = xdr->tail[0].iov_base; len = xdr->tail[0].iov_len; xdr_pad = xdr_pad_size(xdr->page_len); @@ -651,8 +667,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, while (remaining) { len = min_t(u32, PAGE_SIZE - page_off, remaining); - ++ctxt->sc_cur_sge_no; - ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, + ++sctxt->sc_cur_sge_no; + ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++, page_off, len); if (ret < 0) return ret; @@ -665,8 +681,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, len = xdr->tail[0].iov_len; tail: if (len) { - ++ctxt->sc_cur_sge_no; - ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); + ++sctxt->sc_cur_sge_no; + ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len); if (ret < 0) return ret; } @@ -720,8 +736,8 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, int ret; if (!rp_ch) { - ret = svc_rdma_map_reply_msg(rdma, sctxt, - &rqstp->rq_res, wr_lst); + ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, + &rqstp->rq_res); if (ret < 0) return ret; } -- cgit v1.2.3-58-ga151 From 5c266df52701635edfd49415b225fb17ceac5183 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:02:20 -0500 Subject: SUNRPC: Add encoders for list item discriminators Clean up. These are taken from the client-side RPC/RDMA transport to a more global header file so they can be used elsewhere. 
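A usage sketch may help (the encoder below is an illustrative caller, not from this patch; only the two helpers are). An XDR optional list encodes each entry behind a "present" discriminator and terminates with an "absent" one:

/* Encode a list of zero or one items. Both helpers return the
 * number of bytes consumed, or -EMSGSIZE on XDR buffer overflow.
 */
static int encode_optional_list(struct xdr_stream *xdr, bool have_item)
{
	if (have_item) {
		if (xdr_stream_encode_item_present(xdr) < 0)
			return -EMSGSIZE;
		/* the item body would be encoded here */
	}
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}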
Signed-off-by: Chuck Lever --- include/linux/sunrpc/xdr.h | 38 ++++++++++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/rpc_rdma.c | 36 +++++------------------------------- 2 files changed, 43 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 83cd9f15c526..9c8b73b509a1 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -315,6 +315,44 @@ static inline size_t xdr_pad_size(size_t n) return xdr_align_size(n) - n; } +/** + * xdr_stream_encode_item_present - Encode a "present" list item + * @xdr: pointer to xdr_stream + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr) +{ + const size_t len = sizeof(__be32); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = xdr_one; + return len; +} + +/** + * xdr_stream_encode_item_absent - Encode a "not present" list item + * @xdr: pointer to xdr_stream + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) +{ + const size_t len = sizeof(__be32); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = xdr_zero; + return len; +} + /** * xdr_stream_encode_u32 - Encode a 32-bit integer * @xdr: pointer to xdr_stream diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 28020ec104d4..577513b7642e 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -275,32 +275,6 @@ out: return n; } -static inline int -encode_item_present(struct xdr_stream *xdr) -{ - __be32 *p; - - p = xdr_reserve_space(xdr, sizeof(*p)); - if (unlikely(!p)) - return -EMSGSIZE; - - *p = xdr_one; - return 0; -} - -static inline int -encode_item_not_present(struct xdr_stream *xdr) -{ - __be32 *p; - - p = xdr_reserve_space(xdr, sizeof(*p)); - if (unlikely(!p)) - return -EMSGSIZE; - - *p = xdr_zero; - return 0; -} - static void xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr) { @@ -414,7 +388,7 @@ static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, } while (nsegs); done: - return encode_item_not_present(xdr); + return xdr_stream_encode_item_absent(xdr); } /* Register and XDR encode the Write list. Supports encoding a list @@ -453,7 +427,7 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, if (nsegs < 0) return nsegs; - if (encode_item_present(xdr) < 0) + if (xdr_stream_encode_item_present(xdr) < 0) return -EMSGSIZE; segcount = xdr_reserve_space(xdr, sizeof(*segcount)); if (unlikely(!segcount)) @@ -480,7 +454,7 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, *segcount = cpu_to_be32(nchunks); done: - return encode_item_not_present(xdr); + return xdr_stream_encode_item_absent(xdr); } /* Register and XDR encode the Reply chunk. 
Supports encoding an array @@ -507,14 +481,14 @@ static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, __be32 *segcount; if (wtype != rpcrdma_replych) - return encode_item_not_present(xdr); + return xdr_stream_encode_item_absent(xdr); seg = req->rl_segments; nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); if (nsegs < 0) return nsegs; - if (encode_item_present(xdr) < 0) + if (xdr_stream_encode_item_present(xdr) < 0) return -EMSGSIZE; segcount = xdr_reserve_space(xdr, sizeof(*segcount)); if (unlikely(!segcount)) -- cgit v1.2.3-58-ga151 From 6fd5034db45c9c0ca57c98f3d5b9a0ce5869eab3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:02:20 -0500 Subject: svcrdma: Refactor chunk list encoders Same idea as the receive-side changes I did a while back: use xdr_stream helpers rather than open-coding the XDR chunk list encoders. This builds the Reply transport header from beginning to end without backtracking. As additional clean-ups, fill in documenting comments for the XDR encoders and sprinkle some trace points in the new encoding functions. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 2 + net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 15 +- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 32 +++- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 281 ++++++++++++++++++----------- 4 files changed, 209 insertions(+), 121 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index c506732886b3..d001aac13c2f 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -149,6 +149,8 @@ struct svc_rdma_send_ctxt { struct list_head sc_list; struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; + struct xdr_buf sc_hdrbuf; + struct xdr_stream sc_stream; void *sc_xprt_buf; int sc_page_count; int sc_cur_sge_no; diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index ce1a7a706f36..9830748c58d2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -181,7 +181,9 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) if (!ctxt) goto drop_connection; - p = ctxt->sc_xprt_buf; + p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_MIN); + if (!p) + goto put_ctxt; *p++ = rqst->rq_xid; *p++ = rpcrdma_version; *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests); @@ -189,7 +191,7 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) *p++ = xdr_zero; *p++ = xdr_zero; *p = xdr_zero; - svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_MIN); + svc_rdma_sync_reply_hdr(rdma, ctxt, ctxt->sc_hdrbuf.len); #ifdef SVCRDMA_BACKCHANNEL_DEBUG pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer); @@ -197,12 +199,13 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) rqst->rq_xtime = ktime_get(); rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); - if (rc) { - svc_rdma_send_ctxt_put(rdma, ctxt); - goto drop_connection; - } + if (rc) + goto put_ctxt; return 0; +put_ctxt: + svc_rdma_send_ctxt_put(rdma, ctxt); + drop_connection: dprintk("svcrdma: failed to send bc call\n"); return -ENOTCONN; diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 70129d7cc972..e2c747b5f517 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -698,7 +698,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, __be32 *rdma_argp, int status) { 
struct svc_rdma_send_ctxt *ctxt; - unsigned int length; __be32 *p; int ret; @@ -706,29 +705,46 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, if (!ctxt) return; - p = ctxt->sc_xprt_buf; + p = xdr_reserve_space(&ctxt->sc_stream, + rpcrdma_fixed_maxsz * sizeof(*p)); + if (!p) + goto put_ctxt; + *p++ = *rdma_argp; *p++ = *(rdma_argp + 1); *p++ = xprt->sc_fc_credits; - *p++ = rdma_error; + *p = rdma_error; + switch (status) { case -EPROTONOSUPPORT: + p = xdr_reserve_space(&ctxt->sc_stream, 3 * sizeof(*p)); + if (!p) + goto put_ctxt; + *p++ = err_vers; *p++ = rpcrdma_version; - *p++ = rpcrdma_version; + *p = rpcrdma_version; trace_svcrdma_err_vers(*rdma_argp); break; default: - *p++ = err_chunk; + p = xdr_reserve_space(&ctxt->sc_stream, sizeof(*p)); + if (!p) + goto put_ctxt; + + *p = err_chunk; trace_svcrdma_err_chunk(*rdma_argp); } - length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf; - svc_rdma_sync_reply_hdr(xprt, ctxt, length); + + svc_rdma_sync_reply_hdr(xprt, ctxt, ctxt->sc_hdrbuf.len); ctxt->sc_send_wr.opcode = IB_WR_SEND; ret = svc_rdma_send(xprt, &ctxt->sc_send_wr); if (ret) - svc_rdma_send_ctxt_put(xprt, ctxt); + goto put_ctxt; + return; + +put_ctxt: + svc_rdma_send_ctxt_put(xprt, ctxt); } /* By convention, backchannel calls arrive via rdma_msg type diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index c2ace0fb7a2e..9d3b9a7e954f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -151,6 +151,8 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED; ctxt->sc_cqe.done = svc_rdma_wc_send; ctxt->sc_xprt_buf = buffer; + xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf, + rdma->sc_max_req_size); ctxt->sc_sges[0].addr = addr; for (i = 0; i < rdma->sc_max_send_sges; i++) @@ -204,6 +206,10 @@ struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma) spin_unlock(&rdma->sc_send_lock); out: + rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0); + xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, + ctxt->sc_xprt_buf, NULL); + ctxt->sc_send_wr.num_sge = 0; ctxt->sc_cur_sge_no = 0; ctxt->sc_page_count = 0; @@ -322,131 +328,173 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) return ret; } -/* Returns length of transport header, in bytes. +/** + * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list + * @sctxt: Send context for the RPC Reply + * + * Return values: + * On success, returns length in bytes of the Reply XDR buffer + * that was consumed by the Reply Read list + * %-EMSGSIZE on XDR buffer overflow */ -static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp) +static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt) { - unsigned int nsegs; - __be32 *p; - - p = rdma_resp; - - /* RPC-over-RDMA V1 replies never have a Read list. */ - p += rpcrdma_fixed_maxsz + 1; - - /* Skip Write list. */ - while (*p++ != xdr_zero) { - nsegs = be32_to_cpup(p++); - p += nsegs * rpcrdma_segment_maxsz; - } + /* RPC-over-RDMA version 1 replies never have a Read list. */ + return xdr_stream_encode_item_absent(&sctxt->sc_stream); +} - /* Skip Reply chunk. 
*/ - if (*p++ != xdr_zero) { - nsegs = be32_to_cpup(p++); - p += nsegs * rpcrdma_segment_maxsz; +/** + * svc_rdma_encode_write_segment - Encode one Write segment + * @src: matching Write chunk in the RPC Call header + * @sctxt: Send context for the RPC Reply + * @remaining: remaining bytes of the payload left in the Write chunk + * + * Return values: + * On success, returns length in bytes of the Reply XDR buffer + * that was consumed by the Write segment + * %-EMSGSIZE on XDR buffer overflow + */ +static ssize_t svc_rdma_encode_write_segment(__be32 *src, + struct svc_rdma_send_ctxt *sctxt, + unsigned int *remaining) +{ + __be32 *p; + const size_t len = rpcrdma_segment_maxsz * sizeof(*p); + u32 handle, length; + u64 offset; + + p = xdr_reserve_space(&sctxt->sc_stream, len); + if (!p) + return -EMSGSIZE; + + handle = be32_to_cpup(src++); + length = be32_to_cpup(src++); + xdr_decode_hyper(src, &offset); + + *p++ = cpu_to_be32(handle); + if (*remaining < length) { + /* segment only partly filled */ + length = *remaining; + *remaining = 0; + } else { + /* entire segment was consumed */ + *remaining -= length; } + *p++ = cpu_to_be32(length); + xdr_encode_hyper(p, offset); - return (unsigned long)p - (unsigned long)rdma_resp; + trace_svcrdma_encode_wseg(handle, length, offset); + return len; } -/* One Write chunk is copied from Call transport header to Reply - * transport header. Each segment's length field is updated to - * reflect number of bytes consumed in the segment. - * - * Returns number of segments in this chunk. +/** + * svc_rdma_encode_write_chunk - Encode one Write chunk + * @src: matching Write chunk in the RPC Call header + * @sctxt: Send context for the RPC Reply + * @remaining: size in bytes of the payload in the Write chunk + * + * Copy a Write chunk from the Call transport header to the + * Reply transport header. Update each segment's length field + * to reflect the number of bytes written in that segment. + * + * Return values: + * On success, returns length in bytes of the Reply XDR buffer + * that was consumed by the Write chunk + * %-EMSGSIZE on XDR buffer overflow */ -static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src, +static ssize_t svc_rdma_encode_write_chunk(__be32 *src, + struct svc_rdma_send_ctxt *sctxt, unsigned int remaining) { unsigned int i, nsegs; - u32 seg_len; + ssize_t len, ret; - /* Write list discriminator */ - *dst++ = *src++; + len = 0; + trace_svcrdma_encode_write_chunk(remaining); - /* number of segments in this chunk */ - nsegs = be32_to_cpup(src); - *dst++ = *src++; + src++; + ret = xdr_stream_encode_item_present(&sctxt->sc_stream); + if (ret < 0) + return -EMSGSIZE; + len += ret; - for (i = nsegs; i; i--) { - /* segment's RDMA handle */ - *dst++ = *src++; - - /* bytes returned in this segment */ - seg_len = be32_to_cpu(*src); - if (remaining >= seg_len) { - /* entire segment was consumed */ - *dst = *src; - remaining -= seg_len; - } else { - /* segment only partly filled */ - *dst = cpu_to_be32(remaining); - remaining = 0; - } - dst++; src++; + nsegs = be32_to_cpup(src++); + ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs); + if (ret < 0) + return -EMSGSIZE; + len += ret; - /* segment's RDMA offset */ - *dst++ = *src++; - *dst++ = *src++; + for (i = nsegs; i; i--) { + ret = svc_rdma_encode_write_segment(src, sctxt, &remaining); + if (ret < 0) + return -EMSGSIZE; + src += rpcrdma_segment_maxsz; + len += ret; } - return nsegs; + return len; } -/* The client provided a Write list in the Call message. 
Fill in - * the segments in the first Write chunk in the Reply's transport +/** + * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list + * @rctxt: Reply context with information about the RPC Call + * @sctxt: Send context for the RPC Reply + * @length: size in bytes of the payload in the first Write chunk + * + * The client provides a Write chunk list in the Call message. Fill + * in the segments in the first Write chunk in the Reply's transport * header with the number of bytes consumed in each segment. * Remaining chunks are returned unused. * * Assumptions: * - Client has provided only one Write chunk + * + * Return values: + * On success, returns length in bytes of the Reply XDR buffer + * that was consumed by the Reply's Write list + * %-EMSGSIZE on XDR buffer overflow */ -static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch, - unsigned int consumed) +static ssize_t +svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt, + struct svc_rdma_send_ctxt *sctxt, + unsigned int length) { - unsigned int nsegs; - __be32 *p, *q; - - /* RPC-over-RDMA V1 replies never have a Read list. */ - p = rdma_resp + rpcrdma_fixed_maxsz + 1; - - q = wr_ch; - while (*q != xdr_zero) { - nsegs = xdr_encode_write_chunk(p, q, consumed); - q += 2 + nsegs * rpcrdma_segment_maxsz; - p += 2 + nsegs * rpcrdma_segment_maxsz; - consumed = 0; - } + ssize_t len, ret; - /* Terminate Write list */ - *p++ = xdr_zero; + ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length); + if (ret < 0) + return ret; + len = ret; - /* Reply chunk discriminator; may be replaced later */ - *p = xdr_zero; + /* Terminate the Write list */ + ret = xdr_stream_encode_item_absent(&sctxt->sc_stream); + if (ret < 0) + return ret; + + return len + ret; } -/* The client provided a Reply chunk in the Call message. Fill in - * the segments in the Reply chunk in the Reply message with the - * number of bytes consumed in each segment. +/** + * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk + * @rctxt: Reply context with information about the RPC Call + * @sctxt: Send context for the RPC Reply + * @length: size in bytes of the payload in the Reply chunk * * Assumptions: - * - Reply can always fit in the provided Reply chunk + * - Reply can always fit in the client-provided Reply chunk + * + * Return values: + * On success, returns length in bytes of the Reply XDR buffer + * that was consumed by the Reply's Reply chunk + * %-EMSGSIZE on XDR buffer overflow */ -static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch, - unsigned int consumed) +static ssize_t +svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt, + struct svc_rdma_send_ctxt *sctxt, + unsigned int length) { - __be32 *p; - - /* Find the Reply chunk in the Reply's xprt header. - * RPC-over-RDMA V1 replies never have a Read list. 
- */ - p = rdma_resp + rpcrdma_fixed_maxsz + 1; - - /* Skip past Write list */ - while (*p++ != xdr_zero) - p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz; - - xdr_encode_write_chunk(p, rp_ch, consumed); + return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt, + length); } static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma, @@ -765,14 +813,26 @@ static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, struct svc_rqst *rqstp) { + struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; + __be32 *rdma_argp = rctxt->rc_recv_buf; __be32 *p; - p = ctxt->sc_xprt_buf; - trace_svcrdma_err_chunk(*p); - p += 3; + rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0); + xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, ctxt->sc_xprt_buf, + NULL); + + p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_ERR); + if (!p) + return -ENOMSG; + + *p++ = *rdma_argp; + *p++ = *(rdma_argp + 1); + *p++ = rdma->sc_fc_credits; *p++ = rdma_error; *p = err_chunk; - svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR); + trace_svcrdma_err_chunk(*rdma_argp); + + svc_rdma_sync_reply_hdr(rdma, ctxt, ctxt->sc_hdrbuf.len); svc_rdma_save_io_pages(rqstp, ctxt); @@ -803,7 +863,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) __be32 *rp_ch = rctxt->rc_reply_chunk; struct xdr_buf *xdr = &rqstp->rq_res; struct svc_rdma_send_ctxt *sctxt; - __be32 *p, *rdma_resp; + __be32 *p; int ret; /* Create the RDMA response header. xprt->xpt_mutex, @@ -816,19 +876,18 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) sctxt = svc_rdma_send_ctxt_get(rdma); if (!sctxt) goto err0; - rdma_resp = sctxt->sc_xprt_buf; - p = rdma_resp; + p = xdr_reserve_space(&sctxt->sc_stream, + rpcrdma_fixed_maxsz * sizeof(*p)); + if (!p) + goto err0; *p++ = *rdma_argp; *p++ = *(rdma_argp + 1); *p++ = rdma->sc_fc_credits; - *p++ = rp_ch ? rdma_nomsg : rdma_msg; - - /* Start with empty chunks */ - *p++ = xdr_zero; - *p++ = xdr_zero; - *p = xdr_zero; + *p = rp_ch ? rdma_nomsg : rdma_msg; + if (svc_rdma_encode_read_list(sctxt) < 0) + goto err0; if (wr_lst) { /* XXX: Presume the client sent only one Write chunk */ unsigned long offset; @@ -845,16 +904,24 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) length); if (ret < 0) goto err2; - svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret); + if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0) + goto err0; + } else { + if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0) + goto err0; } if (rp_ch) { ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res); if (ret < 0) goto err2; - svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); + if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0) + goto err0; + } else { + if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0) + goto err0; } - svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp)); + svc_rdma_sync_reply_hdr(rdma, sctxt, sctxt->sc_hdrbuf.len); ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp); if (ret < 0) goto err1; -- cgit v1.2.3-58-ga151 From aee4b74a3f273b54d136132fedf575ec464f4134 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 3 Mar 2020 11:08:05 -0500 Subject: svcrdma: Fix double sync of transport header buffer Performance optimization: Avoid syncing the transport buffer twice when Reply buffer pull-up is necessary. 
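The mechanism, condensed: the header SGE is now synced for the device exactly once, at post time, whether the message was DMA-mapped or pulled up. The ib_dma_sync_single_for_device() call is lifted from the hunk below; the wrapper function is illustrative only.

/* svc_rdma_send() becomes the single point where the persistently
 * mapped transport header buffer is flushed toward the device.
 */
static void svc_rdma_sync_hdr_once(struct svcxprt_rdma *rdma,
				   struct ib_send_wr *wr)
{
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);
}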
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 3 -- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 1 - net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 4 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 64 ++++++++++++------------------ 4 files changed, 28 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index d001aac13c2f..a3fa5b4fa2e4 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -191,9 +191,6 @@ extern struct svc_rdma_send_ctxt * extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); -extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - unsigned int len); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 9830748c58d2..46b59e91d34a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -191,7 +191,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) *p++ = xdr_zero; *p++ = xdr_zero; *p = xdr_zero; - svc_rdma_sync_reply_hdr(rdma, ctxt, ctxt->sc_hdrbuf.len); #ifdef SVCRDMA_BACKCHANNEL_DEBUG pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index e2c747b5f517..54469b72b25f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -735,9 +735,9 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, trace_svcrdma_err_chunk(*rdma_argp); } - svc_rdma_sync_reply_hdr(xprt, ctxt, ctxt->sc_hdrbuf.len); - + ctxt->sc_send_wr.num_sge = 1; ctxt->sc_send_wr.opcode = IB_WR_SEND; + ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len; ret = svc_rdma_send(xprt, &ctxt->sc_send_wr); if (ret) goto put_ctxt; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 9d3b9a7e954f..7b9853214769 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -301,6 +301,12 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) might_sleep(); + /* Sync the transport header buffer */ + ib_dma_sync_single_for_device(rdma->sc_pd->device, + wr->sg_list[0].addr, + wr->sg_list[0].length, + DMA_TO_DEVICE); + /* If the SQ is full, wait until an SQ entry is available */ while (1) { if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) { @@ -532,24 +538,6 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma, offset_in_page(base), len); } -/** - * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer - * @rdma: controlling transport - * @ctxt: send_ctxt for the Send WR - * @len: length of transport header - * - */ -void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - unsigned int len) -{ - ctxt->sc_sges[0].length = len; - ctxt->sc_send_wr.num_sge++; - ib_dma_sync_single_for_device(rdma->sc_pd->device, - ctxt->sc_sges[0].addr, len, - DMA_TO_DEVICE); -} - /** * svc_rdma_pull_up_needed - Determine whether to use pull-up * @rdma: controlling transport @@ -612,9 +600,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, unsigned char *dst, *tailbase; unsigned int taillen; - dst = 
sctxt->sc_xprt_buf; - dst += sctxt->sc_sges[0].length; - + dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len; memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); dst += xdr->head[0].iov_len; @@ -650,11 +636,6 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, memcpy(dst, tailbase, taillen); sctxt->sc_sges[0].length += xdr->len; - ib_dma_sync_single_for_device(rdma->sc_pd->device, - sctxt->sc_sges[0].addr, - sctxt->sc_sges[0].length, - DMA_TO_DEVICE); - return 0; } @@ -665,7 +646,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, * @xdr: prepared xdr_buf containing RPC message * * Load the xdr_buf into the ctxt's sge array, and DMA map each - * element as it is added. + * element as it is added. The Send WR's num_sge field is set. * * Returns zero on success, or a negative errno on failure. */ @@ -681,6 +662,19 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, u32 xdr_pad; int ret; + /* Set up the (persistently-mapped) transport header SGE. */ + sctxt->sc_send_wr.num_sge = 1; + sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; + + /* If there is a Reply chunk, nothing follows the transport + * header, and we're done here. + */ + if (rctxt && rctxt->rc_reply_chunk) + return 0; + + /* For pull-up, svc_rdma_send() will sync the transport header. + * No additional DMA mapping is necessary. + */ if (svc_rdma_pull_up_needed(rdma, rctxt, xdr)) return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr); @@ -782,12 +776,9 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, { int ret; - if (!rctxt->rc_reply_chunk) { - ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, - &rqstp->rq_res); - if (ret < 0) - return ret; - } + ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res); + if (ret < 0) + return ret; svc_rdma_save_io_pages(rqstp, sctxt); @@ -797,8 +788,6 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, } else { sctxt->sc_send_wr.opcode = IB_WR_SEND; } - dprintk("svcrdma: posting Send WR with %u sge(s)\n", - sctxt->sc_send_wr.num_sge); return svc_rdma_send(rdma, &sctxt->sc_send_wr); } @@ -832,11 +821,11 @@ static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, *p = err_chunk; trace_svcrdma_err_chunk(*rdma_argp); - svc_rdma_sync_reply_hdr(rdma, ctxt, ctxt->sc_hdrbuf.len); - svc_rdma_save_io_pages(rqstp, ctxt); + ctxt->sc_send_wr.num_sge = 1; ctxt->sc_send_wr.opcode = IB_WR_SEND; + ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len; return svc_rdma_send(rdma, &ctxt->sc_send_wr); } @@ -921,7 +910,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) goto err0; } - svc_rdma_sync_reply_hdr(rdma, sctxt, sctxt->sc_hdrbuf.len); ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp); if (ret < 0) goto err1; -- cgit v1.2.3-58-ga151 From 0dabe948f28274e7956a625a24f205016b810693 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 3 Mar 2020 13:28:14 -0500 Subject: svcrdma: Avoid DMA mapping small RPC Replies On some platforms, DMA mapping part of a page is more costly than copying bytes. Indeed, not involving the I/O MMU can help the RPC/RDMA transport scale better for tiny I/Os across more RDMA devices. This is because interaction with the I/O MMU is eliminated for each of these small I/Os. Without the explicit unmapping, the NIC no longer needs to do a costly internal TLB shoot down for buffers that are just a handful of bytes. Since pull-up is now a more frequent operation, I've introduced a trace point in the pull-up path. It can be used for debugging or user-space tools that count pull-up frequency.
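The heuristic itself is a single size test. A sketch, assuming the RPCRDMA_PULLUP_THRESH constant defined in the hunk below (the standalone predicate is illustrative; in the patch the test sits at the top of svc_rdma_pull_up_needed()):

/* True when the whole Reply (transport header plus RPC message)
 * is small enough that copying it into the already-mapped header
 * buffer is cheaper than DMA-mapping its component fragments.
 */
static bool svc_rdma_reply_is_tiny(const struct svc_rdma_send_ctxt *sctxt,
				   const struct xdr_buf *xdr)
{
	return sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH;
}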
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 1 + include/trace/events/rpcrdma.h | 18 ++++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 13 ++++++++++++- 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index a3fa5b4fa2e4..78fe2ac6dc6c 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -52,6 +52,7 @@ /* Default and maximum inline threshold sizes */ enum { + RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1, RPCRDMA_DEF_INLINE_THRESH = 4096, RPCRDMA_MAX_INLINE_THRESH = 65536 }; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 74b68547eefb..9238d233f8cf 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1639,6 +1639,24 @@ TRACE_EVENT(svcrdma_dma_map_rwctx, ) ); +TRACE_EVENT(svcrdma_send_pullup, + TP_PROTO( + unsigned int len + ), + + TP_ARGS(len), + + TP_STRUCT__entry( + __field(unsigned int, len) + ), + + TP_fast_assign( + __entry->len = len; + ), + + TP_printk("len=%u", __entry->len) +); + TRACE_EVENT(svcrdma_send_failed, TP_PROTO( const struct svc_rqst *rqst, diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 7b9853214769..90cba3058f04 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -541,6 +541,7 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma, /** * svc_rdma_pull_up_needed - Determine whether to use pull-up * @rdma: controlling transport + * @sctxt: send_ctxt for the Send WR * @rctxt: Write and Reply chunks provided by client * @xdr: xdr_buf containing RPC message to transmit * @@ -549,11 +550,20 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma, * %false otherwise */ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr) { int elements; + /* For small messages, copying bytes is cheaper than DMA mapping. + */ + if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH) + return true; + + /* Check whether the xdr_buf has more elements than can + * fit in a single RDMA Send. + */ /* xdr->head */ elements = 1; @@ -636,6 +646,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, memcpy(dst, tailbase, taillen); sctxt->sc_sges[0].length += xdr->len; + trace_svcrdma_send_pullup(sctxt->sc_sges[0].length); return 0; } @@ -675,7 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, /* For pull-up, svc_rdma_send() will sync the transport header. * No additional DMA mapping is necessary. */ - if (svc_rdma_pull_up_needed(rdma, rctxt, xdr)) + if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr)) return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr); ++sctxt->sc_cur_sge_no; -- cgit v1.2.3-58-ga151 From 9e55eef4ab1bf1810443bb3989a07a68e1f5d084 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 2 Mar 2020 15:19:54 -0500 Subject: SUNRPC: Refactor xs_sendpages() Re-locate xs_sendpages() so that it can be shared with server code. 
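[ ed: the common case the relocated helper handles, coalescing the 4-byte
  stream record marker with the head kvec in a single sendmsg, has a
  simple userspace analogue using writev(2). A hedged sketch, not the
  kernel code; send_with_marker() is an invented name and error handling
  is trimmed.

#include <stdint.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <arpa/inet.h>

/*
 * Prepend the 4-byte RPC record marker (high bit: last fragment,
 * low 31 bits: fragment length) to the first buffer with a
 * two-element iovec, so marker and head go out in one syscall.
 */
static ssize_t send_with_marker(int fd, void *head, uint32_t len)
{
	uint32_t marker = htonl(0x80000000u | len);
	struct iovec iov[2] = {
		{ .iov_base = &marker, .iov_len = sizeof(marker) },
		{ .iov_base = head,    .iov_len = len },
	};

	return writev(fd, iov, 2);
}
]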
Signed-off-by: Chuck Lever --- include/linux/sunrpc/xdr.h | 14 ----- net/sunrpc/socklib.c | 141 ++++++++++++++++++++++++++++++++++++++++++ net/sunrpc/socklib.h | 15 +++++ net/sunrpc/svcsock.c | 1 + net/sunrpc/xprtsock.c | 149 ++++++--------------------------------------- 5 files changed, 177 insertions(+), 143 deletions(-) create mode 100644 net/sunrpc/socklib.h (limited to 'include/linux') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9c8b73b509a1..8529d6e33137 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -188,20 +188,6 @@ extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int) extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); -/* - * Helper structure for copying from an sk_buff. - */ -struct xdr_skb_reader { - struct sk_buff *skb; - unsigned int offset; - size_t count; - __wsum csum; -}; - -typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); - -extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); - extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 1a864f1ed119..3fc8af8bb961 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -14,9 +14,24 @@ #include #include #include +#include #include #include +#include "socklib.h" + +/* + * Helper structure for copying from an sk_buff. + */ +struct xdr_skb_reader { + struct sk_buff *skb; + unsigned int offset; + size_t count; + __wsum csum; +}; + +typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, + size_t len); /** * xdr_skb_read_bits - copy some data bits from skb to internal buffer @@ -186,3 +201,129 @@ no_checksum: return 0; } EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr); + +static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg, + size_t seek) +{ + if (seek) + iov_iter_advance(&msg->msg_iter, seek); + return sock_sendmsg(sock, msg); +} + +static int xprt_send_kvec(struct socket *sock, struct msghdr *msg, + struct kvec *vec, size_t seek) +{ + iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len); + return xprt_sendmsg(sock, msg, seek); +} + +static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg, + struct xdr_buf *xdr, size_t base) +{ + int err; + + err = xdr_alloc_bvec(xdr, GFP_KERNEL); + if (err < 0) + return err; + + iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr), + xdr->page_len + xdr->page_base); + return xprt_sendmsg(sock, msg, base + xdr->page_base); +} + +/* Common case: + * - stream transport + * - sending from byte 0 of the message + * - the message is wholly contained in @xdr's head iovec + */ +static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg, + rpc_fraghdr marker, struct kvec *vec, + size_t base) +{ + struct kvec iov[2] = { + [0] = { + .iov_base = &marker, + .iov_len = sizeof(marker) + }, + [1] = *vec, + }; + size_t len = iov[0].iov_len + iov[1].iov_len; + + iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len); + return xprt_sendmsg(sock, msg, base); +} + +/** + * xprt_sock_sendmsg - write an xdr_buf directly to a socket + * @sock: open socket to send on + * @msg: socket message metadata + * @xdr: xdr_buf containing this request + * @base: starting position in the buffer + * @marker: stream record marker field + * 
@sent_p: return the total number of bytes successfully queued for sending + * + * Return values: + * On success, returns zero and fills in @sent_p. + * %-ENOTSOCK if @sock is not a struct socket. + */ +int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg, + struct xdr_buf *xdr, unsigned int base, + rpc_fraghdr marker, unsigned int *sent_p) +{ + unsigned int rmsize = marker ? sizeof(marker) : 0; + unsigned int remainder = rmsize + xdr->len - base; + unsigned int want; + int err = 0; + + *sent_p = 0; + + if (unlikely(!sock)) + return -ENOTSOCK; + + msg->msg_flags |= MSG_MORE; + want = xdr->head[0].iov_len + rmsize; + if (base < want) { + unsigned int len = want - base; + + remainder -= len; + if (remainder == 0) + msg->msg_flags &= ~MSG_MORE; + if (rmsize) + err = xprt_send_rm_and_kvec(sock, msg, marker, + &xdr->head[0], base); + else + err = xprt_send_kvec(sock, msg, &xdr->head[0], base); + if (remainder == 0 || err != len) + goto out; + *sent_p += err; + base = 0; + } else { + base -= want; + } + + if (base < xdr->page_len) { + unsigned int len = xdr->page_len - base; + + remainder -= len; + if (remainder == 0) + msg->msg_flags &= ~MSG_MORE; + err = xprt_send_pagedata(sock, msg, xdr, base); + if (remainder == 0 || err != len) + goto out; + *sent_p += err; + base = 0; + } else { + base -= xdr->page_len; + } + + if (base >= xdr->tail[0].iov_len) + return 0; + msg->msg_flags &= ~MSG_MORE; + err = xprt_send_kvec(sock, msg, &xdr->tail[0], base); +out: + if (err > 0) { + *sent_p += err; + err = 0; + } + return err; +} diff --git a/net/sunrpc/socklib.h b/net/sunrpc/socklib.h new file mode 100644 index 000000000000..c48114ad6f00 --- /dev/null +++ b/net/sunrpc/socklib.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1995-1997 Olaf Kirch + * Copyright (C) 2020, Oracle. 
+ */ + +#ifndef _NET_SUNRPC_SOCKLIB_H_ +#define _NET_SUNRPC_SOCKLIB_H_ + +int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb); +int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg, + struct xdr_buf *xdr, unsigned int base, + rpc_fraghdr marker, unsigned int *sent_p); + +#endif /* _NET_SUNRPC_SOCKLIB_H_ */ diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 758ab10690de..1cc5c224392b 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -55,6 +55,7 @@ #include #include +#include "socklib.h" #include "sunrpc.h" #define RPCDBG_FACILITY RPCDBG_SVCXPRT diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d86c664ea6af..1a7c0856c5b6 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -54,6 +54,7 @@ #include +#include "socklib.h" #include "sunrpc.h" static void xs_close(struct rpc_xprt *xprt); @@ -749,125 +750,6 @@ xs_stream_start_connect(struct sock_xprt *transport) #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) -static int xs_sendmsg(struct socket *sock, struct msghdr *msg, size_t seek) -{ - if (seek) - iov_iter_advance(&msg->msg_iter, seek); - return sock_sendmsg(sock, msg); -} - -static int xs_send_kvec(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t seek) -{ - iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len); - return xs_sendmsg(sock, msg, seek); -} - -static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_buf *xdr, size_t base) -{ - int err; - - err = xdr_alloc_bvec(xdr, GFP_KERNEL); - if (err < 0) - return err; - - iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, - xdr_buf_pagecount(xdr), - xdr->page_len + xdr->page_base); - return xs_sendmsg(sock, msg, base + xdr->page_base); -} - -#define xs_record_marker_len() sizeof(rpc_fraghdr) - -/* Common case: - * - stream transport - * - sending from byte 0 of the message - * - the message is wholly contained in @xdr's head iovec - */ -static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg, - rpc_fraghdr marker, struct kvec *vec, size_t base) -{ - struct kvec iov[2] = { - [0] = { - .iov_base = &marker, - .iov_len = sizeof(marker) - }, - [1] = *vec, - }; - size_t len = iov[0].iov_len + iov[1].iov_len; - - iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len); - return xs_sendmsg(sock, msg, base); -} - -/** - * xs_sendpages - write pages directly to a socket - * @sock: socket to send on - * @addr: UDP only -- address of destination - * @addrlen: UDP only -- length of destination address - * @xdr: buffer containing this request - * @base: starting position in the buffer - * @rm: stream record marker field - * @sent_p: return the total number of bytes successfully queued for sending - * - */ -static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p) -{ - struct msghdr msg = { - .msg_name = addr, - .msg_namelen = addrlen, - .msg_flags = XS_SENDMSG_FLAGS | MSG_MORE, - }; - unsigned int rmsize = rm ? 
sizeof(rm) : 0; - unsigned int remainder = rmsize + xdr->len - base; - unsigned int want; - int err = 0; - - if (unlikely(!sock)) - return -ENOTSOCK; - - want = xdr->head[0].iov_len + rmsize; - if (base < want) { - unsigned int len = want - base; - remainder -= len; - if (remainder == 0) - msg.msg_flags &= ~MSG_MORE; - if (rmsize) - err = xs_send_rm_and_kvec(sock, &msg, rm, - &xdr->head[0], base); - else - err = xs_send_kvec(sock, &msg, &xdr->head[0], base); - if (remainder == 0 || err != len) - goto out; - *sent_p += err; - base = 0; - } else - base -= want; - - if (base < xdr->page_len) { - unsigned int len = xdr->page_len - base; - remainder -= len; - if (remainder == 0) - msg.msg_flags &= ~MSG_MORE; - err = xs_send_pagedata(sock, &msg, xdr, base); - if (remainder == 0 || err != len) - goto out; - *sent_p += err; - base = 0; - } else - base -= xdr->page_len; - - if (base >= xdr->tail[0].iov_len) - return 0; - msg.msg_flags &= ~MSG_MORE; - err = xs_send_kvec(sock, &msg, &xdr->tail[0], base); -out: - if (err > 0) { - *sent_p += err; - err = 0; - } - return err; -} - /** * xs_nospace - handle transmit was incomplete * @req: pointer to RPC request @@ -959,8 +841,11 @@ static int xs_local_send_request(struct rpc_rqst *req) struct xdr_buf *xdr = &req->rq_snd_buf; rpc_fraghdr rm = xs_stream_record_marker(xdr); unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; + struct msghdr msg = { + .msg_flags = XS_SENDMSG_FLAGS, + }; + unsigned int uninitialized_var(sent); int status; - int sent = 0; /* Close the stream if the previous transmission was incomplete */ if (xs_send_request_was_aborted(transport, req)) { @@ -972,8 +857,8 @@ static int xs_local_send_request(struct rpc_rqst *req) req->rq_svec->iov_base, req->rq_svec->iov_len); req->rq_xtime = ktime_get(); - status = xs_sendpages(transport->sock, NULL, 0, xdr, - transport->xmit.offset, rm, &sent); + status = xprt_sock_sendmsg(transport->sock, &msg, xdr, + transport->xmit.offset, rm, &sent); dprintk("RPC: %s(%u) = %d\n", __func__, xdr->len - transport->xmit.offset, status); @@ -1025,7 +910,12 @@ static int xs_udp_send_request(struct rpc_rqst *req) struct rpc_xprt *xprt = req->rq_xprt; struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); struct xdr_buf *xdr = &req->rq_snd_buf; - int sent = 0; + struct msghdr msg = { + .msg_name = xs_addr(xprt), + .msg_namelen = xprt->addrlen, + .msg_flags = XS_SENDMSG_FLAGS, + }; + unsigned int uninitialized_var(sent); int status; xs_pktdump("packet data:", @@ -1039,8 +929,7 @@ static int xs_udp_send_request(struct rpc_rqst *req) return -EBADSLT; req->rq_xtime = ktime_get(); - status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen, - xdr, 0, 0, &sent); + status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent); dprintk("RPC: xs_udp_send_request(%u) = %d\n", xdr->len, status); @@ -1106,9 +995,12 @@ static int xs_tcp_send_request(struct rpc_rqst *req) struct xdr_buf *xdr = &req->rq_snd_buf; rpc_fraghdr rm = xs_stream_record_marker(xdr); unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; + struct msghdr msg = { + .msg_flags = XS_SENDMSG_FLAGS, + }; bool vm_wait = false; + unsigned int uninitialized_var(sent); int status; - int sent; /* Close the stream if the previous transmission was incomplete */ if (xs_send_request_was_aborted(transport, req)) { @@ -1129,9 +1021,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req) * called sendmsg(). 
*/ req->rq_xtime = ktime_get(); while (1) { - sent = 0; - status = xs_sendpages(transport->sock, NULL, 0, xdr, - transport->xmit.offset, rm, &sent); + status = xprt_sock_sendmsg(transport->sock, &msg, xdr, + transport->xmit.offset, rm, &sent); dprintk("RPC: xs_tcp_send_request(%u) = %d\n", xdr->len - transport->xmit.offset, status); -- cgit v1.2.3-58-ga151 From 65286b883c6de6b30928c837c47c167e82bde0b2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 1 Mar 2020 18:21:42 -0500 Subject: nfsd: export upcalls must not return ESTALE when mountd is down If the rpc.mountd daemon goes down, then that should not cause all exports to start failing with ESTALE errors. Let's explicitly distinguish between the cache upcall cases that need to time out, and those that do not. Signed-off-by: Trond Myklebust Signed-off-by: Chuck Lever --- fs/nfs/dns_resolve.c | 11 ++++---- fs/nfsd/export.c | 12 +++++++++ fs/nfsd/nfs4idmap.c | 14 +++++++++++ include/linux/sunrpc/cache.h | 3 +++ net/sunrpc/auth_gss/svcauth_gss.c | 12 +++++++++ net/sunrpc/cache.c | 53 +++++++++++++++++++-------------------- net/sunrpc/svcauth_unix.c | 12 +++++++++ 7 files changed, 85 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c index 89bd5581f317..963800037609 100644 --- a/fs/nfs/dns_resolve.c +++ b/fs/nfs/dns_resolve.c @@ -152,12 +152,13 @@ static int nfs_dns_upcall(struct cache_detail *cd, struct cache_head *ch) { struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); - int ret; - ret = nfs_cache_upcall(cd, key->hostname); - if (ret) - ret = sunrpc_cache_pipe_upcall(cd, ch); - return ret; + if (test_and_set_bit(CACHE_PENDING, &ch->flags)) + return 0; + if (!nfs_cache_upcall(cd, key->hostname)) + return 0; + clear_bit(CACHE_PENDING, &ch->flags); + return sunrpc_cache_pipe_upcall_timeout(cd, ch); } static int nfs_dns_match(struct cache_head *ca, diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 6e6cbeb7ac2b..cb777fe82988 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -51,6 +51,11 @@ static void expkey_put(struct kref *ref) kfree_rcu(key, ek_rcu); } +static int expkey_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return sunrpc_cache_pipe_upcall(cd, h); +} + static void expkey_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -254,6 +259,7 @@ static const struct cache_detail svc_expkey_cache_template = { .hash_size = EXPKEY_HASHMAX, .name = "nfsd.fh", .cache_put = expkey_put, + .cache_upcall = expkey_upcall, .cache_request = expkey_request, .cache_parse = expkey_parse, .cache_show = expkey_show, @@ -335,6 +341,11 @@ static void svc_export_put(struct kref *ref) kfree_rcu(exp, ex_rcu); } +static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return sunrpc_cache_pipe_upcall(cd, h); +} + static void svc_export_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -774,6 +785,7 @@ static const struct cache_detail svc_export_cache_template = { .hash_size = EXPORT_HASHMAX, .name = "nfsd.export", .cache_put = svc_export_put, + .cache_upcall = svc_export_upcall, .cache_request = svc_export_request, .cache_parse = svc_export_parse, .cache_show = svc_export_show, diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index d1f285245af8..9460be8a8321 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -122,6 +122,12 @@ idtoname_hash(struct ent *ent) return hash; } +static int +idtoname_upcall(struct cache_detail *cd, struct cache_head *h) +{ + 
return sunrpc_cache_pipe_upcall_timeout(cd, h); +} + static void idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) @@ -184,6 +190,7 @@ static const struct cache_detail idtoname_cache_template = { .hash_size = ENT_HASHMAX, .name = "nfs4.idtoname", .cache_put = ent_put, + .cache_upcall = idtoname_upcall, .cache_request = idtoname_request, .cache_parse = idtoname_parse, .cache_show = idtoname_show, @@ -295,6 +302,12 @@ nametoid_hash(struct ent *ent) return hash_str(ent->name, ENT_HASHBITS); } +static int +nametoid_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return sunrpc_cache_pipe_upcall_timeout(cd, h); +} + static void nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) @@ -347,6 +360,7 @@ static const struct cache_detail nametoid_cache_template = { .hash_size = ENT_HASHMAX, .name = "nfs4.nametoid", .cache_put = ent_put, + .cache_upcall = nametoid_upcall, .cache_request = nametoid_request, .cache_parse = nametoid_parse, .cache_show = nametoid_show, diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 0f64de7caa39..656882a50991 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -179,6 +179,9 @@ sunrpc_cache_update(struct cache_detail *detail, extern int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); +extern int +sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail, + struct cache_head *h); extern void cache_clean_deferred(void *owner); diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e93f06ec180d..54ae5be62f6a 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -180,6 +180,11 @@ static struct cache_head *rsi_alloc(void) return NULL; } +static int rsi_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return sunrpc_cache_pipe_upcall_timeout(cd, h); +} + static void rsi_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -278,6 +283,7 @@ static const struct cache_detail rsi_cache_template = { .hash_size = RSI_HASHMAX, .name = "auth.rpcsec.init", .cache_put = rsi_put, + .cache_upcall = rsi_upcall, .cache_request = rsi_request, .cache_parse = rsi_parse, .match = rsi_match, @@ -424,6 +430,11 @@ rsc_alloc(void) return NULL; } +static int rsc_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return -EINVAL; +} + static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen) { @@ -550,6 +561,7 @@ static const struct cache_detail rsc_cache_template = { .hash_size = RSC_HASHMAX, .name = "auth.rpcsec.context", .cache_put = rsc_put, + .cache_upcall = rsc_upcall, .cache_parse = rsc_parse, .match = rsc_match, .init = rsc_init, diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 8a8e3528293c..7f2e5d818e05 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -38,7 +38,6 @@ static bool cache_defer_req(struct cache_req *req, struct cache_head *item); static void cache_revisit_request(struct cache_head *item); -static bool cache_listeners_exist(struct cache_detail *detail); static void cache_init(struct cache_head *h, struct cache_detail *detail) { @@ -225,13 +224,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, } EXPORT_SYMBOL_GPL(sunrpc_cache_update); -static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) -{ - if (cd->cache_upcall) - return cd->cache_upcall(cd, h); - return sunrpc_cache_pipe_upcall(cd, h); -} - static inline int 
cache_is_valid(struct cache_head *h) { if (!test_bit(CACHE_VALID, &h->flags)) @@ -304,17 +296,14 @@ int cache_check(struct cache_detail *detail, (h->expiry_time != 0 && age > refresh_age/2)) { dprintk("RPC: Want update, refage=%lld, age=%lld\n", refresh_age, age); - if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { - switch (cache_make_upcall(detail, h)) { - case -EINVAL: - rv = try_to_negate_entry(detail, h); - break; - case -EAGAIN: - cache_fresh_unlocked(h, detail); - break; - } - } else if (!cache_listeners_exist(detail)) + switch (detail->cache_upcall(detail, h)) { + case -EINVAL: rv = try_to_negate_entry(detail, h); + break; + case -EAGAIN: + cache_fresh_unlocked(h, detail); + break; + } } if (rv == -EAGAIN) { @@ -1196,20 +1185,12 @@ static bool cache_listeners_exist(struct cache_detail *detail) * * Each request is at most one page long. */ -int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) +static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) { - char *buf; struct cache_request *crq; int ret = 0; - if (!detail->cache_request) - return -EINVAL; - - if (!cache_listeners_exist(detail)) { - warn_no_listener(detail); - return -EINVAL; - } if (test_bit(CACHE_CLEANED, &h->flags)) /* Too late to make an upcall */ return -EAGAIN; @@ -1243,8 +1224,26 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) } return ret; } + +int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) +{ + if (test_and_set_bit(CACHE_PENDING, &h->flags)) + return 0; + return cache_pipe_upcall(detail, h); +} EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall); +int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail, + struct cache_head *h) +{ + if (!cache_listeners_exist(detail)) { + warn_no_listener(detail); + return -EINVAL; + } + return sunrpc_cache_pipe_upcall(detail, h); +} +EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout); + /* * parse a message from user-space and pass it * to an appropriate cache diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 04aa80a2d752..6c8f802c4261 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -148,6 +148,11 @@ static struct cache_head *ip_map_alloc(void) return NULL; } +static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return sunrpc_cache_pipe_upcall(cd, h); +} + static void ip_map_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -467,6 +472,11 @@ static struct cache_head *unix_gid_alloc(void) return NULL; } +static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h) +{ + return sunrpc_cache_pipe_upcall_timeout(cd, h); +} + static void unix_gid_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -584,6 +594,7 @@ static const struct cache_detail unix_gid_cache_template = { .hash_size = GID_HASHMAX, .name = "auth.unix.gid", .cache_put = unix_gid_put, + .cache_upcall = unix_gid_upcall, .cache_request = unix_gid_request, .cache_parse = unix_gid_parse, .cache_show = unix_gid_show, @@ -881,6 +892,7 @@ static const struct cache_detail ip_map_cache_template = { .hash_size = IP_HASHMAX, .name = "auth.unix.ip", .cache_put = ip_map_put, + .cache_upcall = ip_map_upcall, .cache_request = ip_map_request, .cache_parse = ip_map_parse, .cache_show = ip_map_show, -- cgit v1.2.3-58-ga151 From 277f27e2f27752cd1a7901443d72e908ddea8a2e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 1 Mar 2020 18:21:43 -0500 Subject: SUNRPC/cache: 
Allow garbage collection of invalid cache entries If the cache entry never gets initialised, we want the garbage collector to be able to evict it. Otherwise if the upcall daemon fails to initialise the entry, we end up never expiring it. Signed-off-by: Trond Myklebust [ cel: resolved a merge conflict ] Signed-off-by: Chuck Lever --- include/linux/sunrpc/cache.h | 3 --- net/sunrpc/cache.c | 36 +++++++++++++++++++----------------- 2 files changed, 19 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 656882a50991..532cdbda43da 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -209,9 +209,6 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { - if (!test_bit(CACHE_VALID, &h->flags)) - return false; - return (h->expiry_time < seconds_since_boot()) || (detail->flush_time >= h->last_refresh); } diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 7f2e5d818e05..cd76ef2d26b8 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -64,13 +64,14 @@ static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail, rcu_read_lock(); hlist_for_each_entry_rcu(tmp, head, cache_list) { - if (detail->match(tmp, key)) { - if (cache_is_expired(detail, tmp)) - continue; - tmp = cache_get_rcu(tmp); - rcu_read_unlock(); - return tmp; - } + if (!detail->match(tmp, key)) + continue; + if (test_bit(CACHE_VALID, &tmp->flags) && + cache_is_expired(detail, tmp)) + continue; + tmp = cache_get_rcu(tmp); + rcu_read_unlock(); + return tmp; } rcu_read_unlock(); return NULL; @@ -114,17 +115,18 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, /* check if entry appeared while we slept */ hlist_for_each_entry_rcu(tmp, head, cache_list, lockdep_is_held(&detail->hash_lock)) { - if (detail->match(tmp, key)) { - if (cache_is_expired(detail, tmp)) { - sunrpc_begin_cache_remove_entry(tmp, detail); - freeme = tmp; - break; - } - cache_get(tmp); - spin_unlock(&detail->hash_lock); - cache_put(new, detail); - return tmp; + if (!detail->match(tmp, key)) + continue; + if (test_bit(CACHE_VALID, &tmp->flags) && + cache_is_expired(detail, tmp)) { + sunrpc_begin_cache_remove_entry(tmp, detail); + freeme = tmp; + break; } + cache_get(tmp); + spin_unlock(&detail->hash_lock); + cache_put(new, detail); + return tmp; } hlist_add_head_rcu(&new->cache_list, head); -- cgit v1.2.3-58-ga151 From 81924dae51941018afdaf25638da804be4807ce5 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Fri, 13 Mar 2020 19:42:35 +0000 Subject: mtd: spi-nor: Emphasise which is the generic set_4byte_addr_mode() method Rename (*set_4byte)() to (*set_4byte_addr_mode)() for a better differentiation between the 4 byte address mode and opcodes. Rename macronix_set_4byte() to spi_nor_set_4byte_addr_mode(), it will be the only 4 byte address mode method exposed to the manufacturer drivers. Here's how the manufacturers enter and exit the 4 byte address mode: - eon, gigadevice, issi, macronix, xmc use EN4B/EX4B - micron-st needs WEN. st_micron_set_4byte_addr_mode() will become a private method, as they are the only ones that need WEN before the EN4B/EX4B commands. - newer spansion flashes have a 4BAM opcode (this translates to a new, public command). 
Older spansion flashes use the BRWR command (legacy in core.c -> spansion_set_4byte_addr_mode()) - winbond's method is hackish and may be reason for just a flash fixup hook -> private method Signed-off-by: Tudor Ambarus Reviewed-by: Vignesh Raghavendra --- drivers/mtd/spi-nor/spi-nor.c | 34 ++++++++++++++++++---------------- include/linux/mtd/spi-nor.h | 4 ++-- 2 files changed, 20 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 0b8fac0b0299..8616673ddb7c 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -568,14 +568,14 @@ static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr) } /** - * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes. + * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode. * @nor: pointer to 'struct spi_nor'. * @enable: true to enter the 4-byte address mode, false to exit the 4-byte * address mode. * * Return: 0 on success, -errno otherwise. */ -static int macronix_set_4byte(struct spi_nor *nor, bool enable) +static int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) { int ret; @@ -604,14 +604,15 @@ static int macronix_set_4byte(struct spi_nor *nor, bool enable) } /** - * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes. + * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron + * flashes. * @nor: pointer to 'struct spi_nor'. * @enable: true to enter the 4-byte address mode, false to exit the 4-byte * address mode. * * Return: 0 on success, -errno otherwise. */ -static int st_micron_set_4byte(struct spi_nor *nor, bool enable) +static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable) { int ret; @@ -619,7 +620,7 @@ static int st_micron_set_4byte(struct spi_nor *nor, bool enable) if (ret) return ret; - ret = macronix_set_4byte(nor, enable); + ret = spi_nor_set_4byte_addr_mode(nor, enable); if (ret) return ret; @@ -627,14 +628,15 @@ static int st_micron_set_4byte(struct spi_nor *nor, bool enable) } /** - * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes. + * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion + * flashes. * @nor: pointer to 'struct spi_nor'. * @enable: true to enter the 4-byte address mode, false to exit the 4-byte * address mode. * * Return: 0 on success, -errno otherwise. */ -static int spansion_set_4byte(struct spi_nor *nor, bool enable) +static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable) { int ret; @@ -692,18 +694,18 @@ static int spi_nor_write_ear(struct spi_nor *nor, u8 ear) } /** - * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes. + * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes. * @nor: pointer to 'struct spi_nor'. * @enable: true to enter the 4-byte address mode, false to exit the 4-byte * address mode. * * Return: 0 on success, -errno otherwise. 
*/ -static int winbond_set_4byte(struct spi_nor *nor, bool enable) +static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable) { int ret; - ret = macronix_set_4byte(nor, enable); + ret = spi_nor_set_4byte_addr_mode(nor, enable); if (ret || enable) return ret; @@ -4655,7 +4657,7 @@ static void issi_set_default_init(struct spi_nor *nor) static void macronix_set_default_init(struct spi_nor *nor) { nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; - nor->params.set_4byte = macronix_set_4byte; + nor->params.set_4byte_addr_mode = spi_nor_set_4byte_addr_mode; } static void sst_set_default_init(struct spi_nor *nor) @@ -4668,12 +4670,12 @@ static void st_micron_set_default_init(struct spi_nor *nor) nor->flags |= SNOR_F_HAS_LOCK; nor->flags &= ~SNOR_F_HAS_16BIT_SR; nor->params.quad_enable = NULL; - nor->params.set_4byte = st_micron_set_4byte; + nor->params.set_4byte_addr_mode = st_micron_set_4byte_addr_mode; } static void winbond_set_default_init(struct spi_nor *nor) { - nor->params.set_4byte = winbond_set_4byte; + nor->params.set_4byte_addr_mode = winbond_set_4byte_addr_mode; } /** @@ -4759,7 +4761,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor) /* Initialize legacy flash parameters and settings. */ params->quad_enable = spi_nor_sr2_bit1_quad_enable; - params->set_4byte = spansion_set_4byte; + params->set_4byte_addr_mode = spansion_set_4byte_addr_mode; params->setup = spi_nor_default_setup; /* Default to 16-bit Write Status (01h) Command */ nor->flags |= SNOR_F_HAS_16BIT_SR; @@ -5011,7 +5013,7 @@ static int spi_nor_init(struct spi_nor *nor) */ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, "enabling reset hack; may not recover from unexpected reboots\n"); - nor->params.set_4byte(nor, true); + nor->params.set_4byte_addr_mode(nor, true); } return 0; @@ -5035,7 +5037,7 @@ void spi_nor_restore(struct spi_nor *nor) /* restore the addressing mode */ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && nor->flags & SNOR_F_BROKEN_RESET) - nor->params.set_4byte(nor, false); + nor->params.set_4byte_addr_mode(nor, false); } EXPORT_SYMBOL_GPL(spi_nor_restore); diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index de90724f62f1..2b9717b0cd62 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -520,7 +520,7 @@ struct spi_nor_locking_ops { * @erase_map: the erase map parsed from the SFDP Sector Map Parameter * Table. * @quad_enable: enables SPI NOR quad mode. - * @set_4byte: puts the SPI NOR in 4 byte addressing mode. + * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. * @convert_addr: converts an absolute address into something the flash * will understand. Particularly useful when pagesize is * not a power-of-2. @@ -541,7 +541,7 @@ struct spi_nor_flash_parameter { struct spi_nor_erase_map erase_map; int (*quad_enable)(struct spi_nor *nor); - int (*set_4byte)(struct spi_nor *nor, bool enable); + int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); u32 (*convert_addr)(struct spi_nor *nor, u32 addr); int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); -- cgit v1.2.3-58-ga151 From a46a22955bae16fc5a756af7188d3ccb25c3f797 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Thu, 12 Mar 2020 13:03:15 -0700 Subject: kernfs: Add removed_size out param for simple_xattr_set This helps set up size accounting in the next commit. Without this out param, it's difficult to find out the removed xattr size without taking a lock for longer and walking the xattr linked list twice. 
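[ ed: the out-parameter pattern this patch adds can be sketched
  generically. Illustrative only: struct entry and entry_set() are
  invented; the real function is simple_xattr_set() with @removed_size.

#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

struct entry {
	size_t size;
	/* key, value, list linkage ... */
};

/*
 * Replace (new != NULL) or remove (new == NULL) in one pass, and tell
 * the caller how big the displaced entry was (-1 if there was none),
 * so size accounting needs no second walk of the list under the lock.
 */
static int entry_set(struct entry **slot, struct entry *new,
		     ssize_t *removed_size)
{
	struct entry *old = *slot;

	if (removed_size)
		*removed_size = old ? (ssize_t)old->size : -1;
	*slot = new;
	/* caller frees @old */
	return 0;
}
]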
Signed-off-by: Daniel Xu Acked-by: Chris Down Reviewed-by: Greg Kroah-Hartman Signed-off-by: Tejun Heo --- fs/kernfs/inode.c | 2 +- fs/xattr.c | 11 ++++++++++- include/linux/xattr.h | 3 ++- mm/shmem.c | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index d0f7a5abd9a9..5f10ae95fbfa 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -303,7 +303,7 @@ int kernfs_xattr_set(struct kernfs_node *kn, const char *name, if (!attrs) return -ENOMEM; - return simple_xattr_set(&attrs->xattrs, name, value, size, flags); + return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL); } static int kernfs_vfs_xattr_get(const struct xattr_handler *handler, diff --git a/fs/xattr.c b/fs/xattr.c index 0d3c9b4d1914..e13265e65871 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -860,6 +860,7 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, * @value: value of the xattr. If %NULL, will remove the attribute. * @size: size of the new xattr * @flags: %XATTR_{CREATE|REPLACE} + * @removed_size: returns size of the removed xattr, -1 if none removed * * %XATTR_CREATE is set, the xattr shouldn't exist already; otherwise fails * with -EEXIST. If %XATTR_REPLACE is set, the xattr should exist; @@ -868,7 +869,8 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, * Returns 0 on success, -errno on failure. */ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, - const void *value, size_t size, int flags) + const void *value, size_t size, int flags, + ssize_t *removed_size) { struct simple_xattr *xattr; struct simple_xattr *new_xattr = NULL; @@ -895,8 +897,12 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, err = -EEXIST; } else if (new_xattr) { list_replace(&xattr->list, &new_xattr->list); + if (removed_size) + *removed_size = xattr->size; } else { list_del(&xattr->list); + if (removed_size) + *removed_size = xattr->size; } goto out; } @@ -908,6 +914,9 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, list_add(&new_xattr->list, &xattrs->head); xattr = NULL; } + + if (removed_size) + *removed_size = -1; out: spin_unlock(&xattrs->lock); if (xattr) { diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 6dad031be3c2..4cf6e11f4a3c 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -102,7 +102,8 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size); int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, - const void *value, size_t size, int flags); + const void *value, size_t size, int flags, + ssize_t *removed_size); ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_list_add(struct simple_xattrs *xattrs, diff --git a/mm/shmem.c b/mm/shmem.c index c8f7540ef048..e6a7549faf20 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3243,7 +3243,7 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler, struct shmem_inode_info *info = SHMEM_I(inode); name = xattr_full_name(handler, name); - return simple_xattr_set(&info->xattrs, name, value, size, flags); + return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); } static const struct xattr_handler shmem_security_xattr_handler = { -- cgit v1.2.3-58-ga151 From 0c47383ba3bd10877956e41149d19644fba937d1 Mon Sep 17 00:00:00 2001 From: Daniel 
Xu Date: Thu, 12 Mar 2020 13:03:16 -0700 Subject: kernfs: Add option to enable user xattrs User extended attributes are useful as metadata storage for kernfs consumers like cgroups. Especially in the case of cgroups, it is useful to have a central metadata store that multiple processes/services can use to coordinate actions. A concrete example is for userspace out of memory killers. We want to let delegated cgroup subtree owners (running as non-root) to be able to say "please avoid killing this cgroup". This is especially important for desktop linux as delegated subtrees owners are less likely to run as root. This patch introduces a new flag, KERNFS_ROOT_SUPPORT_USER_XATTR, that lets kernfs consumers enable user xattr support. An initial limit of 128 entries or 128KB -- whichever is hit first -- is placed per cgroup because xattrs come from kernel memory and we don't want to let unprivileged users accidentally eat up too much kernel memory. Signed-off-by: Daniel Xu Acked-by: Chris Down Reviewed-by: Greg Kroah-Hartman Signed-off-by: Tejun Heo --- fs/kernfs/inode.c | 89 +++++++++++++++++++++++++++++++++++++++++++++ fs/kernfs/kernfs-internal.h | 2 + include/linux/kernfs.h | 11 +++++- 3 files changed, 100 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 5f10ae95fbfa..fc2469a20fed 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -53,6 +53,8 @@ static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc) kn->iattr->ia_ctime = kn->iattr->ia_atime; simple_xattrs_init(&kn->iattr->xattrs); + atomic_set(&kn->iattr->nr_user_xattrs, 0); + atomic_set(&kn->iattr->user_xattr_size, 0); out_unlock: ret = kn->iattr; mutex_unlock(&iattr_mutex); @@ -327,6 +329,86 @@ static int kernfs_vfs_xattr_set(const struct xattr_handler *handler, return kernfs_xattr_set(kn, name, value, size, flags); } +static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn, + const char *full_name, + struct simple_xattrs *xattrs, + const void *value, size_t size, int flags) +{ + atomic_t *sz = &kn->iattr->user_xattr_size; + atomic_t *nr = &kn->iattr->nr_user_xattrs; + ssize_t removed_size; + int ret; + + if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) { + ret = -ENOSPC; + goto dec_count_out; + } + + if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) { + ret = -ENOSPC; + goto dec_size_out; + } + + ret = simple_xattr_set(xattrs, full_name, value, size, flags, + &removed_size); + + if (!ret && removed_size >= 0) + size = removed_size; + else if (!ret) + return 0; +dec_size_out: + atomic_sub(size, sz); +dec_count_out: + atomic_dec(nr); + return ret; +} + +static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn, + const char *full_name, + struct simple_xattrs *xattrs, + const void *value, size_t size, int flags) +{ + atomic_t *sz = &kn->iattr->user_xattr_size; + atomic_t *nr = &kn->iattr->nr_user_xattrs; + ssize_t removed_size; + int ret; + + ret = simple_xattr_set(xattrs, full_name, value, size, flags, + &removed_size); + + if (removed_size >= 0) { + atomic_sub(removed_size, sz); + atomic_dec(nr); + } + + return ret; +} + +static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *suffix, const void *value, + size_t size, int flags) +{ + const char *full_name = xattr_full_name(handler, suffix); + struct kernfs_node *kn = inode->i_private; + struct kernfs_iattrs *attrs; + + if (!(kernfs_root(kn)->flags & KERNFS_ROOT_SUPPORT_USER_XATTR)) + return 
-EOPNOTSUPP; + + attrs = kernfs_iattrs(kn); + if (!attrs) + return -ENOMEM; + + if (value) + return kernfs_vfs_user_xattr_add(kn, full_name, &attrs->xattrs, + value, size, flags); + else + return kernfs_vfs_user_xattr_rm(kn, full_name, &attrs->xattrs, + value, size, flags); + +} + static const struct xattr_handler kernfs_trusted_xattr_handler = { .prefix = XATTR_TRUSTED_PREFIX, .get = kernfs_vfs_xattr_get, @@ -339,8 +421,15 @@ static const struct xattr_handler kernfs_security_xattr_handler = { .set = kernfs_vfs_xattr_set, }; +static const struct xattr_handler kernfs_user_xattr_handler = { + .prefix = XATTR_USER_PREFIX, + .get = kernfs_vfs_xattr_get, + .set = kernfs_vfs_user_xattr_set, +}; + const struct xattr_handler *kernfs_xattr_handlers[] = { &kernfs_trusted_xattr_handler, &kernfs_security_xattr_handler, + &kernfs_user_xattr_handler, NULL }; diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 2f3c51d55261..7ee97ef59184 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -26,6 +26,8 @@ struct kernfs_iattrs { struct timespec64 ia_ctime; struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; }; /* +1 to avoid triggering overflow warning when negating it */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index dded2e5a9f42..89f6a4214a70 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -37,8 +37,10 @@ enum kernfs_node_type { KERNFS_LINK = 0x0004, }; -#define KERNFS_TYPE_MASK 0x000f -#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK +#define KERNFS_TYPE_MASK 0x000f +#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK +#define KERNFS_MAX_USER_XATTRS 128 +#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10) enum kernfs_node_flag { KERNFS_ACTIVATED = 0x0010, @@ -78,6 +80,11 @@ enum kernfs_root_flag { * fhandle to access nodes of the fs. */ KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004, + + /* + * Support user xattrs to be written to nodes rooted at this root. + */ + KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008, }; /* type-specific structures for kernfs_node union members */ -- cgit v1.2.3-58-ga151 From 19f747f7370fcf4ced4988ed795ccd4a28f2b530 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 13 Mar 2020 13:30:58 -0700 Subject: scsi: linux/unaligned/byteshift.h: Remove superfluous casts The C language supports implicitly casting a void pointer into a non-void pointer. Remove explicit void pointer to non-void pointer casts because these are superfluous. Link: https://lore.kernel.org/r/20200313203102.16613-2-bvanassche@acm.org Cc: Harvey Harrison Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. Peter Anvin Cc: Andrew Morton Reviewed-by: Christoph Hellwig Signed-off-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- include/linux/unaligned/be_byteshift.h | 6 +++--- include/linux/unaligned/le_byteshift.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h index 8bdb8fa01bd4..c43ff5918c8a 100644 --- a/include/linux/unaligned/be_byteshift.h +++ b/include/linux/unaligned/be_byteshift.h @@ -40,17 +40,17 @@ static inline void __put_unaligned_be64(u64 val, u8 *p) static inline u16 get_unaligned_be16(const void *p) { - return __get_unaligned_be16((const u8 *)p); + return __get_unaligned_be16(p); } static inline u32 get_unaligned_be32(const void *p) { - return __get_unaligned_be32((const u8 *)p); + return __get_unaligned_be32(p); } static inline u64 get_unaligned_be64(const void *p) { - return __get_unaligned_be64((const u8 *)p); + return __get_unaligned_be64(p); } static inline void put_unaligned_be16(u16 val, void *p) diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h index 1628b75866f0..2248dcb0df76 100644 --- a/include/linux/unaligned/le_byteshift.h +++ b/include/linux/unaligned/le_byteshift.h @@ -40,17 +40,17 @@ static inline void __put_unaligned_le64(u64 val, u8 *p) static inline u16 get_unaligned_le16(const void *p) { - return __get_unaligned_le16((const u8 *)p); + return __get_unaligned_le16(p); } static inline u32 get_unaligned_le32(const void *p) { - return __get_unaligned_le32((const u8 *)p); + return __get_unaligned_le32(p); } static inline u64 get_unaligned_le64(const void *p) { - return __get_unaligned_le64((const u8 *)p); + return __get_unaligned_le64(p); } static inline void put_unaligned_le16(u16 val, void *p) -- cgit v1.2.3-58-ga151 From a7afff31d56db22647251d76d6af030cd47bd97e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 13 Mar 2020 13:31:00 -0700 Subject: scsi: treewide: Consolidate {get,put}_unaligned_[bl]e24() definitions Move the get_unaligned_be24(), get_unaligned_le24() and put_unaligned_le24() definitions from various drivers into include/linux/unaligned/generic.h. Add a put_unaligned_be24() implementation. Link: https://lore.kernel.org/r/20200313203102.16613-4-bvanassche@acm.org Cc: Keith Busch Cc: Sagi Grimberg Cc: Jens Axboe Cc: Harvey Harrison Cc: Martin K. Petersen Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. Peter Anvin Cc: Andrew Morton Reviewed-by: Christoph Hellwig Reviewed-by: Andy Shevchenko Reviewed-by: Greg Kroah-Hartman # For drivers/usb Reviewed-by: Felipe Balbi # For drivers/usb/gadget Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/nvme/host/rdma.c | 8 ----- drivers/nvme/target/rdma.c | 6 ---- drivers/usb/gadget/function/f_mass_storage.c | 1 + drivers/usb/gadget/function/storage_common.h | 5 --- include/linux/unaligned/generic.h | 46 ++++++++++++++++++++++++++++ include/target/target_core_backend.h | 6 ---- 6 files changed, 47 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2a47c6c5007e..85e9bcbd44b4 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -142,14 +142,6 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); static const struct blk_mq_ops nvme_rdma_mq_ops; static const struct blk_mq_ops nvme_rdma_admin_mq_ops; -/* XXX: really should move to a generic header sooner or later.. 
*/ -static inline void put_unaligned_le24(u32 val, u8 *p) -{ - *p++ = val; - *p++ = val >> 8; - *p++ = val >> 16; -} - static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue) { return queue - queue->ctrl->queues; diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 37d262a65877..8fcede75e02a 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -143,12 +143,6 @@ static int num_pages(int len) return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT); } -/* XXX: really should move to a generic header sooner or later.. */ -static inline u32 get_unaligned_le24(const u8 *p) -{ - return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16; -} - static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) { return nvme_is_write(rsp->req.cmd) && diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 7c96c4665178..950d2a85f098 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -216,6 +216,7 @@ #include #include #include +#include #include #include diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h index e5e3a2553aaa..bdeb1e233fc9 100644 --- a/drivers/usb/gadget/function/storage_common.h +++ b/drivers/usb/gadget/function/storage_common.h @@ -172,11 +172,6 @@ enum data_direction { DATA_DIR_NONE }; -static inline u32 get_unaligned_be24(u8 *buf) -{ - return 0xffffff & (u32) get_unaligned_be32(buf - 1); -} - static inline struct fsg_lun *fsg_lun_from_dev(struct device *dev) { return container_of(dev, struct fsg_lun, dev); diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h index 57d3114656e5..303289492859 100644 --- a/include/linux/unaligned/generic.h +++ b/include/linux/unaligned/generic.h @@ -2,6 +2,8 @@ #ifndef _LINUX_UNALIGNED_GENERIC_H #define _LINUX_UNALIGNED_GENERIC_H +#include + /* * Cause a link-time error if we try an unaligned access other than * 1,2,4 or 8 bytes long @@ -66,4 +68,48 @@ extern void __bad_unaligned_access_size(void); } \ (void)0; }) +static inline u32 __get_unaligned_be24(const u8 *p) +{ + return p[0] << 16 | p[1] << 8 | p[2]; +} + +static inline u32 get_unaligned_be24(const void *p) +{ + return __get_unaligned_be24(p); +} + +static inline u32 __get_unaligned_le24(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16; +} + +static inline u32 get_unaligned_le24(const void *p) +{ + return __get_unaligned_le24(p); +} + +static inline void __put_unaligned_be24(const u32 val, u8 *p) +{ + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline void put_unaligned_be24(const u32 val, void *p) +{ + __put_unaligned_be24(val, p); +} + +static inline void __put_unaligned_le24(const u32 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; + *p++ = val >> 16; +} + +static inline void put_unaligned_le24(const u32 val, void *p) +{ + __put_unaligned_le24(val, p); +} + #endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 51b6f50eabee..1b752d8ea529 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -116,10 +116,4 @@ static inline bool target_dev_configured(struct se_device *se_dev) return !!(se_dev->dev_flags & DF_CONFIGURED); } -/* Only use get_unaligned_be24() if reading p - 1 is allowed. 
*/ -static inline uint32_t get_unaligned_be24(const uint8_t *const p) -{ - return get_unaligned_be32(p - 1) & 0xffffffU; -} - #endif /* TARGET_CORE_BACKEND_H */ -- cgit v1.2.3-58-ga151 From 9ec4bbcb2044ea1f380c9feceb10654dd5a35a95 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 13 Mar 2020 19:42:39 +0000 Subject: mtd: spi-nor: Add the concept of SPI NOR manufacturer driver Declare a spi_nor_manufacturer struct and add basic building blocks to move manufacturer specific code outside of the core. Signed-off-by: Boris Brezillon Signed-off-by: Tudor Ambarus Reviewed-by: Vignesh Raghavendra --- drivers/mtd/spi-nor/core.c | 78 ++++++++++++++++++++++++++++++++++++++------- drivers/mtd/spi-nor/core.h | 14 ++++++++ include/linux/mtd/spi-nor.h | 8 +++++ 3 files changed, 89 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index aae94e4250f6..4494959cd937 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2474,8 +2474,26 @@ static const struct flash_info spi_nor_ids[] = { { }, }; +static const struct spi_nor_manufacturer *manufacturers[0]; + +static const struct flash_info * +spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts, + const u8 *id) +{ + unsigned int i; + + for (i = 0; i < nparts; i++) { + if (parts[i].id_len && + !memcmp(parts[i].id, id, parts[i].id_len)) + return &parts[i]; + } + + return NULL; +} + static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) { + const struct flash_info *info; u8 *id = nor->bouncebuf; unsigned int i; int ret; @@ -2497,11 +2515,21 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) return ERR_PTR(ret); } - for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) { - if (spi_nor_ids[i].id_len && - !memcmp(spi_nor_ids[i].id, id, spi_nor_ids[i].id_len)) - return &spi_nor_ids[i]; + for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { + info = spi_nor_search_part_by_id(manufacturers[i]->parts, + manufacturers[i]->nparts, + id); + if (info) { + nor->manufacturer = manufacturers[i]; + return info; + } } + + info = spi_nor_search_part_by_id(spi_nor_ids, + ARRAY_SIZE(spi_nor_ids) - 1, id); + if (info) + return info; + dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n", SPI_NOR_MAX_ID_LEN, id); return ERR_PTR(-ENODEV); @@ -2987,6 +3015,16 @@ int spi_nor_post_bfpt_fixups(struct spi_nor *nor, const struct sfdp_bfpt *bfpt, struct spi_nor_flash_parameter *params) { + int ret; + + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->post_bfpt) { + ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header, + bfpt, params); + if (ret) + return ret; + } + if (nor->info->fixups && nor->info->fixups->post_bfpt) return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt, params); @@ -3296,6 +3334,10 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor) break; } + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->default_init) + nor->manufacturer->fixups->default_init(nor); + if (nor->info->fixups && nor->info->fixups->default_init) nor->info->fixups->default_init(nor); } @@ -3455,6 +3497,10 @@ static void spi_nor_post_sfdp_fixups(struct spi_nor *nor) if (nor->info->flags & SPI_S3AN) s3an_post_sfdp_fixups(nor); + if (nor->manufacturer && nor->manufacturer->fixups && + nor->manufacturer->fixups->post_sfdp) + nor->manufacturer->fixups->post_sfdp(nor); + if (nor->info->fixups && nor->info->fixups->post_sfdp) 
nor->info->fixups->post_sfdp(nor); } @@ -3617,15 +3663,25 @@ void spi_nor_restore(struct spi_nor *nor) } EXPORT_SYMBOL_GPL(spi_nor_restore); -static const struct flash_info *spi_nor_match_id(const char *name) +static const struct flash_info *spi_nor_match_id(struct spi_nor *nor, + const char *name) { - const struct flash_info *id = spi_nor_ids; + unsigned int i, j; - while (id->name) { - if (!strcmp(name, id->name)) - return id; - id++; + for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) { + if (!strcmp(name, spi_nor_ids[i].name)) + return &spi_nor_ids[i]; } + + for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { + for (j = 0; j < manufacturers[i]->nparts; j++) { + if (!strcmp(name, manufacturers[i]->parts[j].name)) { + nor->manufacturer = manufacturers[i]; + return &manufacturers[i]->parts[j]; + } + } + } + return NULL; } @@ -3672,7 +3728,7 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor, const struct flash_info *info = NULL; if (name) - info = spi_nor_match_id(name); + info = spi_nor_match_id(nor, name); /* Try to auto-detect if chip name wasn't specified or not found */ if (!info) info = spi_nor_read_id(nor); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index abec65081519..8599796dfc40 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -152,6 +152,20 @@ struct flash_info { .addr_width = 3, \ .flags = SPI_NOR_NO_FR | SPI_S3AN, +/** + * struct spi_nor_manufacturer - SPI NOR manufacturer object + * @name: manufacturer name + * @parts: array of parts supported by this manufacturer + * @nparts: number of entries in the parts array + * @fixups: hooks called at various points in time during spi_nor_scan() + */ +struct spi_nor_manufacturer { + const char *name; + const struct flash_info *parts; + unsigned int nparts; + const struct spi_nor_fixups *fixups; +}; + int spi_nor_write_enable(struct spi_nor *nor); int spi_nor_write_disable(struct spi_nor *nor); int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable); diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 2b9717b0cd62..bf37bfc68797 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -554,6 +554,12 @@ struct spi_nor_flash_parameter { */ struct flash_info; +/** + * struct spi_nor_manufacturer - Forward declaration of a structure used + * internally by the core and manufacturer drivers. + */ +struct spi_nor_manufacturer; + /** * struct spi_nor - Structure for defining a the SPI NOR layer * @mtd: point to a mtd_info structure @@ -564,6 +570,7 @@ struct flash_info; * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer * @info: spi-nor part JDEC MFR id and other info + * @manufacturer: spi-nor manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -591,6 +598,7 @@ struct spi_nor { u8 *bouncebuf; size_t bouncebuf_size; const struct flash_info *info; + const struct spi_nor_manufacturer *manufacturer; u32 page_size; u8 addr_width; u8 erase_opcode; -- cgit v1.2.3-58-ga151 From d3c4bb31bf627ede607d7b1827e6be43f1b26be7 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Fri, 13 Mar 2020 19:42:52 +0000 Subject: mtd: spi-nor: Drop the MFR definitions Cross manufacturer code is unlikely and discouraged, get rid of the MFR definitions. 
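[ ed: a sketch of why the MFR IDs become dead weight once the previous
  patch's manufacturer drivers exist: the core dispatches through
  per-vendor fixup tables instead of switching on the JEDEC manufacturer
  byte. Names below are invented, not the spi-nor API.

struct chip;				/* opaque device handle */

struct vendor_fixups {
	void (*default_init)(struct chip *chip);
	void (*post_sfdp)(struct chip *chip);
};

struct vendor_driver {
	const char *name;
	const struct vendor_fixups *fixups;
};

/*
 * Core code stays vendor-agnostic: no "if (JEDEC_MFR(info) == ...)"
 * chains, just an optional hook supplied by the matched vendor driver.
 */
static void chip_default_init(struct chip *chip,
			      const struct vendor_driver *vendor)
{
	if (vendor && vendor->fixups && vendor->fixups->default_init)
		vendor->fixups->default_init(chip);
}
]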
Suggested-by: Vignesh Raghavendra Signed-off-by: Tudor Ambarus Reviewed-by: Boris Brezillon --- drivers/mtd/spi-nor/core.c | 2 -- include/linux/mtd/spi-nor.h | 17 ----------------- 2 files changed, 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index baee58fd8b04..b07e66f10995 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -40,8 +40,6 @@ #define SPI_NOR_MAX_ADDR_WIDTH 4 -#define JEDEC_MFR(info) ((info)->id[0]) - /** * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data * transfer diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index bf37bfc68797..2f7725525460 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -11,23 +11,6 @@ #include #include -/* - * Manufacturer IDs - * - * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. - * Sometimes these are the same as CFI IDs, but sometimes they aren't. - */ -#define SNOR_MFR_ATMEL CFI_MFR_ATMEL -#define SNOR_MFR_GIGADEVICE 0xc8 -#define SNOR_MFR_INTEL CFI_MFR_INTEL -#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */ -#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */ -#define SNOR_MFR_ISSI CFI_MFR_PMC -#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX -#define SNOR_MFR_SPANSION CFI_MFR_AMD -#define SNOR_MFR_SST CFI_MFR_SST -#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ - /* * Note on opcode nomenclature: some opcodes have a format like * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number -- cgit v1.2.3-58-ga151 From 829ec6408dc58dbf27522bbd57d0a85b0a3d1a0e Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Fri, 13 Mar 2020 19:42:53 +0000 Subject: mtd: spi-nor: Trim what is exposed in spi-nor.h The SPI NOR controller drivers must not be able to use structures that are meant just for the SPI NOR core. struct spi_nor_flash_parameter is filled at run-time with info gathered from flash_info, manufacturer and SFDP data. struct spi_nor_flash_parameter should be opaque to the SPI NOR controller drivers; make sure it is. spi_nor_option_flags, spi_nor_read_command, spi_nor_pp_command, spi_nor_read_command_index and spi_nor_pp_command_index are defined for core use; make sure they are opaque to the SPI NOR controller drivers.
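Reduced to a sketch, the new split looks like this (simplified from the hunks below; only the moving parts are shown):

    /* include/linux/mtd/spi-nor.h: controller drivers now see only an
     * opaque forward declaration and a pointer in struct spi_nor ... */
    struct spi_nor_flash_parameter;

    struct spi_nor {
            /* ... */
            const struct flash_info *info;
            struct spi_nor_flash_parameter *params; /* was an embedded struct */
    };

    /* ... while the full layout moves to the private core.h and the core
     * allocates it at scan time: */
    static int spi_nor_init_params(struct spi_nor *nor)
    {
            nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params),
                                       GFP_KERNEL);
            if (!nor->params)
                    return -ENOMEM;

            /* filled in from flash_info, manufacturer fixups and SFDP */
            return 0;
    }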
Signed-off-by: Tudor Ambarus Reviewed-by: Boris Brezillon Reviewed-by: Vignesh Raghavendra --- drivers/mtd/spi-nor/core.c | 86 ++++++++----- drivers/mtd/spi-nor/core.h | 214 ++++++++++++++++++++++++++++++++ drivers/mtd/spi-nor/gigadevice.c | 2 +- drivers/mtd/spi-nor/issi.c | 2 +- drivers/mtd/spi-nor/macronix.c | 4 +- drivers/mtd/spi-nor/micron-st.c | 4 +- drivers/mtd/spi-nor/sfdp.c | 10 ++ drivers/mtd/spi-nor/spansion.c | 2 +- drivers/mtd/spi-nor/winbond.c | 2 +- drivers/mtd/spi-nor/xilinx.c | 4 +- include/linux/mtd/spi-nor.h | 260 +-------------------------------------- 11 files changed, 294 insertions(+), 296 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index b07e66f10995..877557dbda7f 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -778,7 +778,7 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) ret = spi_nor_read_cr(nor, &sr_cr[1]); if (ret) return ret; - } else if (nor->params.quad_enable) { + } else if (nor->params->quad_enable) { /* * If the Status Register 2 Read command (35h) is not * supported, we should at least be sure we don't @@ -786,7 +786,7 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) * * We can safely assume that when the Quad Enable method is * set, the value of the QE bit is one, as a consequence of the - * nor->params.quad_enable() call. + * nor->params->quad_enable() call. * * We can safely assume that the Quad Enable bit is present in * the Status Register 2 at BIT(1). According to the JESD216 @@ -1051,6 +1051,11 @@ static u8 spi_nor_convert_3to4_erase(u8 opcode) ARRAY_SIZE(spi_nor_3to4_erase)); } +static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) +{ + return !!nor->params->erase_map.uniform_erase_type; +} + static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) { nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); @@ -1058,7 +1063,7 @@ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); if (!spi_nor_has_uniform_erase(nor)) { - struct spi_nor_erase_map *map = &nor->params.erase_map; + struct spi_nor_erase_map *map = &nor->params->erase_map; struct spi_nor_erase_type *erase; int i; @@ -1095,10 +1100,10 @@ void spi_nor_unlock_and_unprep(struct spi_nor *nor) static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr) { - if (!nor->params.convert_addr) + if (!nor->params->convert_addr) return addr; - return nor->params.convert_addr(nor, addr); + return nor->params->convert_addr(nor, addr); } /* @@ -1203,6 +1208,16 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, return NULL; } +static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region) +{ + return region->offset & SNOR_LAST_REGION; +} + +static u64 spi_nor_region_end(const struct spi_nor_erase_region *region) +{ + return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; +} + /** * spi_nor_region_next() - get the next spi nor region * @region: pointer to a structure that describes a SPI NOR erase region @@ -1307,7 +1322,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor, struct list_head *erase_list, u64 addr, u32 len) { - const struct spi_nor_erase_map *map = &nor->params.erase_map; + const struct spi_nor_erase_map *map = &nor->params->erase_map; const struct spi_nor_erase_type *erase, *prev_erase = NULL; struct spi_nor_erase_region *region; struct spi_nor_erase_command *cmd = NULL; @@ -1793,7 +1808,7 @@ 
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) if (ret) return ret; - ret = nor->params.locking_ops->lock(nor, ofs, len); + ret = nor->params->locking_ops->lock(nor, ofs, len); spi_nor_unlock_and_unprep(nor); return ret; @@ -1808,7 +1823,7 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) if (ret) return ret; - ret = nor->params.locking_ops->unlock(nor, ofs, len); + ret = nor->params->locking_ops->unlock(nor, ofs, len); spi_nor_unlock_and_unprep(nor); return ret; @@ -1823,7 +1838,7 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) if (ret) return ret; - ret = nor->params.locking_ops->is_locked(nor, ofs, len); + ret = nor->params->locking_ops->is_locked(nor, ofs, len); spi_nor_unlock_and_unprep(nor); return ret; @@ -2288,7 +2303,7 @@ static int spi_nor_spimem_check_pp(struct spi_nor *nor, static void spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) { - struct spi_nor_flash_parameter *params = &nor->params; + struct spi_nor_flash_parameter *params = nor->params; unsigned int cap; /* DTR modes are not supported yet, mask them all. */ @@ -2387,7 +2402,7 @@ static int spi_nor_select_read(struct spi_nor *nor, if (cmd < 0) return -EINVAL; - read = &nor->params.reads[cmd]; + read = &nor->params->reads[cmd]; nor->read_opcode = read->opcode; nor->read_proto = read->proto; @@ -2418,7 +2433,7 @@ static int spi_nor_select_pp(struct spi_nor *nor, if (cmd < 0) return -EINVAL; - pp = &nor->params.page_programs[cmd]; + pp = &nor->params->page_programs[cmd]; nor->program_opcode = pp->opcode; nor->write_proto = pp->proto; return 0; @@ -2479,7 +2494,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map, static int spi_nor_select_erase(struct spi_nor *nor) { - struct spi_nor_erase_map *map = &nor->params.erase_map; + struct spi_nor_erase_map *map = &nor->params->erase_map; const struct spi_nor_erase_type *erase = NULL; struct mtd_info *mtd = &nor->mtd; u32 wanted_size = nor->info->sector_size; @@ -2528,7 +2543,7 @@ static int spi_nor_select_erase(struct spi_nor *nor) static int spi_nor_default_setup(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps) { - struct spi_nor_flash_parameter *params = &nor->params; + struct spi_nor_flash_parameter *params = nor->params; u32 ignored_mask, shared_mask; int err; @@ -2589,10 +2604,10 @@ static int spi_nor_default_setup(struct spi_nor *nor, static int spi_nor_setup(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps) { - if (!nor->params.setup) + if (!nor->params->setup) return 0; - return nor->params.setup(nor, hwcaps); + return nor->params->setup(nor, hwcaps); } /** @@ -2622,13 +2637,13 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor) { struct spi_nor_flash_parameter sfdp_params; - memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params)); + memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); if (spi_nor_parse_sfdp(nor, &sfdp_params)) { nor->addr_width = 0; nor->flags &= ~SNOR_F_4B_OPCODES; } else { - memcpy(&nor->params, &sfdp_params, sizeof(nor->params)); + memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); } } @@ -2639,7 +2654,7 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor) */ static void spi_nor_info_init_params(struct spi_nor *nor) { - struct spi_nor_flash_parameter *params = &nor->params; + struct spi_nor_flash_parameter *params = nor->params; struct spi_nor_erase_map *map = ¶ms->erase_map; const struct flash_info *info = nor->info; struct device_node *np = spi_nor_get_flash_node(nor); @@ -2758,8 +2773,8 @@ static 
void spi_nor_late_init_params(struct spi_nor *nor) * NOR protection support. When locking_ops are not provided, we pick * the default ones. */ - if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops) - nor->params.locking_ops = &spi_nor_sr_locking_ops; + if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) + nor->params->locking_ops = &spi_nor_sr_locking_ops; } /** @@ -2799,8 +2814,12 @@ static void spi_nor_late_init_params(struct spi_nor *nor) * ->default_init() hook or the SFDP parser do not set specific params. * spi_nor_late_init_params() */ -static void spi_nor_init_params(struct spi_nor *nor) +static int spi_nor_init_params(struct spi_nor *nor) { + nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL); + if (!nor->params) + return -ENOMEM; + spi_nor_info_init_params(nor); spi_nor_manufacturer_init_params(nor); @@ -2812,6 +2831,8 @@ static void spi_nor_init_params(struct spi_nor *nor) spi_nor_post_sfdp_fixups(nor); spi_nor_late_init_params(nor); + + return 0; } /** @@ -2822,14 +2843,14 @@ static void spi_nor_init_params(struct spi_nor *nor) */ static int spi_nor_quad_enable(struct spi_nor *nor) { - if (!nor->params.quad_enable) + if (!nor->params->quad_enable) return 0; if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 || spi_nor_get_protocol_width(nor->write_proto) == 4)) return 0; - return nor->params.quad_enable(nor); + return nor->params->quad_enable(nor); } /** @@ -2844,7 +2865,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor) static int spi_nor_unlock_all(struct spi_nor *nor) { if (nor->flags & SNOR_F_HAS_LOCK) - return spi_nor_unlock(&nor->mtd, 0, nor->params.size); + return spi_nor_unlock(&nor->mtd, 0, nor->params->size); return 0; } @@ -2875,7 +2896,7 @@ static int spi_nor_init(struct spi_nor *nor) */ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, "enabling reset hack; may not recover from unexpected reboots\n"); - nor->params.set_4byte_addr_mode(nor, true); + nor->params->set_4byte_addr_mode(nor, true); } return 0; @@ -2899,7 +2920,7 @@ void spi_nor_restore(struct spi_nor *nor) /* restore the addressing mode */ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && nor->flags & SNOR_F_BROKEN_RESET) - nor->params.set_4byte_addr_mode(nor, false); + nor->params->set_4byte_addr_mode(nor, false); } EXPORT_SYMBOL_GPL(spi_nor_restore); @@ -3004,7 +3025,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, struct device *dev = nor->dev; struct mtd_info *mtd = &nor->mtd; struct device_node *np = spi_nor_get_flash_node(nor); - struct spi_nor_flash_parameter *params = &nor->params; int ret; int i; @@ -3055,7 +3075,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, mtd->_write = spi_nor_write; /* Init flash parameters based on flash_info struct and SFDP */ - spi_nor_init_params(nor); + ret = spi_nor_init_params(nor); + if (ret) + return ret; if (!mtd->name) mtd->name = dev_name(dev); @@ -3063,12 +3085,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, mtd->type = MTD_NORFLASH; mtd->writesize = 1; mtd->flags = MTD_CAP_NORFLASH; - mtd->size = params->size; + mtd->size = nor->params->size; mtd->_erase = spi_nor_erase; mtd->_read = spi_nor_read; mtd->_resume = spi_nor_resume; - if (nor->params.locking_ops) { + if (nor->params->locking_ops) { mtd->_lock = spi_nor_lock; mtd->_unlock = spi_nor_unlock; mtd->_is_locked = spi_nor_is_locked; @@ -3091,7 +3113,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, mtd->flags |= MTD_NO_ERASE; mtd->dev.parent = dev; - nor->page_size = params->page_size; + 
nor->page_size = nor->params->page_size; mtd->writebufsize = nor->page_size; if (of_property_read_bool(np, "broken-flash-reset")) diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 2bc620708d6f..3ce826b35ad1 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -11,6 +11,220 @@ #define SPI_NOR_MAX_ID_LEN 6 +enum spi_nor_option_flags { + SNOR_F_USE_FSR = BIT(0), + SNOR_F_HAS_SR_TB = BIT(1), + SNOR_F_NO_OP_CHIP_ERASE = BIT(2), + SNOR_F_READY_XSR_RDY = BIT(3), + SNOR_F_USE_CLSR = BIT(4), + SNOR_F_BROKEN_RESET = BIT(5), + SNOR_F_4B_OPCODES = BIT(6), + SNOR_F_HAS_4BAIT = BIT(7), + SNOR_F_HAS_LOCK = BIT(8), + SNOR_F_HAS_16BIT_SR = BIT(9), + SNOR_F_NO_READ_CR = BIT(10), + SNOR_F_HAS_SR_TB_BIT6 = BIT(11), +}; + +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octal SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octal SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + + SNOR_CMD_PP_MAX +}; + +/** + * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type. + * JEDEC JESD216B imposes erase sizes to be a power of 2. + * @size_shift: @size is a power of 2, the shift is stored in + * @size_shift. + * @size_mask: the size mask based on @size_shift. + * @opcode: the SPI command op code to erase the sector/block. + * @idx: Erase Type index as sorted in the Basic Flash Parameter + * Table. It will be used to synchronize the supported + * Erase Types with the ones identified in the SFDP + * optional tables. + */ +struct spi_nor_erase_type { + u32 size; + u32 size_shift; + u32 size_mask; + u8 opcode; + u8 idx; +}; + +/** + * struct spi_nor_erase_command - Used for non-uniform erases + * The structure is used to describe a list of erase commands to be executed + * once we validate that the erase can be performed. The elements in the list + * are run-length encoded. + * @list: for inclusion into the list of erase commands. + * @count: how many times the same erase command should be + * consecutively used. + * @size: the size of the sector/block erased by the command. + * @opcode: the SPI command op code to erase the sector/block. + */ +struct spi_nor_erase_command { + struct list_head list; + u32 count; + u32 size; + u8 opcode; +}; + +/** + * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region + * @offset: the offset in the data array of erase region start. + * LSB bits are used as a bitmask encoding flags to + * determine if this region is overlaid, if this region is + * the last in the SPI NOR flash memory and to indicate + * all the supported erase commands inside this region. + * The erase types are sorted in ascending order with the + * smallest Erase Type size being at BIT(0). + * @size: the size of the region in bytes. 
+ */ +struct spi_nor_erase_region { + u64 offset; + u64 size; +}; + +#define SNOR_ERASE_TYPE_MAX 4 +#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) + +#define SNOR_LAST_REGION BIT(4) +#define SNOR_OVERLAID_REGION BIT(5) + +#define SNOR_ERASE_FLAGS_MAX 6 +#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) + +/** + * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map + * @regions: array of erase regions. The regions are consecutive in + * address space. Walking through the regions is done + * incrementally. + * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform + * sector size (legacy implementation). + * @erase_type: an array of erase types shared by all the regions. + * The erase types are sorted in ascending order, with the + * smallest Erase Type size being the first member in the + * erase_type array. + * @uniform_erase_type: bitmask encoding erase types that can erase the + * entire memory. This member is completed at init by + * uniform and non-uniform SPI NOR flash memories if they + * support at least one erase type that can erase the + * entire memory. + */ +struct spi_nor_erase_map { + struct spi_nor_erase_region *regions; + struct spi_nor_erase_region uniform_region; + struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; + u8 uniform_erase_type; +}; + +/** + * struct spi_nor_locking_ops - SPI NOR locking methods + * @lock: lock a region of the SPI NOR. + * @unlock: unlock a region of the SPI NOR. + * @is_locked: check if a region of the SPI NOR is completely locked + */ +struct spi_nor_locking_ops { + int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); +}; + +/** + * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. + * Includes legacy flash parameters and settings that can be overwritten + * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 + * Serial Flash Discoverable Parameters (SFDP) tables. + * + * @size: the flash memory density in bytes. + * @page_size: the page size of the SPI NOR flash memory. + * @hwcaps: describes the read and page program hardware + * capabilities. + * @reads: read capabilities ordered by priority: the higher index + * in the array, the higher priority. + * @page_programs: page program capabilities ordered by priority: the + * higher index in the array, the higher priority. + * @erase_map: the erase map parsed from the SFDP Sector Map Parameter + * Table. + * @quad_enable: enables SPI NOR quad mode. + * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. + * @convert_addr: converts an absolute address into something the flash + * will understand. Particularly useful when pagesize is + * not a power-of-2. + * @setup: configures the SPI NOR memory. Useful for SPI NOR + * flashes that have peculiarities to the SPI NOR standard + * e.g. different opcodes, specific address calculation, + * page size, etc. + * @locking_ops: SPI NOR locking methods. 
+ */ +struct spi_nor_flash_parameter { + u64 size; + u32 page_size; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + struct spi_nor_erase_map erase_map; + + int (*quad_enable)(struct spi_nor *nor); + int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); + u32 (*convert_addr)(struct spi_nor *nor, u32 addr); + int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); + + const struct spi_nor_locking_ops *locking_ops; +}; + /** * struct spi_nor_fixups - SPI NOR fixup hooks * @default_init: called after default flash parameters init. Used to tweak diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c index 7930e4490dab..447d84bb2128 100644 --- a/drivers/mtd/spi-nor/gigadevice.c +++ b/drivers/mtd/spi-nor/gigadevice.c @@ -16,7 +16,7 @@ static void gd25q256_default_init(struct spi_nor *nor) * indicate the quad_enable method for this case, we need * to set it in the default_init fixup hook. */ - nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; } static struct spi_nor_fixups gd25q256_fixups = { diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c index 3a1c34c41388..ffcb60e54a80 100644 --- a/drivers/mtd/spi-nor/issi.c +++ b/drivers/mtd/spi-nor/issi.c @@ -68,7 +68,7 @@ static const struct flash_info issi_parts[] = { static void issi_default_init(struct spi_nor *nor) { - nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; } static const struct spi_nor_fixups issi_fixups = { diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index c9b6b45d8f99..ab0f963d630c 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -82,8 +82,8 @@ static const struct flash_info macronix_parts[] = { static void macronix_default_init(struct spi_nor *nor) { - nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable; - nor->params.set_4byte_addr_mode = spi_nor_set_4byte_addr_mode; + nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; + nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode; } static const struct spi_nor_fixups macronix_fixups = { diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 9d32ee0ef5a5..3874a62d8b47 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -130,8 +130,8 @@ static void micron_st_default_init(struct spi_nor *nor) { nor->flags |= SNOR_F_HAS_LOCK; nor->flags &= ~SNOR_F_HAS_16BIT_SR; - nor->params.quad_enable = NULL; - nor->params.set_4byte_addr_mode = st_micron_set_4byte_addr_mode; + nor->params->quad_enable = NULL; + nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode; } static const struct spi_nor_fixups micron_st_fixups = { diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index c162015d19b1..df967f1f4951 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -734,6 +734,16 @@ out: return ret; } +static void spi_nor_region_mark_end(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_LAST_REGION; +} + +static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_OVERLAID_REGION; +} + /** * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid * @region: pointer to a structure that describes a SPI NOR erase region diff --git 
a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 16683983a20e..6756202ace4b 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -74,7 +74,7 @@ static const struct flash_info spansion_parts[] = { static void spansion_post_sfdp_fixups(struct spi_nor *nor) { - if (nor->params.size <= SZ_16M) + if (nor->params->size <= SZ_16M) return; nor->flags |= SNOR_F_4B_OPCODES; diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 3f8c568091d3..17deabad57e1 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -97,7 +97,7 @@ static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable) static void winbond_default_init(struct spi_nor *nor) { - nor->params.set_4byte_addr_mode = winbond_set_4byte_addr_mode; + nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode; } static const struct spi_nor_fixups winbond_fixups = { diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c index fcf635d89f65..1138bdbf4199 100644 --- a/drivers/mtd/spi-nor/xilinx.c +++ b/drivers/mtd/spi-nor/xilinx.c @@ -70,7 +70,7 @@ static int xilinx_nor_setup(struct spi_nor *nor, nor->mtd.erasesize = 8 * nor->page_size; } else { /* Flash in Default addressing mode */ - nor->params.convert_addr = s3an_convert_addr; + nor->params->convert_addr = s3an_convert_addr; nor->mtd.erasesize = nor->info->sector_size; } @@ -79,7 +79,7 @@ static int xilinx_nor_setup(struct spi_nor *nor, static void xilinx_post_sfdp_fixups(struct spi_nor *nor) { - nor->params.setup = xilinx_nor_setup; + nor->params->setup = xilinx_nor_setup; } static const struct spi_nor_fixups xilinx_fixups = { diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 2f7725525460..e656858b50a5 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -210,110 +210,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) return spi_nor_get_protocol_data_nbits(proto); } -enum spi_nor_option_flags { - SNOR_F_USE_FSR = BIT(0), - SNOR_F_HAS_SR_TB = BIT(1), - SNOR_F_NO_OP_CHIP_ERASE = BIT(2), - SNOR_F_READY_XSR_RDY = BIT(3), - SNOR_F_USE_CLSR = BIT(4), - SNOR_F_BROKEN_RESET = BIT(5), - SNOR_F_4B_OPCODES = BIT(6), - SNOR_F_HAS_4BAIT = BIT(7), - SNOR_F_HAS_LOCK = BIT(8), - SNOR_F_HAS_16BIT_SR = BIT(9), - SNOR_F_NO_READ_CR = BIT(10), - SNOR_F_HAS_SR_TB_BIT6 = BIT(11), - -}; - -/** - * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type - * @size: the size of the sector/block erased by the erase type. - * JEDEC JESD216B imposes erase sizes to be a power of 2. - * @size_shift: @size is a power of 2, the shift is stored in - * @size_shift. - * @size_mask: the size mask based on @size_shift. - * @opcode: the SPI command op code to erase the sector/block. - * @idx: Erase Type index as sorted in the Basic Flash Parameter - * Table. It will be used to synchronize the supported - * Erase Types with the ones identified in the SFDP - * optional tables. - */ -struct spi_nor_erase_type { - u32 size; - u32 size_shift; - u32 size_mask; - u8 opcode; - u8 idx; -}; - -/** - * struct spi_nor_erase_command - Used for non-uniform erases - * The structure is used to describe a list of erase commands to be executed - * once we validate that the erase can be performed. The elements in the list - * are run-length encoded. - * @list: for inclusion into the list of erase commands. - * @count: how many times the same erase command should be - * consecutively used. 
- * @size: the size of the sector/block erased by the command. - * @opcode: the SPI command op code to erase the sector/block. - */ -struct spi_nor_erase_command { - struct list_head list; - u32 count; - u32 size; - u8 opcode; -}; - -/** - * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region - * @offset: the offset in the data array of erase region start. - * LSB bits are used as a bitmask encoding flags to - * determine if this region is overlaid, if this region is - * the last in the SPI NOR flash memory and to indicate - * all the supported erase commands inside this region. - * The erase types are sorted in ascending order with the - * smallest Erase Type size being at BIT(0). - * @size: the size of the region in bytes. - */ -struct spi_nor_erase_region { - u64 offset; - u64 size; -}; - -#define SNOR_ERASE_TYPE_MAX 4 -#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) - -#define SNOR_LAST_REGION BIT(4) -#define SNOR_OVERLAID_REGION BIT(5) - -#define SNOR_ERASE_FLAGS_MAX 6 -#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) - -/** - * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map - * @regions: array of erase regions. The regions are consecutive in - * address space. Walking through the regions is done - * incrementally. - * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform - * sector size (legacy implementation). - * @erase_type: an array of erase types shared by all the regions. - * The erase types are sorted in ascending order, with the - * smallest Erase Type size being the first member in the - * erase_type array. - * @uniform_erase_type: bitmask encoding erase types that can erase the - * entire memory. This member is completed at init by - * uniform and non-uniform SPI NOR flash memories if they - * support at least one erase type that can erase the - * entire memory. - */ -struct spi_nor_erase_map { - struct spi_nor_erase_region *regions; - struct spi_nor_erase_region uniform_region; - struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; - u8 uniform_erase_type; -}; - /** * struct spi_nor_hwcaps - Structure for describing the hardware capabilies * supported by the SPI controller (bus master). 
@@ -389,61 +285,7 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ SNOR_HWCAPS_PP_MASK) -struct spi_nor_read_command { - u8 num_mode_clocks; - u8 num_wait_states; - u8 opcode; - enum spi_nor_protocol proto; -}; - -struct spi_nor_pp_command { - u8 opcode; - enum spi_nor_protocol proto; -}; - -enum spi_nor_read_command_index { - SNOR_CMD_READ, - SNOR_CMD_READ_FAST, - SNOR_CMD_READ_1_1_1_DTR, - - /* Dual SPI */ - SNOR_CMD_READ_1_1_2, - SNOR_CMD_READ_1_2_2, - SNOR_CMD_READ_2_2_2, - SNOR_CMD_READ_1_2_2_DTR, - - /* Quad SPI */ - SNOR_CMD_READ_1_1_4, - SNOR_CMD_READ_1_4_4, - SNOR_CMD_READ_4_4_4, - SNOR_CMD_READ_1_4_4_DTR, - - /* Octal SPI */ - SNOR_CMD_READ_1_1_8, - SNOR_CMD_READ_1_8_8, - SNOR_CMD_READ_8_8_8, - SNOR_CMD_READ_1_8_8_DTR, - - SNOR_CMD_READ_MAX -}; - -enum spi_nor_pp_command_index { - SNOR_CMD_PP, - - /* Quad SPI */ - SNOR_CMD_PP_1_1_4, - SNOR_CMD_PP_1_4_4, - SNOR_CMD_PP_4_4_4, - - /* Octal SPI */ - SNOR_CMD_PP_1_1_8, - SNOR_CMD_PP_1_8_8, - SNOR_CMD_PP_8_8_8, - - SNOR_CMD_PP_MAX -}; - -/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */ +/* Forward declaration that is used in 'struct spi_nor_controller_ops' */ struct spi_nor; /** @@ -474,74 +316,13 @@ struct spi_nor_controller_ops { int (*erase)(struct spi_nor *nor, loff_t offs); }; -/** - * struct spi_nor_locking_ops - SPI NOR locking methods - * @lock: lock a region of the SPI NOR. - * @unlock: unlock a region of the SPI NOR. - * @is_locked: check if a region of the SPI NOR is completely locked - */ -struct spi_nor_locking_ops { - int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); -}; - -/** - * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. - * Includes legacy flash parameters and settings that can be overwritten - * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 - * Serial Flash Discoverable Parameters (SFDP) tables. - * - * @size: the flash memory density in bytes. - * @page_size: the page size of the SPI NOR flash memory. - * @hwcaps: describes the read and page program hardware - * capabilities. - * @reads: read capabilities ordered by priority: the higher index - * in the array, the higher priority. - * @page_programs: page program capabilities ordered by priority: the - * higher index in the array, the higher priority. - * @erase_map: the erase map parsed from the SFDP Sector Map Parameter - * Table. - * @quad_enable: enables SPI NOR quad mode. - * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. - * @convert_addr: converts an absolute address into something the flash - * will understand. Particularly useful when pagesize is - * not a power-of-2. - * @setup: configures the SPI NOR memory. Useful for SPI NOR - * flashes that have peculiarities to the SPI NOR standard - * e.g. different opcodes, specific address calculation, - * page size, etc. - * @locking_ops: SPI NOR locking methods. 
- */ -struct spi_nor_flash_parameter { - u64 size; - u32 page_size; - - struct spi_nor_hwcaps hwcaps; - struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; - struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; - - struct spi_nor_erase_map erase_map; - - int (*quad_enable)(struct spi_nor *nor); - int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); - u32 (*convert_addr)(struct spi_nor *nor, u32 addr); - int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); - - const struct spi_nor_locking_ops *locking_ops; -}; - -/** - * struct flash_info - Forward declaration of a structure used internally by - * spi_nor_scan() +/* + * Forward declarations that are used internally by the core and manufacturer + * drivers. */ struct flash_info; - -/** - * struct spi_nor_manufacturer - Forward declaration of a structure used - * internally by the core and manufacturer drivers. - */ struct spi_nor_manufacturer; +struct spi_nor_flash_parameter; /** * struct spi_nor - Structure for defining a the SPI NOR layer @@ -596,7 +377,7 @@ struct spi_nor { const struct spi_nor_controller_ops *controller_ops; - struct spi_nor_flash_parameter params; + struct spi_nor_flash_parameter *params; struct { struct spi_mem_dirmap_desc *rdesc; @@ -606,35 +387,6 @@ struct spi_nor { void *priv; }; -static u64 __maybe_unused -spi_nor_region_is_last(const struct spi_nor_erase_region *region) -{ - return region->offset & SNOR_LAST_REGION; -} - -static u64 __maybe_unused -spi_nor_region_end(const struct spi_nor_erase_region *region) -{ - return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; -} - -static void __maybe_unused -spi_nor_region_mark_end(struct spi_nor_erase_region *region) -{ - region->offset |= SNOR_LAST_REGION; -} - -static void __maybe_unused -spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) -{ - region->offset |= SNOR_OVERLAID_REGION; -} - -static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor) -{ - return !!nor->params.erase_map.uniform_erase_type; -} - static inline void spi_nor_set_flash_node(struct spi_nor *nor, struct device_node *np) { -- cgit v1.2.3-58-ga151 From a0e374525def2ef18a078523e1faefb5ce2b05e5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 30 Jan 2020 12:06:18 -0800 Subject: libnvdimm/region: Introduce NDD_LABELING The NDD_ALIASING flag is used to indicate where pmem capacity might alias with blk capacity and require labeling. It is also used to indicate whether the DIMM supports labeling. Separate this latter capability into its own flag so that the NDD_ALIASING flag is scoped to true aliased configurations. To my knowledge aliased configurations only exist in the ACPI spec, there are no known platforms that ship this support in production. This clarity allows namespace-capacity alignment constraints around interleave-ways to be relaxed. 
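In driver terms, the split looks like this (a sketch mirroring the papr_scm and nfit conversions below; the pmem_aliases_blk predicate is hypothetical):

    unsigned long dimm_flags = 0;

    /* the DIMM can store namespace labels */
    set_bit(NDD_LABELING, &dimm_flags);

    /* set only when pmem capacity can truly alias with blk capacity,
     * which today means ACPI/NFIT platforms */
    if (pmem_aliases_blk)
            set_bit(NDD_ALIASING, &dimm_flags);

    nvdimm = nvdimm_create(nvdimm_bus, priv, NULL, dimm_flags,
                           cmd_mask, 0, NULL);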
Cc: Vishal Verma Cc: Oliver O'Halloran Reviewed-by: Jeff Moyer Reviewed-by: Aneesh Kumar K.V Link: https://lore.kernel.org/r/158041477856.3889308.4212605617834097674.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- arch/powerpc/platforms/pseries/papr_scm.c | 2 +- drivers/acpi/nfit/core.c | 4 +++- drivers/nvdimm/dimm.c | 2 +- drivers/nvdimm/dimm_devs.c | 9 +++++---- drivers/nvdimm/namespace_devs.c | 2 +- drivers/nvdimm/nd.h | 2 +- drivers/nvdimm/region_devs.c | 10 +++++----- include/linux/libnvdimm.h | 2 ++ 8 files changed, 19 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 0b4467e378e5..589858cb3203 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -328,7 +328,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) } dimm_flags = 0; - set_bit(NDD_ALIASING, &dimm_flags); + set_bit(NDD_LABELING, &dimm_flags); p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index a3320f93616d..71d7f2aa1b12 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2026,8 +2026,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) continue; } - if (nfit_mem->bdw && nfit_mem->memdev_pmem) + if (nfit_mem->bdw && nfit_mem->memdev_pmem) { set_bit(NDD_ALIASING, &flags); + set_bit(NDD_LABELING, &flags); + } /* collate flags across all memdevs for this dimm */ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 64776ed15bb3..7d4ddc4d9322 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -99,7 +99,7 @@ static int nvdimm_probe(struct device *dev) if (ndd->ns_current >= 0) { rc = nd_label_reserve_dpa(ndd); if (rc == 0) - nvdimm_set_aliasing(dev); + nvdimm_set_labeling(dev); } nvdimm_bus_unlock(dev); diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 94ea6dba6b4f..39a61a514746 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -32,7 +32,7 @@ int nvdimm_check_config_data(struct device *dev) if (!nvdimm->cmd_mask || !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) { - if (test_bit(NDD_ALIASING, &nvdimm->flags)) + if (test_bit(NDD_LABELING, &nvdimm->flags)) return -ENXIO; else return -ENOTTY; @@ -173,11 +173,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, return rc; } -void nvdimm_set_aliasing(struct device *dev) +void nvdimm_set_labeling(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); - set_bit(NDD_ALIASING, &nvdimm->flags); + set_bit(NDD_LABELING, &nvdimm->flags); } void nvdimm_set_locked(struct device *dev) @@ -312,8 +312,9 @@ static ssize_t flags_show(struct device *dev, { struct nvdimm *nvdimm = to_nvdimm(dev); - return sprintf(buf, "%s%s\n", + return sprintf(buf, "%s%s%s\n", test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "", + test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "", test_bit(NDD_LOCKED, &nvdimm->flags) ? 
"lock " : ""); } static DEVICE_ATTR_RO(flags); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 77e211c7d94d..01f6c22f0d1a 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2538,7 +2538,7 @@ static int init_active_labels(struct nd_region *nd_region) if (!ndd) { if (test_bit(NDD_LOCKED, &nvdimm->flags)) /* fail, label data may be unreadable */; - else if (test_bit(NDD_ALIASING, &nvdimm->flags)) + else if (test_bit(NDD_LABELING, &nvdimm->flags)) /* fail, labels needed to disambiguate dpa */; else return 0; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index c9f6a5b5253a..ca39abe29c7c 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -252,7 +252,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len); long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, unsigned int len); -void nvdimm_set_aliasing(struct device *dev); +void nvdimm_set_labeling(struct device *dev); void nvdimm_set_locked(struct device *dev); void nvdimm_clear_locked(struct device *dev); int nvdimm_security_setup_events(struct device *dev); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index a19e535830d9..a5fc6e4c56ff 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -195,16 +195,16 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); int nd_region_to_nstype(struct nd_region *nd_region) { if (is_memory(&nd_region->dev)) { - u16 i, alias; + u16 i, label; - for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) { + for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; - if (test_bit(NDD_ALIASING, &nvdimm->flags)) - alias++; + if (test_bit(NDD_LABELING, &nvdimm->flags)) + label++; } - if (alias) + if (label) return ND_DEVICE_NAMESPACE_PMEM; else return ND_DEVICE_NAMESPACE_IO; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 9df091bd30ba..18da4059be09 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -37,6 +37,8 @@ enum { NDD_WORK_PENDING = 4, /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ NDD_NOBLK = 5, + /* dimm supports namespace labels */ + NDD_LABELING = 6, /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, -- cgit v1.2.3-58-ga151 From 3f9d51333129e16d77dcc9414bd548151d884c8a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 29 Feb 2020 12:50:46 +0100 Subject: watchdog: wm831x_wdt: Remove GPIO handling An attempt to convert the driver to using GPIO descriptors (see Link tag) was discouraged in favor of deleting the handling of the update GPIO altogehter since there are no in-tree users. This patch deletes the GPIO handling instead. 
Cc: Richard Fitzgerald Cc: Charles Keepax Cc: Mark Brown Link: https://lore.kernel.org/linux-watchdog/20200210102209.289379-1-linus.walleij@linaro.org/ Signed-off-by: Linus Walleij Reviewed-by: Guenter Roeck Acked-by: Charles Keepax Link: https://lore.kernel.org/r/20200229115046.57781-1-linus.walleij@linaro.org Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/wm831x_wdt.c | 27 --------------------------- include/linux/mfd/wm831x/pdata.h | 1 - 2 files changed, 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index 030ce240620d..d96ad8f38bd2 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include @@ -29,7 +28,6 @@ struct wm831x_wdt_drvdata { struct watchdog_device wdt; struct wm831x *wm831x; struct mutex lock; - int update_gpio; int update_state; }; @@ -103,14 +101,6 @@ static int wm831x_wdt_ping(struct watchdog_device *wdt_dev) mutex_lock(&driver_data->lock); - if (driver_data->update_gpio) { - gpio_set_value_cansleep(driver_data->update_gpio, - driver_data->update_state); - driver_data->update_state = !driver_data->update_state; - ret = 0; - goto out; - } - reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG); if (!(reg & WM831X_WDOG_RST_SRC)) { @@ -239,23 +229,6 @@ static int wm831x_wdt_probe(struct platform_device *pdev) reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT; reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT; - if (pdata->update_gpio) { - ret = devm_gpio_request_one(dev, pdata->update_gpio, - GPIOF_OUT_INIT_LOW, - "Watchdog update"); - if (ret < 0) { - dev_err(wm831x->dev, - "Failed to request update GPIO: %d\n", - ret); - return ret; - } - - driver_data->update_gpio = pdata->update_gpio; - - /* Make sure the watchdog takes hardware updates */ - reg |= WM831X_WDOG_RST_SRC; - } - ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg); diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 986986fe4e4e..75aa94dadf1c 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h @@ -89,7 +89,6 @@ enum wm831x_watchdog_action { struct wm831x_watchdog_pdata { enum wm831x_watchdog_action primary, secondary; - int update_gpio; unsigned int software:1; }; -- cgit v1.2.3-58-ga151 From 6ce6ae7c178b95f83ca0e15bd2ac961425a3af5c Mon Sep 17 00:00:00 2001 From: Zhenzhong Duan Date: Wed, 11 Mar 2020 15:16:53 +0800 Subject: misc: cleanup minor number definitions in c file into miscdevice.h HWRNG_MINOR and RNG_MISCDEV_MINOR are duplicate definitions; use the unified HWRNG_MINOR instead, now defined in miscdevice.h. ANSLCD_MINOR and LCD_MINOR are duplicate definitions; use the unified LCD_MINOR instead, now defined in miscdevice.h. MISCDEV_MINOR is renamed to PXA3XX_GCU_MINOR and moved into miscdevice.h. The other definitions are just moved without any change.
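The pattern, using the hw_random conversion from the diff below as the example (designated initializers added here for clarity; the driver itself uses positional ones):

    #include <linux/miscdevice.h>   /* now provides HWRNG_MINOR (183) */

    static struct miscdevice rng_miscdev = {
            .minor = HWRNG_MINOR,   /* was the private RNG_MISCDEV_MINOR */
            .name  = RNG_MODULE_NAME,
            .fops  = &rng_chrdev_ops,
    };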
Link: https://lore.kernel.org/lkml/20200120221323.GJ15860@mit.edu/t/ Suggested-by: Arnd Bergmann Build-tested-by: Willy TARREAU Build-tested-by: Miguel Ojeda Signed-off-by: Zhenzhong Duan Acked-by: Miguel Ojeda Acked-by: Arnd Bergmann Acked-by: Herbert Xu Link: https://lore.kernel.org/r/20200311071654.335-2-zhenzhong.duan@gmail.com Signed-off-by: Greg Kroah-Hartman --- arch/um/drivers/random.c | 4 +--- drivers/auxdisplay/charlcd.c | 2 -- drivers/auxdisplay/panel.c | 2 -- drivers/char/applicom.c | 1 - drivers/char/nwbutton.h | 1 - drivers/char/toshiba.c | 2 -- drivers/macintosh/ans-lcd.c | 2 +- drivers/macintosh/ans-lcd.h | 2 -- drivers/macintosh/via-pmu.c | 3 --- drivers/sbus/char/envctrl.c | 2 -- drivers/sbus/char/uctrl.c | 2 -- drivers/video/fbdev/pxa3xx-gcu.c | 7 +++---- include/linux/miscdevice.h | 10 ++++++++++ kernel/power/user.c | 2 -- 14 files changed, 15 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index 1d5d3057e6f1..ce115fce52f0 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c @@ -23,8 +23,6 @@ #define RNG_VERSION "1.0.0" #define RNG_MODULE_NAME "hw_random" -#define RNG_MISCDEV_MINOR 183 /* official */ - /* Changed at init time, in the non-modular case, and at module load * time, in the module case. Presumably, the module subsystem * protects against a module being loaded twice at the same time. @@ -104,7 +102,7 @@ static const struct file_operations rng_chrdev_ops = { /* rng_init shouldn't be called more than once at boot time */ static struct miscdevice rng_miscdev = { - RNG_MISCDEV_MINOR, + HWRNG_MINOR, RNG_MODULE_NAME, &rng_chrdev_ops, }; diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 874c259a8829..e7048658cb5e 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -22,8 +22,6 @@ #include "charlcd.h" -#define LCD_MINOR 156 - #define DEFAULT_LCD_BWIDTH 40 #define DEFAULT_LCD_HWIDTH 64 diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 85965953683e..99980aa3644b 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -57,8 +57,6 @@ #include "charlcd.h" -#define KEYPAD_MINOR 185 - #define LCD_MAXBYTES 256 /* max burst write */ #define KEYPAD_BUFFER 64 diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 51121a4b82c7..14b2d8034c51 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -53,7 +53,6 @@ #define MAX_BOARD 8 /* maximum of pc board possible */ #define MAX_ISA_BOARD 4 #define LEN_RAM_IO 0x800 -#define AC_MINOR 157 #ifndef PCI_VENDOR_ID_APPLICOM #define PCI_VENDOR_ID_APPLICOM 0x1389 diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h index 9dedfd7adc0e..f2b9fdc1f9ea 100644 --- a/drivers/char/nwbutton.h +++ b/drivers/char/nwbutton.h @@ -14,7 +14,6 @@ #define NUM_PRESSES_REBOOT 2 /* How many presses to activate shutdown */ #define BUTTON_DELAY 30 /* How many jiffies for sequence to end */ #define VERSION "0.3" /* Driver version number */ -#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */ /* Structure definitions: */ diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index 98f3150e0048..aff0a8e44fff 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c @@ -61,8 +61,6 @@ #include #include -#define TOSH_MINOR_DEV 181 - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jonathan Buzzard "); MODULE_DESCRIPTION("Toshiba laptop SMM driver"); diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index 
b1314d104b06..b4821c751d04 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -142,7 +142,7 @@ const struct file_operations anslcd_fops = { }; static struct miscdevice anslcd_dev = { - ANSLCD_MINOR, + LCD_MINOR, "anslcd", &anslcd_fops }; diff --git a/drivers/macintosh/ans-lcd.h b/drivers/macintosh/ans-lcd.h index f0a6e4c68557..bca7d76d441b 100644 --- a/drivers/macintosh/ans-lcd.h +++ b/drivers/macintosh/ans-lcd.h @@ -2,8 +2,6 @@ #ifndef _PPC_ANS_LCD_H #define _PPC_ANS_LCD_H -#define ANSLCD_MINOR 156 - #define ANSLCD_CLEAR 0x01 #define ANSLCD_SENDCTRL 0x02 #define ANSLCD_SETSHORTDELAY 0x03 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index d38fb78a3b23..83eb05bf85ff 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -75,9 +75,6 @@ /* Some compile options */ #undef DEBUG_SLEEP -/* Misc minor number allocated for /dev/pmu */ -#define PMU_MINOR 154 - /* How many iterations between battery polls */ #define BATTERY_POLLING_COUNT 2 diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 12d66aa61ede..843e830b5f87 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -37,8 +37,6 @@ #define DRIVER_NAME "envctrl" #define PFX DRIVER_NAME ": " -#define ENVCTRL_MINOR 162 - #define PCF8584_ADDRESS 0x55 #define CONTROL_PIN 0x80 diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 7173a2e4e8cf..37d252f2548d 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -23,8 +23,6 @@ #include #include -#define UCTRL_MINOR 174 - #define DEBUG 1 #ifdef DEBUG #define dprintk(x) printk x diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 74ffb446e00c..4279e13a3b58 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -36,7 +36,6 @@ #include "pxa3xx-gcu.h" #define DRV_NAME "pxa3xx-gcu" -#define MISCDEV_MINOR 197 #define REG_GCCR 0x00 #define GCCR_SYNC_CLR (1 << 9) @@ -595,7 +594,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) * container_of(). This isn't really necessary as we have a fixed minor * number anyway, but this is to avoid statics. 
*/ - priv->misc_dev.minor = MISCDEV_MINOR, + priv->misc_dev.minor = PXA3XX_GCU_MINOR, priv->misc_dev.name = DRV_NAME, priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops; @@ -638,7 +637,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) ret = misc_register(&priv->misc_dev); if (ret < 0) { dev_err(dev, "misc_register() for minor %d failed\n", - MISCDEV_MINOR); + PXA3XX_GCU_MINOR); goto err_free_dma; } @@ -714,7 +713,7 @@ module_platform_driver(pxa3xx_gcu_driver); MODULE_DESCRIPTION("PXA3xx graphics controller unit driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(MISCDEV_MINOR); +MODULE_ALIAS_MISCDEV(PXA3XX_GCU_MINOR); MODULE_AUTHOR("Janine Kropp , " "Denis Oliver Kropp , " "Daniel Mack "); diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index becde6981a95..42360fcd7342 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -31,14 +31,23 @@ #define DMAPI_MINOR 140 /* unused */ #define NVRAM_MINOR 144 #define SGI_MMTIMER 153 +#define PMU_MINOR 154 #define STORE_QUEUE_MINOR 155 /* unused */ +#define LCD_MINOR 156 +#define AC_MINOR 157 +#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */ +#define ENVCTRL_MINOR 162 #define I2O_MINOR 166 +#define UCTRL_MINOR 174 #define AGPGART_MINOR 175 +#define TOSH_MINOR_DEV 181 #define HWRNG_MINOR 183 #define MICROCODE_MINOR 184 +#define KEYPAD_MINOR 185 #define IRNET_MINOR 187 #define D7S_MINOR 193 #define VFIO_MINOR 196 +#define PXA3XX_GCU_MINOR 197 #define TUN_MINOR 200 #define CUSE_MINOR 203 #define MWAVE_MINOR 219 /* ACP/Mwave Modem */ @@ -49,6 +58,7 @@ #define MISC_MCELOG_MINOR 227 #define HPET_MINOR 228 #define FUSE_MINOR 229 +#define SNAPSHOT_MINOR 231 #define KVM_MINOR 232 #define BTRFS_MINOR 234 #define AUTOFS_MINOR 235 diff --git a/kernel/power/user.c b/kernel/power/user.c index 77438954cc2b..98fb65970b6b 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -27,8 +27,6 @@ #include "power.h" -#define SNAPSHOT_MINOR 231 - static struct snapshot_data { struct snapshot_handle handle; int swap; -- cgit v1.2.3-58-ga151 From 2668dba6df53584fb147d656c45a600d9e723dcb Mon Sep 17 00:00:00 2001 From: Zhenzhong Duan Date: Wed, 11 Mar 2020 15:16:54 +0800 Subject: misc: move FLASH_MINOR into miscdevice.h and fix conflicts FLASH_MINOR is used in both drivers/char/nwflash.c and drivers/sbus/char/flash.c with conflicting minor numbers. Move all the definitions of FLASH_MINOR into miscdevice.h. Rename FLASH_MINOR for drivers/char/nwflash.c to NWFLASH_MINOR and FLASH_MINOR for drivers/sbus/char/flash.c to SBUS_FLASH_MINOR. Link: https://lore.kernel.org/lkml/20200120221323.GJ15860@mit.edu/t/ Suggested-by: Arnd Bergmann Signed-off-by: Zhenzhong Duan Acked-by: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: Russell King Cc: "David S.
Miller" Link: https://lore.kernel.org/r/20200311071654.335-3-zhenzhong.duan@gmail.com Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/nwflash.h | 1 - drivers/char/nwflash.c | 2 +- drivers/sbus/char/flash.c | 4 +--- include/linux/miscdevice.h | 2 ++ 4 files changed, 4 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h index 0ec6f07c2d8a..66b7e68c9b58 100644 --- a/arch/arm/include/asm/nwflash.h +++ b/arch/arm/include/asm/nwflash.h @@ -2,7 +2,6 @@ #ifndef _FLASH_H #define _FLASH_H -#define FLASH_MINOR 160 /* MAJOR is 10 - miscdevice */ #define CMD_WRITE_DISABLE 0 #define CMD_WRITE_ENABLE 0x28 #define CMD_WRITE_BASE64K_ENABLE 0x47 diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index a4a0797daa19..0973c2c2b01a 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -576,7 +576,7 @@ static const struct file_operations flash_fops = static struct miscdevice flash_miscdev = { - FLASH_MINOR, + NWFLASH_MINOR, "nwflash", &flash_fops }; diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index e85a05aca4d6..4147d22fd448 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c @@ -31,8 +31,6 @@ static struct { unsigned long busy; /* In use? */ } flash; -#define FLASH_MINOR 152 - static int flash_mmap(struct file *file, struct vm_area_struct *vma) { @@ -157,7 +155,7 @@ static const struct file_operations flash_fops = { .release = flash_release, }; -static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops }; +static struct miscdevice flash_dev = { SBUS_FLASH_MINOR, "flash", &flash_fops }; static int flash_probe(struct platform_device *op) { diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 42360fcd7342..66cc45e0624b 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -30,12 +30,14 @@ #define SUN_OPENPROM_MINOR 139 #define DMAPI_MINOR 140 /* unused */ #define NVRAM_MINOR 144 +#define SBUS_FLASH_MINOR 152 #define SGI_MMTIMER 153 #define PMU_MINOR 154 #define STORE_QUEUE_MINOR 155 /* unused */ #define LCD_MINOR 156 #define AC_MINOR 157 #define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */ +#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */ #define ENVCTRL_MINOR 162 #define I2O_MINOR 166 #define UCTRL_MINOR 174 -- cgit v1.2.3-58-ga151 From 05d67ec3ca59627f2c1dd62538a345c4a9cdff44 Mon Sep 17 00:00:00 2001 From: Qiang Su Date: Fri, 6 Mar 2020 15:03:59 +0800 Subject: UIO: fix up inapposite whitespace in uio header file Whitespace is used in inapposite places, which makes checkpatch complain.
Signed-off-by: Qiang Su Link: https://lore.kernel.org/r/20200306070359.71398-1-suqiang4@huawei.com Signed-off-by: Greg Kroah-Hartman --- include/linux/uio_driver.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 01081c4726c0..461db05819f4 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -24,10 +24,10 @@ struct uio_map; * struct uio_mem - description of a UIO memory region * @name: name of the memory region for identification * @addr: address of the device's memory rounded to page - * size (phys_addr is used since addr can be - * logical, virtual, or physical & phys_addr_t - * should always be large enough to handle any of - * the address types) + * size (phys_addr is used since addr can be + * logical, virtual, or physical & phys_addr_t + * should always be large enough to handle any of + * the address types) * @offs: offset of device memory within the page * @size: size of IO (multiple of page size) * @memtype: type of memory addr points to @@ -67,16 +67,16 @@ struct uio_port { #define MAX_UIO_PORT_REGIONS 5 struct uio_device { - struct module *owner; + struct module *owner; struct device dev; - int minor; - atomic_t event; - struct fasync_struct *async_queue; - wait_queue_head_t wait; - struct uio_info *info; + int minor; + atomic_t event; + struct fasync_struct *async_queue; + wait_queue_head_t wait; + struct uio_info *info; struct mutex info_lock; - struct kobject *map_dir; - struct kobject *portio_dir; + struct kobject *map_dir; + struct kobject *portio_dir; }; /** -- cgit v1.2.3-58-ga151 From 86a78b1cfc78a6378c4ff3b30f822899c066dca5 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 6 Mar 2020 18:18:52 +0200 Subject: uio: add resource managed devm_uio_register_device() function This change adds a resource managed equivalent of uio_register_device(). Not adding devm_uio_unregister_device(), since the intent is to discourage its usage. Having such a function may allow some bad driver designs. Most users of devm_*register*() functions rarely use the unregister equivalents. Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20200306161853.25368-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/uio_driver.h | 9 +++++++++ 2 files changed, 47 insertions(+) (limited to 'include/linux') diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index a57698985f9c..6e725c6c6256 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -996,6 +996,44 @@ err_device_create: } EXPORT_SYMBOL_GPL(__uio_register_device); +static void devm_uio_unregister_device(struct device *dev, void *res) +{ + uio_unregister_device(*(struct uio_info **)res); +} + +/** + * devm_uio_register_device - Resource managed uio_register_device() + * @owner: module that creates the new device + * @parent: parent device + * @info: UIO device capabilities + * + * returns zero on success or a negative error code.
+ */ +int __devm_uio_register_device(struct module *owner, + struct device *parent, + struct uio_info *info) +{ + struct uio_info **ptr; + int ret; + + ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + *ptr = info; + ret = __uio_register_device(owner, parent, info); + if (ret) { + devres_free(ptr); + return ret; + } + + devres_add(parent, ptr); + + return 0; +} +EXPORT_SYMBOL_GPL(__devm_uio_register_device); + /** * uio_unregister_device - unregister a industrial IO device * @info: UIO device capabilities diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 461db05819f4..54bf6b118401 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -123,6 +123,15 @@ extern int __must_check extern void uio_unregister_device(struct uio_info *info); extern void uio_event_notify(struct uio_info *info); +extern int __must_check + __devm_uio_register_device(struct module *owner, + struct device *parent, + struct uio_info *info); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define devm_uio_register_device(parent, info) \ + __devm_uio_register_device(THIS_MODULE, parent, info) + /* defines for uio_info->irq */ #define UIO_IRQ_CUSTOM -1 #define UIO_IRQ_NONE 0 -- cgit v1.2.3-58-ga151 From 2644f912b41012c1ce5ff9be99efeec721491b86 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 10 Feb 2020 11:15:46 +0100 Subject: backlight: pwm_bl: Switch to full GPIO descriptor The PWM backlight still supports passing an enable GPIO line as platform data using the legacy API. It turns out that every board using this mechanism except one passes .enable_gpio = -1. So we drop all these cargo-culted -1's from all instances of this platform data in the kernel. The remaining board, Palm TC, is converted to pass a machine descriptor table with the "enable" GPIO instead; we then delete the platform data entry for enable_gpio and the code handling it, and things should work smoothly with the new API.
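For boards that still need an enable line, the new pattern is to describe the
line in a GPIO machine descriptor table keyed by the consumer device name and
let the driver look it up by con_id. A minimal sketch of both sides follows;
the chip label "board-gpio" and line offset 42 are illustrative placeholders,
and only the palmtc hunk in the patch below is a real instance:

    #include <linux/gpio/machine.h>
    #include <linux/gpio/consumer.h>

    static struct gpiod_lookup_table board_pwm_bl_gpio_table = {
    	.dev_id = "pwm-backlight.0",	/* name of the consumer device */
    	.table = {
    		/* "enable" is the con_id the backlight driver asks for */
    		GPIO_LOOKUP("board-gpio", 42, "enable", GPIO_ACTIVE_HIGH),
    		{ },
    	},
    };

    /* board init code: register the mapping before the device probes */
    gpiod_add_lookup_table(&board_pwm_bl_gpio_table);

    /* driver probe: fetch the descriptor, no GPIO numbers involved */
    enable_gpiod = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_ASIS);

This keeps per-board GPIO numbers in the board files and out of the driver.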
Signed-off-by: Linus Walleij Acked-by: Robert Jarzmik Acked-by: Krzysztof Kozlowski Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- arch/arm/mach-pxa/cm-x300.c | 1 - arch/arm/mach-pxa/colibri-pxa270-income.c | 1 - arch/arm/mach-pxa/ezx.c | 1 - arch/arm/mach-pxa/hx4700.c | 1 - arch/arm/mach-pxa/lpd270.c | 1 - arch/arm/mach-pxa/magician.c | 1 - arch/arm/mach-pxa/mainstone.c | 1 - arch/arm/mach-pxa/mioa701.c | 1 - arch/arm/mach-pxa/palm27x.c | 1 - arch/arm/mach-pxa/palmtc.c | 11 ++++++++++- arch/arm/mach-pxa/palmte2.c | 1 - arch/arm/mach-pxa/pcm990-baseboard.c | 1 - arch/arm/mach-pxa/tavorevb.c | 2 -- arch/arm/mach-pxa/viper.c | 1 - arch/arm/mach-pxa/z2.c | 2 -- arch/arm/mach-pxa/zylonite.c | 1 - arch/arm/mach-s3c24xx/mach-h1940.c | 1 - arch/arm/mach-s3c24xx/mach-rx1950.c | 1 - arch/arm/mach-s3c64xx/dev-backlight.c | 3 --- arch/arm/mach-s3c64xx/mach-crag6410.c | 1 - arch/arm/mach-s3c64xx/mach-hmt.c | 1 - arch/arm/mach-s3c64xx/mach-smartq.c | 1 - arch/arm/mach-s3c64xx/mach-smdk6410.c | 2 +- arch/unicore32/kernel/puv3-nb0916.c | 1 - drivers/video/backlight/pwm_bl.c | 19 ------------------- include/linux/pwm_backlight.h | 2 -- 26 files changed, 11 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 425855f456f2..2e35354b61f5 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -312,7 +312,6 @@ static struct pwm_lookup cm_x300_pwm_lookup[] = { static struct platform_pwm_backlight_data cm_x300_backlight_data = { .max_brightness = 100, .dft_brightness = 100, - .enable_gpio = -1, }; static struct platform_device cm_x300_backlight_device = { diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c index dbad2f13706c..e5879e8b0682 100644 --- a/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/arch/arm/mach-pxa/colibri-pxa270-income.c @@ -202,7 +202,6 @@ static struct pwm_lookup income_pwm_lookup[] = { static struct platform_pwm_backlight_data income_backlight_data = { .max_brightness = 0x3ff, .dft_brightness = 0x1ff, - .enable_gpio = -1, }; static struct platform_device income_backlight = { diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index ec10851b63cf..eb85950e7c0e 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -55,7 +55,6 @@ static struct pwm_lookup ezx_pwm_lookup[] __maybe_unused = { static struct platform_pwm_backlight_data ezx_backlight_data = { .max_brightness = 1023, .dft_brightness = 1023, - .enable_gpio = -1, }; static struct platform_device ezx_backlight_device = { diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index 238a751a8797..1d4c5db54be2 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -556,7 +556,6 @@ static struct platform_device hx4700_lcd = { static struct platform_pwm_backlight_data backlight_data = { .max_brightness = 200, .dft_brightness = 100, - .enable_gpio = -1, }; static struct platform_device backlight = { diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index 20e00e970385..6fc40bc06910 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -277,7 +277,6 @@ static struct pwm_lookup lpd270_pwm_lookup[] = { static struct platform_pwm_backlight_data lpd270_backlight_data = { .max_brightness = 1, .dft_brightness = 1, - .enable_gpio = -1, }; static struct platform_device lpd270_backlight_device = { diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 
5d0591f93f4d..cd9fa465b9b2 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -401,7 +401,6 @@ static void magician_backlight_exit(struct device *dev) static struct platform_pwm_backlight_data backlight_data = { .max_brightness = 272, .dft_brightness = 100, - .enable_gpio = -1, .init = magician_backlight_init, .notify = magician_backlight_notify, .exit = magician_backlight_exit, diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 1b7882920164..d1010ec26e9f 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -256,7 +256,6 @@ static struct pwm_lookup mainstone_pwm_lookup[] = { static struct platform_pwm_backlight_data mainstone_backlight_data = { .max_brightness = 1023, .dft_brightness = 1023, - .enable_gpio = -1, }; static struct platform_device mainstone_backlight_device = { diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index 0b8bae9610f1..d3af80317f2d 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -176,7 +176,6 @@ static struct pwm_lookup mioa701_pwm_lookup[] = { static struct platform_pwm_backlight_data mioa701_backlight_data = { .max_brightness = 100, .dft_brightness = 50, - .enable_gpio = -1, }; /* diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index b600b63af3a6..0d246a1aebbc 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c @@ -318,7 +318,6 @@ static void palm27x_backlight_exit(struct device *dev) static struct platform_pwm_backlight_data palm27x_backlight_data = { .max_brightness = 0xfe, .dft_brightness = 0x7e, - .enable_gpio = -1, .init = palm27x_backlight_init, .notify = palm27x_backlight_notify, .exit = palm27x_backlight_exit, diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c index fda9deaaae02..455cb8ccaf26 100644 --- a/arch/arm/mach-pxa/palmtc.c +++ b/arch/arm/mach-pxa/palmtc.c @@ -174,6 +174,15 @@ static inline void palmtc_keys_init(void) {} * Backlight ******************************************************************************/ #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) + +static struct gpiod_lookup_table palmtc_pwm_bl_gpio_table = { + .dev_id = "pwm-backlight.0", + .table = { + GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMTC_BL_POWER, + "enable", GPIO_ACTIVE_HIGH), + }, +}; + static struct pwm_lookup palmtc_pwm_lookup[] = { PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS, PWM_POLARITY_NORMAL), @@ -182,7 +191,6 @@ static struct pwm_lookup palmtc_pwm_lookup[] = { static struct platform_pwm_backlight_data palmtc_backlight_data = { .max_brightness = PALMTC_MAX_INTENSITY, .dft_brightness = PALMTC_MAX_INTENSITY, - .enable_gpio = GPIO_NR_PALMTC_BL_POWER, }; static struct platform_device palmtc_backlight = { @@ -195,6 +203,7 @@ static struct platform_device palmtc_backlight = { static void __init palmtc_pwm_init(void) { + gpiod_add_lookup_table(&palmtc_pwm_bl_gpio_table); pwm_add_table(palmtc_pwm_lookup, ARRAY_SIZE(palmtc_pwm_lookup)); platform_device_register(&palmtc_backlight); } diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c index 7171014fd311..e3bcf58b4e63 100644 --- a/arch/arm/mach-pxa/palmte2.c +++ b/arch/arm/mach-pxa/palmte2.c @@ -175,7 +175,6 @@ static void palmte2_backlight_exit(struct device *dev) static struct platform_pwm_backlight_data palmte2_backlight_data = { .max_brightness = PALMTE2_MAX_INTENSITY, .dft_brightness = PALMTE2_MAX_INTENSITY, - .enable_gpio = -1, .init = 
palmte2_backlight_init, .notify = palmte2_backlight_notify, .exit = palmte2_backlight_exit, diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index cb1c56769fbc..bf613f88d70b 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -154,7 +154,6 @@ static struct pwm_lookup pcm990_pwm_lookup[] = { static struct platform_pwm_backlight_data pcm990_backlight_data = { .max_brightness = 1023, .dft_brightness = 1023, - .enable_gpio = -1, }; static struct platform_device pcm990_backlight_device = { diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c index 93466fa3b0fe..a15eb3b9484d 100644 --- a/arch/arm/mach-pxa/tavorevb.c +++ b/arch/arm/mach-pxa/tavorevb.c @@ -178,13 +178,11 @@ static struct platform_pwm_backlight_data tavorevb_backlight_data[] = { /* primary backlight */ .max_brightness = 100, .dft_brightness = 100, - .enable_gpio = -1, }, [1] = { /* secondary backlight */ .max_brightness = 100, .dft_brightness = 100, - .enable_gpio = -1, }, }; diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index c06031da6676..3aa34e9a15d3 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -404,7 +404,6 @@ static void viper_backlight_exit(struct device *dev) static struct platform_pwm_backlight_data viper_backlight_data = { .max_brightness = 100, .dft_brightness = 100, - .enable_gpio = -1, .init = viper_backlight_init, .notify = viper_backlight_notify, .exit = viper_backlight_exit, diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index 900cefc4c5ea..21fd76bb09cd 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -210,13 +210,11 @@ static struct platform_pwm_backlight_data z2_backlight_data[] = { /* Keypad Backlight */ .max_brightness = 1023, .dft_brightness = 0, - .enable_gpio = -1, }, [1] = { /* LCD Backlight */ .max_brightness = 1023, .dft_brightness = 512, - .enable_gpio = -1, }, }; diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c index bf2ab5bd49ec..79f0025fa17a 100644 --- a/arch/arm/mach-pxa/zylonite.c +++ b/arch/arm/mach-pxa/zylonite.c @@ -117,7 +117,6 @@ static struct pwm_lookup zylonite_pwm_lookup[] = { static struct platform_pwm_backlight_data zylonite_backlight_data = { .max_brightness = 100, .dft_brightness = 100, - .enable_gpio = -1, }; static struct platform_device zylonite_backlight_device = { diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c index 74d6b68e91c7..e1c372e5447b 100644 --- a/arch/arm/mach-s3c24xx/mach-h1940.c +++ b/arch/arm/mach-s3c24xx/mach-h1940.c @@ -516,7 +516,6 @@ static void h1940_backlight_exit(struct device *dev) static struct platform_pwm_backlight_data backlight_data = { .max_brightness = 100, .dft_brightness = 50, - .enable_gpio = -1, .init = h1940_backlight_init, .notify = h1940_backlight_notify, .exit = h1940_backlight_exit, diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c index 03d8f27cdc32..fde98b175c75 100644 --- a/arch/arm/mach-s3c24xx/mach-rx1950.c +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c @@ -534,7 +534,6 @@ static int rx1950_backlight_notify(struct device *dev, int brightness) static struct platform_pwm_backlight_data rx1950_backlight_data = { .max_brightness = 24, .dft_brightness = 4, - .enable_gpio = -1, .init = rx1950_backlight_init, .notify = rx1950_backlight_notify, .exit = rx1950_backlight_exit, diff --git a/arch/arm/mach-s3c64xx/dev-backlight.c b/arch/arm/mach-s3c64xx/dev-backlight.c index 
799cfdf0606b..09e6da305f60 100644 --- a/arch/arm/mach-s3c64xx/dev-backlight.c +++ b/arch/arm/mach-s3c64xx/dev-backlight.c @@ -65,7 +65,6 @@ static struct samsung_bl_drvdata samsung_dfl_bl_data __initdata = { .plat_data = { .max_brightness = 255, .dft_brightness = 255, - .enable_gpio = -1, .init = samsung_bl_init, .exit = samsung_bl_exit, }, @@ -111,8 +110,6 @@ void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info, samsung_bl_data->dft_brightness = bl_data->dft_brightness; if (bl_data->lth_brightness) samsung_bl_data->lth_brightness = bl_data->lth_brightness; - if (bl_data->enable_gpio >= 0) - samsung_bl_data->enable_gpio = bl_data->enable_gpio; if (bl_data->init) samsung_bl_data->init = bl_data->init; if (bl_data->notify) diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index 8ec6a4f5eb05..da9654255e3f 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -114,7 +114,6 @@ static struct pwm_lookup crag6410_pwm_lookup[] = { static struct platform_pwm_backlight_data crag6410_backlight_data = { .max_brightness = 1000, .dft_brightness = 600, - .enable_gpio = -1, }; static struct platform_device crag6410_backlight_device = { diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c index bfe9881d12cc..e7080215c624 100644 --- a/arch/arm/mach-s3c64xx/mach-hmt.c +++ b/arch/arm/mach-s3c64xx/mach-hmt.c @@ -115,7 +115,6 @@ static void hmt_bl_exit(struct device *dev) static struct platform_pwm_backlight_data hmt_backlight_data = { .max_brightness = 100 * 256, .dft_brightness = 40 * 256, - .enable_gpio = -1, .init = hmt_bl_init, .notify = hmt_bl_notify, .exit = hmt_bl_exit, diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c index 829d5dbd69ee..5025db607c0f 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq.c +++ b/arch/arm/mach-s3c64xx/mach-smartq.c @@ -150,7 +150,6 @@ static int smartq_bl_init(struct device *dev) static struct platform_pwm_backlight_data smartq_backlight_data = { .max_brightness = 1000, .dft_brightness = 600, - .enable_gpio = -1, .init = smartq_bl_init, }; diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index 908e5aa831c8..56f406c0c3dd 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -623,7 +623,7 @@ static struct pwm_lookup smdk6410_pwm_lookup[] = { }; static struct platform_pwm_backlight_data smdk6410_bl_data = { - .enable_gpio = -1, + /* Intentionally blank */ }; static struct dwc2_hsotg_plat smdk6410_hsotg_pdata; diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c index a3bf2ffc54dd..e251f5028396 100644 --- a/arch/unicore32/kernel/puv3-nb0916.c +++ b/arch/unicore32/kernel/puv3-nb0916.c @@ -55,7 +55,6 @@ static struct pwm_lookup nb0916_pwm_lookup[] = { static struct platform_pwm_backlight_data nb0916_backlight_data = { .max_brightness = 100, .dft_brightness = 100, - .enable_gpio = -1, }; static struct gpio_keys_button nb0916_gpio_keys[] = { diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index efb4efc2a13d..82b8d7594701 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -7,7 +7,6 @@ #include #include -#include #include #include #include @@ -258,8 +257,6 @@ static int pwm_backlight_parse_dt(struct device *dev, &data->post_pwm_on_delay); of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); - data->enable_gpio = -EINVAL; - /* 
* Determine the number of brightness levels, if this property is not * set a default table of brightness levels will be used. @@ -502,22 +499,6 @@ static int pwm_backlight_probe(struct platform_device *pdev) goto err_alloc; } - /* - * Compatibility fallback for drivers still using the integer GPIO - * platform data. Must go away soon. - */ - if (!pb->enable_gpio && gpio_is_valid(data->enable_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, data->enable_gpio, - GPIOF_OUT_INIT_HIGH, "enable"); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n", - data->enable_gpio, ret); - goto err_alloc; - } - - pb->enable_gpio = gpio_to_desc(data->enable_gpio); - } - /* * If the GPIO is not known to be already configured as output, that * is, if gpiod_get_direction returns either 1 or -EINVAL, change the diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 8ea265a022fd..06086cb93b6f 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -16,8 +16,6 @@ struct platform_pwm_backlight_data { unsigned int *levels; unsigned int post_pwm_on_delay; unsigned int pwm_off_delay; - /* TODO remove once all users are switched to gpiod_* API */ - int enable_gpio; int (*init)(struct device *dev); int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); -- cgit v1.2.3-58-ga151 From b52cc1bb952f23d0d05615ef1a390a19f6b6b5fd Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Mon, 17 Feb 2020 19:47:28 +0900 Subject: extcon: Remove unneeded extern keyword from extcon-provider.h Commit b7365587f513 ("extcon: Remove unneeded extern keyword from extcon.h") removed the unneeded extern keyword from the extcon header file, but missed deleting the 'extern' keyword from extcon-provider.h. So delete the extern keyword from extcon-provider.h as well. Signed-off-by: Chanwoo Choi Link: https://lore.kernel.org/r/20200217104728.29330-1-cw00.choi@samsung.com Signed-off-by: Greg Kroah-Hartman --- include/linux/extcon-provider.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h index 1c143d200caa..fa70945f4e6b 100644 --- a/include/linux/extcon-provider.h +++ b/include/linux/extcon-provider.h @@ -17,30 +17,30 @@ struct extcon_dev; #if IS_ENABLED(CONFIG_EXTCON) /* Following APIs register/unregister the extcon device. */ -extern int extcon_dev_register(struct extcon_dev *edev); -extern void extcon_dev_unregister(struct extcon_dev *edev); -extern int devm_extcon_dev_register(struct device *dev, +int extcon_dev_register(struct extcon_dev *edev); +void extcon_dev_unregister(struct extcon_dev *edev); +int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev); -extern void devm_extcon_dev_unregister(struct device *dev, +void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev); /* Following APIs allocate/free the memory of the extcon device.
*/ -extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); -extern void extcon_dev_free(struct extcon_dev *edev); -extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, +struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); +void extcon_dev_free(struct extcon_dev *edev); +struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, const unsigned int *cable); -extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); +void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); /* Synchronize the state and property value for each external connector. */ -extern int extcon_sync(struct extcon_dev *edev, unsigned int id); +int extcon_sync(struct extcon_dev *edev, unsigned int id); /* * Following APIs set the connected state of each external connector. * The 'id' argument indicates the defined external connector. */ -extern int extcon_set_state(struct extcon_dev *edev, unsigned int id, +int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state); -extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, +int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state); /* @@ -52,13 +52,13 @@ extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, * for each external connector. They are used to set the capability of the * property of each external connector based on the id and property. */ -extern int extcon_set_property(struct extcon_dev *edev, unsigned int id, +int extcon_set_property(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value prop_val); -extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id, +int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value prop_val); -extern int extcon_set_property_capability(struct extcon_dev *edev, +int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id, unsigned int prop); #else /* CONFIG_EXTCON */ -- cgit v1.2.3-58-ga151 From 8067c0b0c6ac7bce201961f0092e2532b12fc00a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 26 Feb 2020 23:43:21 +0100 Subject: rtc/ia64: remove legacy efirtc driver There are two EFI RTC drivers, the original drivers/char/efirtc.c driver and the more modern drivers/rtc/rtc-efi.c. Both implement the same interface, but the new one does so in a more portable way. Move everything over to that one and remove the old one. 
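Userspace is unaffected by the switch: rtc-efi registers a standard RTC class
device, so tools like hwclock keep working through the usual character device
interface. A quick sanity-check sketch; the node is typically /dev/rtc0, but
the index may differ on a given system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
    	struct rtc_time tm;
    	int fd = open("/dev/rtc0", O_RDONLY);

    	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
    		return 1;
    	/* tm_year counts from 1900, tm_mon is 0-based */
    	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
    	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
    	       tm.tm_hour, tm.tm_min, tm.tm_sec);
    	return 0;
    }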
Cc: linux-ia64@vger.kernel.org Cc: Fenghua Yu Cc: Tony Luck Cc: Stephane Eranian Signed-off-by: Arnd Bergmann Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20200226224322.187960-1-alexandre.belloni@bootlin.com Signed-off-by: Greg Kroah-Hartman --- arch/ia64/configs/bigsur_defconfig | 3 +- arch/ia64/configs/generic_defconfig | 3 +- arch/ia64/configs/gensparse_defconfig | 3 +- arch/ia64/configs/tiger_defconfig | 3 +- arch/ia64/configs/zx1_defconfig | 3 +- drivers/char/Kconfig | 4 - drivers/char/Makefile | 1 - drivers/char/efirtc.c | 366 ---------------------------------- include/linux/miscdevice.h | 2 +- 9 files changed, 11 insertions(+), 377 deletions(-) delete mode 100644 drivers/char/efirtc.c (limited to 'include/linux') diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index b630bd7351c4..f3ba813a5b80 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -57,7 +57,8 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_AGP=m diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 661d90b3e148..cb267a07c57f 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -94,7 +94,8 @@ CONFIG_SERIAL_8250_NR_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y CONFIG_RAW_DRIVER=m CONFIG_HPET=y CONFIG_AGP=m diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 7844e6a956a4..7e25f2f031b6 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -82,7 +82,8 @@ CONFIG_SERIAL_8250_NR_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y CONFIG_RAW_DRIVER=m CONFIG_HPET=y CONFIG_AGP=m diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index 1d6e2a01452b..3f486d5bdc2d 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig @@ -86,7 +86,8 @@ CONFIG_SERIAL_8250_NR_UARTS=6 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y CONFIG_RAW_DRIVER=m CONFIG_HPET=y CONFIG_AGP=m diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 8c92e095f8bb..70788a500448 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -69,7 +69,8 @@ CONFIG_SERIAL_8250_NR_UARTS=8 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set -CONFIG_EFI_RTC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y CONFIG_I2C_CHARDEV=y CONFIG_AGP=y CONFIG_AGP_HP_ZX1=y diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 5d30b19099aa..6d76ba471d28 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -250,10 +250,6 @@ config JS_RTC To compile this driver as a module, choose M here: the module will be called js-rtc. 
-config EFI_RTC - bool "EFI Real Time Clock Services" - depends on IA64 - endif # RTC_LIB config DTLK diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7c5ea6f9df14..abe3138b1f5a 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -22,7 +22,6 @@ obj-$(CONFIG_APPLICOM) += applicom.o obj-$(CONFIG_SONYPI) += sonypi.o obj-$(CONFIG_RTC) += rtc.o obj-$(CONFIG_HPET) += hpet.o -obj-$(CONFIG_EFI_RTC) += efirtc.o obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/ obj-$(CONFIG_NVRAM) += nvram.o obj-$(CONFIG_TOSHIBA) += toshiba.o diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c deleted file mode 100644 index 4f73064d0c6f..000000000000 --- a/drivers/char/efirtc.c +++ /dev/null @@ -1,366 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * EFI Time Services Driver for Linux - * - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 Stephane Eranian - * - * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker - * - * This code provides an architected & portable interface to the real time - * clock by using EFI instead of direct bit fiddling. The functionalities are - * quite different from the rtc.c driver. The only way to talk to the device - * is by using ioctl(). There is a /proc interface which provides the raw - * information. - * - * Please note that we have kept the API as close as possible to the - * legacy RTC. The standard /sbin/hwclock program should work normally - * when used to get/set the time. - * - * NOTES: - * - Locking is required for safe execution of EFI calls with regards - * to interrupts and SMP. - * - * TODO (December 1999): - * - provide the API to set/get the WakeUp Alarm (different from the - * rtc.c alarm). - * - SMP testing - * - Add module support - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#define EFI_RTC_VERSION "0.4" - -#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT) -/* - * EFI Epoch is 1/1/1998 - */ -#define EFI_RTC_EPOCH 1998 - -static DEFINE_SPINLOCK(efi_rtc_lock); - -static long efi_rtc_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); - -#define is_leap(year) \ - ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) - -static const unsigned short int __mon_yday[2][13] = -{ - /* Normal years. */ - { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, - /* Leap years. */ - { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } -}; - -/* - * returns day of the year [0-365] - */ -static inline int -compute_yday(efi_time_t *eft) -{ - /* efi_time_t.month is in the [1-12] so, we need -1 */ - return __mon_yday[is_leap(eft->year)][eft->month-1]+ eft->day -1; -} -/* - * returns day of the week [0-6] 0=Sunday - * - * Don't try to provide a year that's before 1998, please ! - */ -static int -compute_wday(efi_time_t *eft) -{ - int y; - int ndays = 0; - - if ( eft->year < 1998 ) { - printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n"); - return -1; - } - - for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) { - ndays += 365 + (is_leap(y) ? 1 : 0); - } - ndays += compute_yday(eft); - - /* - * 4=1/1/1998 was a Thursday - */ - return (ndays + 4) % 7; -} - -static void -convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft) -{ - - eft->year = wtime->tm_year + 1900; - eft->month = wtime->tm_mon + 1; - eft->day = wtime->tm_mday; - eft->hour = wtime->tm_hour; - eft->minute = wtime->tm_min; - eft->second = wtime->tm_sec; - eft->nanosecond = 0; - eft->daylight = wtime->tm_isdst ? 
EFI_ISDST: 0; - eft->timezone = EFI_UNSPECIFIED_TIMEZONE; -} - -static void -convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) -{ - memset(wtime, 0, sizeof(*wtime)); - wtime->tm_sec = eft->second; - wtime->tm_min = eft->minute; - wtime->tm_hour = eft->hour; - wtime->tm_mday = eft->day; - wtime->tm_mon = eft->month - 1; - wtime->tm_year = eft->year - 1900; - - /* day of the week [0-6], Sunday=0 */ - wtime->tm_wday = compute_wday(eft); - - /* day in the year [1-365]*/ - wtime->tm_yday = compute_yday(eft); - - - switch (eft->daylight & EFI_ISDST) { - case EFI_ISDST: - wtime->tm_isdst = 1; - break; - case EFI_TIME_ADJUST_DAYLIGHT: - wtime->tm_isdst = 0; - break; - default: - wtime->tm_isdst = -1; - } -} - -static long efi_rtc_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - - efi_status_t status; - unsigned long flags; - efi_time_t eft; - efi_time_cap_t cap; - struct rtc_time wtime; - struct rtc_wkalrm __user *ewp; - unsigned char enabled, pending; - - switch (cmd) { - case RTC_UIE_ON: - case RTC_UIE_OFF: - case RTC_PIE_ON: - case RTC_PIE_OFF: - case RTC_AIE_ON: - case RTC_AIE_OFF: - case RTC_ALM_SET: - case RTC_ALM_READ: - case RTC_IRQP_READ: - case RTC_IRQP_SET: - case RTC_EPOCH_READ: - case RTC_EPOCH_SET: - return -EINVAL; - - case RTC_RD_TIME: - spin_lock_irqsave(&efi_rtc_lock, flags); - - status = efi.get_time(&eft, &cap); - - spin_unlock_irqrestore(&efi_rtc_lock,flags); - - if (status != EFI_SUCCESS) { - /* should never happen */ - printk(KERN_ERR "efitime: can't read time\n"); - return -EINVAL; - } - - convert_from_efi_time(&eft, &wtime); - - return copy_to_user((void __user *)arg, &wtime, - sizeof (struct rtc_time)) ? - EFAULT : 0; - - case RTC_SET_TIME: - - if (!capable(CAP_SYS_TIME)) return -EACCES; - - if (copy_from_user(&wtime, (struct rtc_time __user *)arg, - sizeof(struct rtc_time)) ) - return -EFAULT; - - convert_to_efi_time(&wtime, &eft); - - spin_lock_irqsave(&efi_rtc_lock, flags); - - status = efi.set_time(&eft); - - spin_unlock_irqrestore(&efi_rtc_lock,flags); - - return status == EFI_SUCCESS ? 0 : -EINVAL; - - case RTC_WKALM_SET: - - if (!capable(CAP_SYS_TIME)) return -EACCES; - - ewp = (struct rtc_wkalrm __user *)arg; - - if ( get_user(enabled, &ewp->enabled) - || copy_from_user(&wtime, &ewp->time, sizeof(struct rtc_time)) ) - return -EFAULT; - - convert_to_efi_time(&wtime, &eft); - - spin_lock_irqsave(&efi_rtc_lock, flags); - /* - * XXX Fixme: - * As of EFI 0.92 with the firmware I have on my - * machine this call does not seem to work quite - * right - */ - status = efi.set_wakeup_time((efi_bool_t)enabled, &eft); - - spin_unlock_irqrestore(&efi_rtc_lock,flags); - - return status == EFI_SUCCESS ? 0 : -EINVAL; - - case RTC_WKALM_RD: - - spin_lock_irqsave(&efi_rtc_lock, flags); - - status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft); - - spin_unlock_irqrestore(&efi_rtc_lock,flags); - - if (status != EFI_SUCCESS) return -EINVAL; - - ewp = (struct rtc_wkalrm __user *)arg; - - if ( put_user(enabled, &ewp->enabled) - || put_user(pending, &ewp->pending)) return -EFAULT; - - convert_from_efi_time(&eft, &wtime); - - return copy_to_user(&ewp->time, &wtime, - sizeof(struct rtc_time)) ? -EFAULT : 0; - } - return -ENOTTY; -} - -/* - * The various file operations we support. 
- */ - -static const struct file_operations efi_rtc_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = efi_rtc_ioctl, - .llseek = no_llseek, -}; - -static struct miscdevice efi_rtc_dev= { - EFI_RTC_MINOR, - "efirtc", - &efi_rtc_fops -}; - -/* - * We export RAW EFI information to /proc/driver/efirtc - */ -static int efi_rtc_proc_show(struct seq_file *m, void *v) -{ - efi_time_t eft, alm; - efi_time_cap_t cap; - efi_bool_t enabled, pending; - unsigned long flags; - - memset(&eft, 0, sizeof(eft)); - memset(&alm, 0, sizeof(alm)); - memset(&cap, 0, sizeof(cap)); - - spin_lock_irqsave(&efi_rtc_lock, flags); - - efi.get_time(&eft, &cap); - efi.get_wakeup_time(&enabled, &pending, &alm); - - spin_unlock_irqrestore(&efi_rtc_lock,flags); - - seq_printf(m, - "Time : %u:%u:%u.%09u\n" - "Date : %u-%u-%u\n" - "Daylight : %u\n", - eft.hour, eft.minute, eft.second, eft.nanosecond, - eft.year, eft.month, eft.day, - eft.daylight); - - if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE) - seq_puts(m, "Timezone : unspecified\n"); - else - /* XXX fixme: convert to string? */ - seq_printf(m, "Timezone : %u\n", eft.timezone); - - - seq_printf(m, - "Alarm Time : %u:%u:%u.%09u\n" - "Alarm Date : %u-%u-%u\n" - "Alarm Daylight : %u\n" - "Enabled : %s\n" - "Pending : %s\n", - alm.hour, alm.minute, alm.second, alm.nanosecond, - alm.year, alm.month, alm.day, - alm.daylight, - enabled == 1 ? "yes" : "no", - pending == 1 ? "yes" : "no"); - - if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE) - seq_puts(m, "Timezone : unspecified\n"); - else - /* XXX fixme: convert to string? */ - seq_printf(m, "Timezone : %u\n", alm.timezone); - - /* - * now prints the capabilities - */ - seq_printf(m, - "Resolution : %u\n" - "Accuracy : %u\n" - "SetstoZero : %u\n", - cap.resolution, cap.accuracy, cap.sets_to_zero); - - return 0; -} -static int __init -efi_rtc_init(void) -{ - int ret; - struct proc_dir_entry *dir; - - printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION); - - ret = misc_register(&efi_rtc_dev); - if (ret) { - printk(KERN_ERR "efirtc: can't misc_register on minor=%d\n", - EFI_RTC_MINOR); - return ret; - } - - dir = proc_create_single("driver/efirtc", 0, NULL, efi_rtc_proc_show); - if (dir == NULL) { - printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n"); - misc_deregister(&efi_rtc_dev); - return -1; - } - return 0; -} -device_initcall(efi_rtc_init); - -/* -MODULE_LICENSE("GPL"); -*/ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 66cc45e0624b..c7a93002a3c1 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -25,7 +25,7 @@ #define TEMP_MINOR 131 /* Temperature Sensor */ #define APM_MINOR_DEV 134 #define RTC_MINOR 135 -#define EFI_RTC_MINOR 136 /* EFI Time services */ +/*#define EFI_RTC_MINOR 136 was EFI Time services */ #define VHCI_MINOR 137 #define SUN_OPENPROM_MINOR 139 #define DMAPI_MINOR 140 /* unused */ -- cgit v1.2.3-58-ga151 From 8b977c5498b8336b0c61b0fa72f6353e71f938da Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Tue, 10 Mar 2020 13:22:46 +0000 Subject: nvmem: core: add nvmem_cell_read_u64 Add an nvmem_cell_read_u64() helper to ease reading a u64 value on the consumer side. This helper is useful on some sunxi platforms that have 64-bit data cells stored in non-volatile memory.
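On the consumer side this reads a whole cell into a u64 in one call; a minimal
sketch, where the cell name "speed-bin" and the foo_probe() wrapper are made up
for illustration:

    #include <linux/nvmem-consumer.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
    	u64 speed_bin;
    	int ret;

    	/* "speed-bin" is a hypothetical cell defined in DT/board data */
    	ret = nvmem_cell_read_u64(&pdev->dev, "speed-bin", &speed_bin);
    	if (ret)
    		return ret;

    	dev_info(&pdev->dev, "speed bin: %llu\n", speed_bin);
    	return 0;
    }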
Signed-off-by: Yangtao Li Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20200310132257.23358-4-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 15 +++++++++++++++ include/linux/nvmem-consumer.h | 7 +++++++ 2 files changed, 22 insertions(+) (limited to 'include/linux') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index b3619f335693..4634af1f6341 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1146,6 +1146,21 @@ int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) } EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); +/** + * nvmem_cell_read_u64() - Read a cell value as an u64 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u64); + /** * nvmem_device_cell_read() - Read a given nvmem device and cell * diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index d3776be48c53..1b311d27c9b8 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -63,6 +63,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); +int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); /* direct nvmem device read/write interface */ struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); @@ -138,6 +139,12 @@ static inline int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) { return -EOPNOTSUPP; } +static inline int nvmem_cell_read_u64(struct device *dev, + const char *cell_id, u64 *val) +{ + return -EOPNOTSUPP; +} + static inline struct nvmem_device *nvmem_device_get(struct device *dev, const char *name) { -- cgit v1.2.3-58-ga151 From 0cbf260820fa780a336e4a08cce1f81cd66a7ac1 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:40 +0530 Subject: bus: mhi: core: Add support for registering MHI controllers This commit adds support for registering MHI controller drivers with the MHI stack. MHI controller drivers manage the interaction with MHI client devices such as external modems and WiFi chipsets. They are also the MHI bus master in charge of managing the physical link between the host and client device.
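From a controller driver's point of view, registration boils down to filling
in the required callbacks on struct mhi_controller plus a struct
mhi_controller_config describing the channels and event rings, then calling
mhi_register_controller(). A rough sketch against the API added here; every
modem_* name is a placeholder, not part of this patch:

    static struct mhi_controller_config modem_config = {
    	.max_channels = 128,
    	.timeout_ms = 0,		/* 0 falls back to MHI_TIMEOUT_MS */
    	.num_channels = ARRAY_SIZE(modem_channels),
    	.ch_cfg = modem_channels,	/* struct mhi_channel_config[] */
    	.num_events = ARRAY_SIZE(modem_events),
    	.event_cfg = modem_events,	/* struct mhi_event_config[] */
    };

    /* in the transport glue's probe(): */
    mhi_cntrl->cntrl_dev = &pdev->dev;
    mhi_cntrl->runtime_get = modem_runtime_get;	/* required */
    mhi_cntrl->runtime_put = modem_runtime_put;	/* required */
    mhi_cntrl->status_cb = modem_status_cb;	/* required */
    mhi_cntrl->link_status = modem_link_status;	/* required */

    ret = mhi_register_controller(mhi_cntrl, &modem_config);
    if (ret)
    	return ret;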
This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/987 Signed-off-by: Sujeev Dias Signed-off-by: Siddartha Mohanadoss [jhugo: added static config for controllers and fixed several bugs] Signed-off-by: Jeffrey Hugo [mani: removed DT dependency, splitted and cleaned up for upstream] Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20200220095854.4804-3-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/Kconfig | 1 + drivers/bus/Makefile | 3 + drivers/bus/mhi/Kconfig | 14 ++ drivers/bus/mhi/Makefile | 2 + drivers/bus/mhi/core/Makefile | 3 + drivers/bus/mhi/core/init.c | 402 ++++++++++++++++++++++++++++++++++++++++ drivers/bus/mhi/core/internal.h | 151 +++++++++++++++ include/linux/mhi.h | 400 +++++++++++++++++++++++++++++++++++++++ include/linux/mod_devicetable.h | 12 ++ 9 files changed, 988 insertions(+) create mode 100644 drivers/bus/mhi/Kconfig create mode 100644 drivers/bus/mhi/Makefile create mode 100644 drivers/bus/mhi/core/Makefile create mode 100644 drivers/bus/mhi/core/init.c create mode 100644 drivers/bus/mhi/core/internal.h create mode 100644 include/linux/mhi.h (limited to 'include/linux') diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 6095b6df8a81..6d4e4497b59b 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -201,5 +201,6 @@ config DA8XX_MSTPRI peripherals. source "drivers/bus/fsl-mc/Kconfig" +source "drivers/bus/mhi/Kconfig" endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 1320bcf9fa9d..05f32cd694a4 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -34,3 +34,6 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o + +# MHI +obj-$(CONFIG_MHI_BUS) += mhi/ diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig new file mode 100644 index 000000000000..a8bd9bd7db7c --- /dev/null +++ b/drivers/bus/mhi/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# MHI bus +# +# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. +# + +config MHI_BUS + tristate "Modem Host Interface (MHI) bus" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile new file mode 100644 index 000000000000..19e6443b72df --- /dev/null +++ b/drivers/bus/mhi/Makefile @@ -0,0 +1,2 @@ +# core layer +obj-y += core/ diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile new file mode 100644 index 000000000000..2db32697c67f --- /dev/null +++ b/drivers/bus/mhi/core/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MHI_BUS) := mhi.o + +mhi-y := init.o diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c new file mode 100644 index 000000000000..6f24c21284ec --- /dev/null +++ b/drivers/bus/mhi/core/init.c @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + struct mhi_event *mhi_event; + struct mhi_event_config *event_cfg; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i, num; + + num = config->num_events; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Populate event ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < num; i++) { + event_cfg = &config->event_cfg[i]; + + mhi_event->er_index = i; + mhi_event->ring.elements = event_cfg->num_elements; + mhi_event->intmod = event_cfg->irq_moderation_ms; + mhi_event->irq = event_cfg->irq; + + if (event_cfg->channel != U32_MAX) { + /* This event ring has a dedicated channel */ + mhi_event->chan = event_cfg->channel; + if (mhi_event->chan >= mhi_cntrl->max_chan) { + dev_err(dev, + "Event Ring channel not available\n"); + goto error_ev_cfg; + } + + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + /* Priority is fixed to 1 for now */ + mhi_event->priority = 1; + + mhi_event->db_cfg.brstmode = event_cfg->mode; + if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + mhi_event->data_type = event_cfg->data_type; + + mhi_event->hw_ring = event_cfg->hardware_event; + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = event_cfg->client_managed; + mhi_event->offload_ev = event_cfg->offload_channel; + mhi_event++; + } + + /* We need IRQ for each event ring + additional one for BHI */ + mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1; + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} + +static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + struct mhi_channel_config *ch_cfg; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i; + u32 chan; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * The allocation of MHI channels can exceed 32KB in some scenarios, + * so to avoid any memory possible allocation failures, vzalloc is + * used here + */ + mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * + sizeof(*mhi_cntrl->mhi_chan)); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* Populate channel configurations */ + for (i = 0; i < config->num_channels; i++) { + struct mhi_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel %d not available\n", chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + + mhi_chan->tre_ring.elements = ch_cfg->num_elements; + if (!mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, local ring length should be bigger than + * the transfer ring length due to internal logical channels + * in device. So host can queue much more buffers than transfer + * ring length. Example, RSC channels should have a larger local + * channel length than transfer ring length. 
+ */ + mhi_chan->buf_ring.elements = ch_cfg->local_elements; + if (!mhi_chan->buf_ring.elements) + mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; + mhi_chan->er_index = ch_cfg->event_ring; + mhi_chan->dir = ch_cfg->dir; + + /* + * For most channels, chtype is identical to channel directions. + * So, if it is not defined then assign channel direction to + * chtype + */ + mhi_chan->type = ch_cfg->type; + if (!mhi_chan->type) + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + mhi_chan->ee_mask = ch_cfg->ee_mask; + mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; + mhi_chan->lpm_notify = ch_cfg->lpm_notify; + mhi_chan->offload_ch = ch_cfg->offload_channel; + mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; + mhi_chan->pre_alloc = ch_cfg->auto_queue; + mhi_chan->auto_start = ch_cfg->auto_start; + + /* + * If MHI host allocates buffers, then the channel direction + * should be DMA_FROM_DEVICE + */ + if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + /* + * Bi-directional and direction less channel must be an + * offload channel + */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { + dev_err(dev, "Invalid Door bell mode\n"); + goto error_chan_cfg; + } + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + vfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} + +static int parse_config(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + int ret; + + /* Parse MHI channel configuration */ + ret = parse_ch_cfg(mhi_cntrl, config); + if (ret) + return ret; + + /* Parse MHI event configuration */ + ret = parse_ev_cfg(mhi_cntrl, config); + if (ret) + goto error_ev_cfg; + + mhi_cntrl->timeout_ms = config->timeout_ms; + if (!mhi_cntrl->timeout_ms) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = config->use_bounce_buf; + mhi_cntrl->buffer_len = config->buf_len; + if (!mhi_cntrl->buffer_len) + mhi_cntrl->buffer_len = MHI_MAX_MTU; + + return 0; + +error_ev_cfg: + vfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config) +{ + int ret; + int i; + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + + if (!mhi_cntrl) + return -EINVAL; + + if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put) + return -EINVAL; + + if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status) + return -EINVAL; + + ret = parse_config(mhi_cntrl, config); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto error_alloc_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + /* Skip 
for offload events */ + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + } + + /* Register controller with MHI bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); + ret = PTR_ERR(mhi_dev); + goto error_alloc_dev; + } + + mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev)); + + /* Init wakeup source */ + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto error_add_dev; + + mhi_cntrl->mhi_dev = mhi_dev; + + return 0; + +error_add_dev: + put_device(&mhi_dev->dev); + +error_alloc_dev: + kfree(mhi_cntrl->mhi_cmd); + +error_alloc_cmd: + vfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_register_controller); + +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; + unsigned int i; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + + /* Drop the references to MHI devices created for channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->mhi_dev) + continue; + + put_device(&mhi_chan->mhi_dev->dev); + } + vfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); +} +EXPORT_SYMBOL_GPL(mhi_unregister_controller); + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + kfree(mhi_dev); +} + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + dev->parent = mhi_cntrl->cntrl_dev; + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_wake = 0; + + return mhi_dev; +} + +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + return 0; +}; + +struct bus_type mhi_bus_type = { + .name = "mhi", + .dev_name = "mhi", + .match = mhi_match, +}; + +static int __init mhi_init(void) +{ + return bus_register(&mhi_bus_type); +} + +static void __exit mhi_exit(void) +{ + bus_unregister(&mhi_bus_type); +} + +postcore_initcall(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h new file mode 100644 index 000000000000..6af59ac3ec9d --- /dev/null +++ b/drivers/bus/mhi/core/internal.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include + +extern struct bus_type mhi_bus_type; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, /* End of transfer event */ + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, /* End of block event */ + MHI_EV_CC_OOB = 0x5, /* Out of block event */ + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum mhi_ch_state { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ + mode != MHI_DB_BRST_ENABLE) + +#define NR_OF_CMD_RINGS 1 +#define CMD_EL_PER_RING 128 +#define PRIMARY_CMD_RING 0 +#define MHI_MAX_MTU 0xffff + +enum mhi_er_type { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum mhi_db_brst_mode brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + u64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + void *v_addr; + void *bb_addr; + void *wp; + void *cb_buf; + dma_addr_t p_addr; + size_t len; + enum dma_data_direction dir; +}; + +struct mhi_event { + struct mhi_controller *mhi_cntrl; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + u32 er_index; + u32 intmod; + u32 irq; + int chan; /* this event ring is dedicated to a channel (optional) */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + struct tasklet_struct task; + spinlock_t lock; + int (*process_event)(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ +}; + +struct mhi_chan { + const char *name; + /* + * Important: When consuming, increment tre_ring first and when + * releasing, decrement buf_ring first. If tre_ring has space, buf_ring + * is guranteed to have space so we do not need to check both rings. 
+ */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + u32 chan; + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + enum mhi_ch_ee_mask ee_mask; + enum mhi_ch_state ch_state; + enum mhi_ev_ccs ccs; + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + struct list_head node; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool auto_start; + bool wake_capable; +}; + +/* Default MHI timeout */ +#define MHI_TIMEOUT_MS (1000) + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); + +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); + +#endif /* _MHI_INT_H */ diff --git a/include/linux/mhi.h b/include/linux/mhi.h new file mode 100644 index 000000000000..a34aa50120c8 --- /dev/null +++ b/include/linux/mhi.h @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ +#ifndef _MHI_H_ +#define _MHI_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct mhi_chan; +struct mhi_event; +struct mhi_ctxt; +struct mhi_cmd; +struct mhi_buf_info; + +/** + * enum mhi_callback - MHI callback + * @MHI_CB_IDLE: MHI entered idle state + * @MHI_CB_PENDING_DATA: New data available for client to process + * @MHI_CB_LPM_ENTER: MHI host entered low power mode + * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode + * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env + * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env + * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover) + * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state + */ +enum mhi_callback { + MHI_CB_IDLE, + MHI_CB_PENDING_DATA, + MHI_CB_LPM_ENTER, + MHI_CB_LPM_EXIT, + MHI_CB_EE_RDDM, + MHI_CB_EE_MISSION_MODE, + MHI_CB_SYS_ERROR, + MHI_CB_FATAL_ERROR, +}; + +/** + * enum mhi_flags - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum mhi_flags { + MHI_EOB, + MHI_EOT, + MHI_CHAIN, +}; + +/** + * enum mhi_device_type - Device types + * @MHI_DEVICE_XFER: Handles data transfer + * @MHI_DEVICE_CONTROLLER: Control device + */ +enum mhi_device_type { + MHI_DEVICE_XFER, + MHI_DEVICE_CONTROLLER, +}; + +/** + * enum mhi_ch_type - Channel types + * @MHI_CH_TYPE_INVALID: Invalid channel type + * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device + * @MHI_CH_TYPE_INBOUND: Inbound channel from the device + * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine + * multiple packets and send them as a single + * large packet to reduce CPU consumption + */ +enum mhi_ch_type { + MHI_CH_TYPE_INVALID = 0, + MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, + MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, + MHI_CH_TYPE_INBOUND_COALESCED = 3, +}; + +/** + * enum mhi_ee_type - Execution environment types + * @MHI_EE_PBL: Primary Bootloader + * @MHI_EE_SBL: Secondary Bootloader + * @MHI_EE_AMSS: Modem, aka the primary runtime EE + * @MHI_EE_RDDM: Ram dump download mode + * @MHI_EE_WFW: WLAN firmware mode + * @MHI_EE_PTHRU: Passthrough + * @MHI_EE_EDL: Embedded downloader + */ +enum mhi_ee_type { + MHI_EE_PBL, + MHI_EE_SBL, + MHI_EE_AMSS, + MHI_EE_RDDM, + MHI_EE_WFW, + MHI_EE_PTHRU, + MHI_EE_EDL, + MHI_EE_MAX_SUPPORTED = MHI_EE_EDL, + 
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+	MHI_EE_NOT_SUPPORTED,
+	MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_ch_ee_mask - Execution environment mask for channel
+ * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
+ * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
+ * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
+ * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
+ * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
+ * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
+ * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
+ */
+enum mhi_ch_ee_mask {
+	MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+	MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+	MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+	MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+	MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+	MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+	MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+/**
+ * enum mhi_er_data_type - Event ring data types
+ * @MHI_ER_DATA: Only client data over this ring
+ * @MHI_ER_CTRL: MHI control data and client data
+ */
+enum mhi_er_data_type {
+	MHI_ER_DATA,
+	MHI_ER_CTRL,
+};
+
+/**
+ * enum mhi_db_brst_mode - Doorbell mode
+ * @MHI_DB_BRST_DISABLE: Burst mode disable
+ * @MHI_DB_BRST_ENABLE: Burst mode enable
+ */
+enum mhi_db_brst_mode {
+	MHI_DB_BRST_DISABLE = 0x2,
+	MHI_DB_BRST_ENABLE = 0x3,
+};
+
+/**
+ * struct mhi_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @local_elements: The local ring length of the channel
+ * @event_ring: The event ring index that services this channel
+ * @dir: Direction that data may flow on this channel
+ * @type: Channel type
+ * @ee_mask: Execution Environment mask for this channel
+ * @pollcfg: Polling configuration for burst mode. 0 is default; milliseconds
+ *	     for UL channels, multiple of 8 ring elements for DL channels
+ * @doorbell: Doorbell mode
+ * @lpm_notify: The channel master requires low power mode notifications
+ * @offload_channel: The client manages the channel completely
+ * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
+ * @auto_queue: Framework will automatically queue buffers for DL traffic
+ * @auto_start: Automatically start (open) this channel
+ */
+struct mhi_channel_config {
+	char *name;
+	u32 num;
+	u32 num_elements;
+	u32 local_elements;
+	u32 event_ring;
+	enum dma_data_direction dir;
+	enum mhi_ch_type type;
+	u32 ee_mask;
+	u32 pollcfg;
+	enum mhi_db_brst_mode doorbell;
+	bool lpm_notify;
+	bool offload_channel;
+	bool doorbell_mode_switch;
+	bool auto_queue;
+	bool auto_start;
+};
+
+/**
+ * struct mhi_event_config - Event ring configuration structure for controller
+ * @num_elements: The number of elements that can be queued to this ring
+ * @irq_moderation_ms: Delay irq for additional events to be aggregated
+ * @irq: IRQ associated with this ring
+ * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
+ * @priority: Priority of this ring.
Use 1 for now + * @mode: Doorbell mode + * @data_type: Type of data this ring will process + * @hardware_event: This ring is associated with hardware channels + * @client_managed: This ring is client managed + * @offload_channel: This ring is associated with an offloaded channel + */ +struct mhi_event_config { + u32 num_elements; + u32 irq_moderation_ms; + u32 irq; + u32 channel; + u32 priority; + enum mhi_db_brst_mode mode; + enum mhi_er_data_type data_type; + bool hardware_event; + bool client_managed; + bool offload_channel; +}; + +/** + * struct mhi_controller_config - Root MHI controller configuration + * @max_channels: Maximum number of channels supported + * @timeout_ms: Timeout value for operations. 0 means use default + * @buf_len: Size of automatically allocated buffers. 0 means use default + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + * @num_events: Number of event rings defined in @event_cfg + * @event_cfg: Array of defined event rings + * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access + * @m2_no_db: Host is not allowed to ring DB in M2 state + */ +struct mhi_controller_config { + u32 max_channels; + u32 timeout_ms; + u32 buf_len; + u32 num_channels; + struct mhi_channel_config *ch_cfg; + u32 num_events; + struct mhi_event_config *event_cfg; + bool use_bounce_buf; + bool m2_no_db; +}; + +/** + * struct mhi_controller - Master MHI controller structure + * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI + * controller (required) + * @mhi_dev: MHI device instance for the controller + * @regs: Base address of MHI MMIO register space (required) + * @iova_start: IOMMU starting address for data (required) + * @iova_stop: IOMMU stop address for data (required) + * @fw_image: Firmware image name for normal booting (required) + * @edl_image: Firmware image name for emergency download mode (optional) + * @sbl_size: SBL image size downloaded through BHIe (optional) + * @seg_len: BHIe vector size (optional) + * @mhi_chan: Points to the channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @irq: base irq # to request (required) + * @max_chan: Maximum number of channels the controller supports + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @nr_irqs_req: Number of IRQs required to operate (optional) + * @nr_irqs: Number of IRQ allocated by bus master (required) + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @pm_mutex: Mutex for suspend/resume operation + * @pm_lock: Lock for protecting MHI power management state + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: MHI power management state + * @db_access: DB access states + * @ee: MHI device execution environment + * @dev_wake: Device wakeup count + * @pending_pkts: Pending packets for the controller + * @transition_list: List of MHI state transitions + * @transition_lock: Lock for protecting MHI state transition list + * @wlock: Lock for protecting device wakeup + * @st_worker: State transition worker + * @fw_worker: Firmware download worker + * @syserr_worker: System error worker + * @state_event: State change event + * @status_cb: CB function to notify power states of the device (required) + * @link_status: CB function to query link status of the 
device (required)
+ * @wake_get: CB function to assert device wake (optional)
+ * @wake_put: CB function to de-assert device wake (optional)
+ * @wake_toggle: CB function to assert and de-assert device wake (optional)
+ * @runtime_get: CB function for controller runtime resume (required)
+ * @runtime_put: CB function to decrement pm usage (required)
+ * @buffer_len: Bounce buffer length
+ * @bounce_buf: Use of bounce buffer
+ * @fbc_download: MHI host needs to do complete image transfer (optional)
+ * @pre_init: MHI host needs to do pre-initialization before power up
+ * @wake_set: Device wakeup set flag
+ *
+ * Fields marked as (required) need to be populated by the controller driver
+ * before calling mhi_register_controller(). Fields marked as (optional) can
+ * be populated depending on the use case.
+ */
+struct mhi_controller {
+	struct device *cntrl_dev;
+	struct mhi_device *mhi_dev;
+	void __iomem *regs;
+	dma_addr_t iova_start;
+	dma_addr_t iova_stop;
+	const char *fw_image;
+	const char *edl_image;
+	size_t sbl_size;
+	size_t seg_len;
+	struct mhi_chan *mhi_chan;
+	struct list_head lpm_chans;
+	int *irq;
+	u32 max_chan;
+	u32 total_ev_rings;
+	u32 hw_ev_rings;
+	u32 sw_ev_rings;
+	u32 nr_irqs_req;
+	u32 nr_irqs;
+
+	struct mhi_event *mhi_event;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_ctxt *mhi_ctxt;
+
+	struct mutex pm_mutex;
+	rwlock_t pm_lock;
+	u32 timeout_ms;
+	u32 pm_state;
+	u32 db_access;
+	enum mhi_ee_type ee;
+	atomic_t dev_wake;
+	atomic_t pending_pkts;
+	struct list_head transition_list;
+	spinlock_t transition_lock;
+	spinlock_t wlock;
+	struct work_struct st_worker;
+	struct work_struct fw_worker;
+	struct work_struct syserr_worker;
+	wait_queue_head_t state_event;
+
+	void (*status_cb)(struct mhi_controller *mhi_cntrl,
+			  enum mhi_callback cb);
+	int (*link_status)(struct mhi_controller *mhi_cntrl);
+	void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
+	void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
+	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+	int (*runtime_get)(struct mhi_controller *mhi_cntrl);
+	void (*runtime_put)(struct mhi_controller *mhi_cntrl);
+
+	size_t buffer_len;
+	bool bounce_buf;
+	bool fbc_download;
+	bool pre_init;
+	bool wake_set;
+};
+
+/**
+ * struct mhi_device - Structure representing an MHI device which binds
+ *                     to channels
+ * @id: Pointer to MHI device ID struct
+ * @chan_name: Name of the channel to which the device binds
+ * @mhi_cntrl: Controller the device belongs to
+ * @ul_chan: UL channel for the device
+ * @dl_chan: DL channel for the device
+ * @dev: Driver model device node for the MHI device
+ * @dev_type: MHI device type
+ * @dev_wake: Device wakeup counter
+ */
+struct mhi_device {
+	const struct mhi_device_id *id;
+	const char *chan_name;
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *ul_chan;
+	struct mhi_chan *dl_chan;
+	struct device dev;
+	enum mhi_device_type dev_type;
+	u32 dev_wake;
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @bytes_xferd: # of bytes transferred
+ * @dir: Channel direction
+ * @transaction_status: Status of last transaction
+ */
+struct mhi_result {
+	void *buf_addr;
+	size_t bytes_xferd;
+	enum dma_data_direction dir;
+	int transaction_status;
+};
+
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+/**
+ * mhi_register_controller - Register MHI controller
+ * @mhi_cntrl: MHI controller to register
+ * @config: Configuration to use for the controller
+ */
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+			    struct mhi_controller_config *config);
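[Editorial note] To make the (required)/(optional) markings above concrete, here is a minimal sketch of what a bus glue driver might populate before registering; it is an illustration only, not code from this patch. All sketch_* names and the firmware path are invented, and DMA_BIT_MASK() is assumed to come from <linux/dma-mapping.h>:

	static void sketch_status_cb(struct mhi_controller *mhi_cntrl,
				     enum mhi_callback cb)
	{
		/* React to MHI_CB_SYS_ERROR, MHI_CB_EE_MISSION_MODE, ... */
	}

	static int sketch_link_status(struct mhi_controller *mhi_cntrl)
	{
		return 0;	/* 0: link is up, MMIO reads can be trusted */
	}

	static int sketch_runtime_get(struct mhi_controller *mhi_cntrl)
	{
		return 0;	/* resume the underlying bus, e.g. via runtime PM */
	}

	static void sketch_runtime_put(struct mhi_controller *mhi_cntrl)
	{
		/* Drop the usage count taken in sketch_runtime_get() */
	}

	static int sketch_setup(struct device *parent, void __iomem *mmio,
				int *irq_table, struct mhi_controller *mhi_cntrl,
				struct mhi_controller_config *cfg)
	{
		mhi_cntrl->cntrl_dev = parent;			/* (required) */
		mhi_cntrl->regs = mmio;				/* (required) */
		mhi_cntrl->iova_start = 0;			/* (required) */
		mhi_cntrl->iova_stop = DMA_BIT_MASK(32);	/* (required) */
		mhi_cntrl->fw_image = "sketch/fw.mbn";		/* (required, invented) */
		mhi_cntrl->irq = irq_table;			/* (required) */
		mhi_cntrl->nr_irqs = 1;				/* (required) */
		mhi_cntrl->status_cb = sketch_status_cb;	/* (required) */
		mhi_cntrl->link_status = sketch_link_status;	/* (required) */
		mhi_cntrl->runtime_get = sketch_runtime_get;	/* (required) */
		mhi_cntrl->runtime_put = sketch_runtime_put;	/* (required) */

		return mhi_register_controller(mhi_cntrl, cfg);
	}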
+
+/**
+ * mhi_unregister_controller - Unregister MHI controller
+ * @mhi_cntrl: MHI controller to unregister
+ */
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
+
+#endif /* _MHI_H_ */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e3596db077dc..be15e997fe39 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -821,4 +821,16 @@ struct wmi_device_id {
 	const void *context;
 };
 
+#define MHI_NAME_SIZE 32
+
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data
+ */
+struct mhi_device_id {
+	const char chan[MHI_NAME_SIZE];
+	kernel_ulong_t driver_data;
+};
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
-- cgit v1.2.3-58-ga151

From e755cadb0171ce78b29b89fe8bdd0179121a7827 Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam
Date: Thu, 20 Feb 2020 15:28:41 +0530
Subject: bus: mhi: core: Add support for registering MHI client drivers

This commit adds support for registering MHI client drivers with the
MHI stack. MHI client drivers bind to one or more MHI devices in order
to send and receive the upper-layer protocol packets like IP packets,
modem control messages, and diagnostics messages over the MHI bus.

This is based on the patch submitted by Sujeev Dias:
https://lkml.org/lkml/2018/7/9/987

Signed-off-by: Sujeev Dias
Signed-off-by: Siddartha Mohanadoss
[mani: split and cleaned up for upstream]
Signed-off-by: Manivannan Sadhasivam
Reviewed-by: Jeffrey Hugo
Tested-by: Jeffrey Hugo
Link: https://lore.kernel.org/r/20200220095854.4804-4-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/bus/mhi/core/init.c | 149 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/mhi.h         |  39 ++++++++++++
 2 files changed, 188 insertions(+)
(limited to 'include/linux')

diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 6f24c21284ec..12e386862b3f 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -374,8 +374,157 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
 	return mhi_dev;
 }
 
+static int mhi_driver_probe(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct device_driver *drv = dev->driver;
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	struct mhi_event *mhi_event;
+	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+
+	if (ul_chan) {
+		/*
+		 * If the channel supports LPM notifications then status_cb
+		 * should be provided
+		 */
+		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+			return -EINVAL;
+
+		/* For non-offload channels, xfer_cb should be provided */
+		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+			return -EINVAL;
+
+		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+	}
+
+	if (dl_chan) {
+		/*
+		 * If the channel supports LPM notifications then status_cb
+		 * should be provided
+		 */
+		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+			return -EINVAL;
+
+		/* For non-offload channels, xfer_cb should be provided */
+		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+			return -EINVAL;
+
+		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+		/*
+		 * If the channel event ring is managed by client, then
+		 * status_cb must be provided so that the framework can
+		 * notify pending data
+		 */
+		if (mhi_event->cl_manage && !mhi_drv->status_cb)
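+			/*
+			 * Editorial note: cl_manage marks an event ring that
+			 * the client manages itself; without a status_cb the
+			 * core could not deliver the MHI_CB_PENDING_DATA
+			 * notification for it.
+			 */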
return -EINVAL; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + } + + /* Call the user provided probe function */ + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_chan *mhi_chan; + enum mhi_ch_state ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* Wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* Set the channel state to disabled */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; + write_unlock_irq(&mhi_chan->lock); + + mutex_unlock(&mhi_chan->mutex); + } + + mhi_drv->remove(mhi_dev); + + /* De-init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + return 0; +} + +int mhi_driver_register(struct mhi_driver *mhi_drv) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_driver_unregister); + static int mhi_match(struct device *dev, struct device_driver *drv) { + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->chan_name, id->chan)) { + mhi_dev->id = id; + return 1; + } + return 0; }; diff --git a/include/linux/mhi.h b/include/linux/mhi.h index a34aa50120c8..7e6b7743c705 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -354,6 +354,8 @@ struct mhi_controller { * @dl_chan: DL channel for the device * @dev: Driver model device node for the MHI device * @dev_type: MHI device type + * @ul_chan_id: MHI channel id for UL transfer + * @dl_chan_id: MHI channel id for DL transfer * @dev_wake: Device wakeup counter */ struct mhi_device { @@ -364,6 +366,8 @@ struct mhi_device { struct mhi_chan *dl_chan; struct device dev; enum mhi_device_type dev_type; + int ul_chan_id; + int dl_chan_id; u32 dev_wake; }; @@ -381,6 +385,29 @@ struct mhi_result { int transaction_status; }; +/** + * struct mhi_driver - Structure representing a MHI client driver + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL data transfer + * @dl_xfer_cb: CB function for DL data transfer + * @status_cb: 
CB function for asynchronous status
+ * @driver: Device driver model driver
+ */
+struct mhi_driver {
+	const struct mhi_device_id *id_table;
+	int (*probe)(struct mhi_device *mhi_dev,
+		     const struct mhi_device_id *id);
+	void (*remove)(struct mhi_device *mhi_dev);
+	void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+			   struct mhi_result *result);
+	void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
+	struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
 #define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
 
 /**
@@ -397,4 +424,16 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
  */
 void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: Driver associated with the device
+ */
+int mhi_driver_register(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
+
 #endif /* _MHI_H_ */
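[Editorial note] For illustration, a minimal client driver written against the API above could look like the sketch below. It is hypothetical: the "SKETCH" channel name and every sketch_* identifier are invented, and the bodies are skeletons rather than code from this patch:

	static const struct mhi_device_id sketch_id_table[] = {
		{ .chan = "SKETCH" },	/* channel name is hypothetical */
		{},
	};

	static int sketch_probe(struct mhi_device *mhi_dev,
				const struct mhi_device_id *id)
	{
		/* Allocate per-device state; transfers can start from here on */
		return 0;
	}

	static void sketch_remove(struct mhi_device *mhi_dev)
	{
		/* Tear down whatever sketch_probe() created */
	}

	static void sketch_ul_xfer_cb(struct mhi_device *mhi_dev,
				      struct mhi_result *result)
	{
		/* A UL buffer completed; check result->transaction_status */
	}

	static void sketch_dl_xfer_cb(struct mhi_device *mhi_dev,
				      struct mhi_result *result)
	{
		/* result->buf_addr holds result->bytes_xferd bytes of DL data */
	}

	static struct mhi_driver sketch_driver = {
		.id_table = sketch_id_table,
		.probe = sketch_probe,
		.remove = sketch_remove,
		.ul_xfer_cb = sketch_ul_xfer_cb,
		.dl_xfer_cb = sketch_dl_xfer_cb,
		.driver = {
			.name = "sketch_mhi_client",
		},
	};

Calling mhi_driver_register(&sketch_driver) attaches the driver to the MHI bus. Note that registration fails unless both probe and remove are set, and mhi_driver_probe() above additionally insists on the matching xfer_cb for any non-offload channel the device binds to.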
-- cgit v1.2.3-58-ga151

From da1c4f85692476ab038e3279209f07b8f4b7641e Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam
Date: Thu, 20 Feb 2020 15:28:42 +0530
Subject: bus: mhi: core: Add support for creating and destroying MHI devices

This commit adds support for creating and destroying MHI devices. The
MHI devices bind to the MHI channels and are used to transfer data
between the MHI host and the client device.

This is based on the patch submitted by Sujeev Dias:
https://lkml.org/lkml/2018/7/9/989

Signed-off-by: Sujeev Dias
Signed-off-by: Siddartha Mohanadoss
[mani: split from pm patch and cleaned up for upstream]
Signed-off-by: Manivannan Sadhasivam
Reviewed-by: Jeffrey Hugo
Tested-by: Jeffrey Hugo
Link: https://lore.kernel.org/r/20200220095854.4804-5-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/bus/mhi/core/Makefile |   2 +-
 drivers/bus/mhi/core/main.c   | 123 ++++++++++++++++++++++++++++++++++++++++
 include/linux/mhi.h           |   2 +
 3 files changed, 126 insertions(+), 1 deletion(-)
 create mode 100644 drivers/bus/mhi/core/main.c
(limited to 'include/linux')

diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
index 2db32697c67f..77f7730da4bf 100644
--- a/drivers/bus/mhi/core/Makefile
+++ b/drivers/bus/mhi/core/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_MHI_BUS) := mhi.o
 
-mhi-y := init.o
+mhi-y := init.o main.o
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
new file mode 100644
index 000000000000..7c35744ec0c0
--- /dev/null
+++ b/drivers/bus/mhi/core/main.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "internal.h"
+
+int mhi_destroy_device(struct device *dev, void *data)
+{
+	struct mhi_device *mhi_dev;
+	struct mhi_controller *mhi_cntrl;
+
+	if (dev->bus != &mhi_bus_type)
+		return 0;
+
+	mhi_dev = to_mhi_device(dev);
+	mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* Only destroy virtual devices that are attached to the bus */
+	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+		return 0;
+
+	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+		mhi_dev->chan_name);
+
+	/* Notify the client and remove the device from MHI bus */
+	device_del(dev);
+	put_device(dev);
+
+	return 0;
+}
+
+static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+	struct mhi_driver *mhi_drv;
+
+	if (!mhi_dev->dev.driver)
+		return;
+
+	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+	if (mhi_drv->status_cb)
+		mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_chan *mhi_chan;
+	struct mhi_device *mhi_dev;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int i, ret;
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+			continue;
+		mhi_dev = mhi_alloc_device(mhi_cntrl);
+		if (!mhi_dev)
+			return;
+
+		mhi_dev->dev_type = MHI_DEVICE_XFER;
+		switch (mhi_chan->dir) {
+		case DMA_TO_DEVICE:
+			mhi_dev->ul_chan = mhi_chan;
+			mhi_dev->ul_chan_id = mhi_chan->chan;
+			break;
+		case DMA_FROM_DEVICE:
+			/* We use dl_chan as offload channels */
+			mhi_dev->dl_chan = mhi_chan;
+			mhi_dev->dl_chan_id = mhi_chan->chan;
+			break;
+		default:
+			dev_err(dev, "Direction not supported\n");
+			put_device(&mhi_dev->dev);
+			return;
+		}
+
+		get_device(&mhi_dev->dev);
+		mhi_chan->mhi_dev = mhi_dev;
+
+		/* Check next channel if it matches */
+		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+				i++;
+				mhi_chan++;
+				if (mhi_chan->dir == DMA_TO_DEVICE) {
+					mhi_dev->ul_chan = mhi_chan;
+					mhi_dev->ul_chan_id = mhi_chan->chan;
+				} else {
+					mhi_dev->dl_chan = mhi_chan;
+					mhi_dev->dl_chan_id = mhi_chan->chan;
+				}
+				get_device(&mhi_dev->dev);
+				mhi_chan->mhi_dev = mhi_dev;
+			}
+		}
+
+		/* Channel name is the same for both UL and DL */
+		mhi_dev->chan_name = mhi_chan->name;
+		dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
+			     mhi_dev->chan_name);
+
+		/* Init wakeup source if available */
+		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+			device_init_wakeup(&mhi_dev->dev, true);
+
+		ret = device_add(&mhi_dev->dev);
+		if (ret)
+			put_device(&mhi_dev->dev);
+	}
+}
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 7e6b7743c705..1ce2bdd5f2f4 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -163,6 +163,7 @@ enum mhi_db_brst_mode {
  * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
  * @auto_queue: Framework will automatically queue buffers for DL traffic
  * @auto_start: Automatically start (open) this channel
+ * @wake_capable: Channel capable of waking up the system
  */
 struct mhi_channel_config {
 	char *name;
@@ -180,6 +181,7 @@ struct mhi_channel_config {
 	bool doorbell_mode_switch;
 	bool auto_queue;
 	bool auto_start;
+	bool wake_capable;
 };
 
 /**
-- cgit v1.2.3-58-ga151

From 6cd330ae76ffd5c8f6294c423cabde7eeef1b40c Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam
Date: Thu, 20 Feb 2020 15:28:43 +0530 Subject: bus: mhi: core: Add support for ringing channel/event ring doorbells This commit adds support for ringing channel and event ring doorbells by MHI host. The MHI host can use the channel and event ring doorbells for notifying the client device about processing transfer and event rings which it has queued using MMIO registers. This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/989 Signed-off-by: Sujeev Dias Signed-off-by: Siddartha Mohanadoss [mani: splitted from pm patch and cleaned up for upstream] Signed-off-by: Manivannan Sadhasivam Reviewed-by: Jeffrey Hugo Tested-by: Jeffrey Hugo Link: https://lore.kernel.org/r/20200220095854.4804-6-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/init.c | 141 ++++++++++++++++++++ drivers/bus/mhi/core/internal.h | 282 ++++++++++++++++++++++++++++++++++++++++ drivers/bus/mhi/core/main.c | 118 +++++++++++++++++ include/linux/mhi.h | 4 + 4 files changed, 545 insertions(+) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 12e386862b3f..8d81c9860ba7 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -17,6 +17,137 @@ #include #include "internal.h" +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0 } + }; + + dev_dbg(dev, "Initializing MHI registers\n"); + + /* Read channel db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) { + dev_err(dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } + + /* Setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; + + /* Setup channel db address for each channel in 
tre_ring */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* Read event ring db offset */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) { + dev_err(dev, "Unable to read ERDBOFF register\n"); + return -EIO; + } + + /* Setup event db address for each ev_ring */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* Setup DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + /* Write to MMIO registers */ + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, struct mhi_controller_config *config) { @@ -62,6 +193,11 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) goto error_ev_cfg; + if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_event->db_cfg.process_db = mhi_db_brstmode; + else + mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; + mhi_event->data_type = event_cfg->data_type; mhi_event->hw_ring = event_cfg->hardware_event; @@ -185,6 +321,11 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, } } + if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) + mhi_chan->db_cfg.process_db = mhi_db_brstmode; + else + mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; + mhi_chan->configured = true; if (mhi_chan->lpm_notify) diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 6af59ac3ec9d..e32621eefa2b 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -11,6 +11,262 @@ extern struct bus_type mhi_bus_type; +/* MHI MMIO register mapping */ +#define PCI_INVALID_READ(val) (val == U32_MAX) + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define BHIEOFF (0x2C) +#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) +#define BHIEOFF_BHIEOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define 
MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNU (0x40)
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS
(0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +#define EV_CTX_RESERVED_MASK GENMASK(7, 0) +#define EV_CTX_INTMODC_MASK GENMASK(15, 8) +#define EV_CTX_INTMODC_SHIFT 8 +#define EV_CTX_INTMODT_MASK GENMASK(31, 16) +#define EV_CTX_INTMODT_SHIFT 16 +struct mhi_event_ctxt { + __u32 intmod; + __u32 ertype; + __u32 msivec; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) +#define CHAN_CTX_CHSTATE_SHIFT 0 +#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) +#define CHAN_CTX_BRSTMODE_SHIFT 8 +#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) +#define CHAN_CTX_POLLCFG_SHIFT 10 +#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) +struct mhi_chan_ctxt { + __u32 chcfg; + __u32 chtype; + __u32 erindex; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + __u32 reserved0; + __u32 reserved1; + __u32 reserved2; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; +}; + +struct mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + MHI_CMD_START_CHAN = 18, +}; + /* MHI transfer completion events */ enum mhi_ev_ccs { MHI_EV_CC_INVALID = 0x0, @@ -39,6 +295,7 @@ enum mhi_ch_state { #define NR_OF_CMD_RINGS 1 #define CMD_EL_PER_RING 128 #define PRIMARY_CMD_RING 0 +#define MHI_DEV_WAKE_DB 127 #define MHI_MAX_MTU 0xffff enum mhi_er_type { @@ -148,4 +405,29 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); int mhi_destroy_device(struct device *dev, void *data); void mhi_create_devices(struct mhi_controller *mhi_cntrl); +/* Register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t db_val); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t db_val); +int __must_check mhi_read_reg(struct mhi_controller 
*mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +/* Initialization methods */ +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); + #endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 7c35744ec0c0..75d91e5fcd65 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -15,6 +15,124 @@ #include #include "internal.h" +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out) +{ + u32 tmp = readl(base + offset); + + /* If there is any unexpected value, query the link status */ + if (PCI_INVALID_READ(tmp) && + mhi_cntrl->link_status(mhi_cntrl)) + return -EIO; + + *out = tmp; + + return 0; +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, + u32 mask, u32 shift, u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val) +{ + writel(val, base + offset); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t db_val) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); + db_cfg->db_mode = 0; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t db_val) +{ + db_cfg->db_val = db_val; + mhi_write_db(mhi_cntrl, db_addr, db_val); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, *ring->ctxt_wp); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + 
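+	/*
+	 * Editorial note: the host-side write pointer has just been
+	 * translated into its device-visible (IOMMU) address; it is
+	 * published in the shared ring context below before the channel
+	 * doorbell is rung.
+	 */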
*ring->ctxt_wp = db;
+	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
+				    ring->db_addr, db);
+}
+
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+	u32 exec;
+	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+	return (ret) ? MHI_EE_MAX : exec;
+}
+
 int mhi_destroy_device(struct device *dev, void *data)
 {
 	struct mhi_device *mhi_dev;
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 1ce2bdd5f2f4..099d1643b072 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -240,6 +240,8 @@ struct mhi_controller_config {
  *             controller (required)
  * @mhi_dev: MHI device instance for the controller
  * @regs: Base address of MHI MMIO register space (required)
+ * @bhi: Points to base of MHI BHI register space
+ * @wake_db: MHI WAKE doorbell register address
  * @iova_start: IOMMU starting address for data (required)
  * @iova_stop: IOMMU stop address for data (required)
  * @fw_image: Firmware image name for normal booting (required)
@@ -294,6 +296,8 @@ struct mhi_controller {
 	struct device *cntrl_dev;
 	struct mhi_device *mhi_dev;
 	void __iomem *regs;
+	void __iomem *bhi;
+	void __iomem *wake_db;
 	dma_addr_t iova_start;
 	dma_addr_t iova_stop;
 	const char *fw_image;
-- cgit v1.2.3-58-ga151

From a6e2e3522f29141b95c1ef8580c665a3582b3e66 Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam
Date: Thu, 20 Feb 2020 15:28:44 +0530
Subject: bus: mhi: core: Add support for PM state transitions

This commit adds support for transitioning the MHI states as a part of
the power management operations. Helper functions are provided for the
state transitions, which will be consumed by the actual power management
routines.

This is based on the patch submitted by Sujeev Dias:
https://lkml.org/lkml/2018/7/9/989

Signed-off-by: Sujeev Dias
Signed-off-by: Siddartha Mohanadoss
[jhugo: removed dma_zalloc_coherent() and fixed several bugs]
Signed-off-by: Jeffrey Hugo
[mani: split the pm patch and cleaned up for upstream]
Signed-off-by: Manivannan Sadhasivam
Reviewed-by: Jeffrey Hugo
Tested-by: Jeffrey Hugo
Link: https://lore.kernel.org/r/20200220095854.4804-7-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/bus/mhi/core/Makefile   |   2 +-
 drivers/bus/mhi/core/init.c     |  65 ++++
 drivers/bus/mhi/core/internal.h | 175 +++++++++++
 drivers/bus/mhi/core/main.c     |   9 +
 drivers/bus/mhi/core/pm.c       | 678 ++++++++++++++++++++++++++++++++++++++++
 include/linux/mhi.h             |  52 +++
 6 files changed, 980 insertions(+), 1 deletion(-)
 create mode 100644 drivers/bus/mhi/core/pm.c
(limited to 'include/linux')

diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
index 77f7730da4bf..a0070f9cdfcd 100644
--- a/drivers/bus/mhi/core/Makefile
+++ b/drivers/bus/mhi/core/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_MHI_BUS) := mhi.o
 
-mhi-y := init.o main.o
+mhi-y := init.o main.o pm.o
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 8d81c9860ba7..c5fe49999906 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -17,6 +17,62 @@
 #include
 #include "internal.h"
 
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+	[MHI_EE_PBL] = "PBL",
+	[MHI_EE_SBL] = "SBL",
+	[MHI_EE_AMSS] = "AMSS",
+	[MHI_EE_RDDM] = "RDDM",
+	[MHI_EE_WFW] = "WFW",
+	[MHI_EE_PTHRU] = "PASS THRU",
+	[MHI_EE_EDL] = "EDL",
+	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
+	[DEV_ST_TRANSITION_PBL] = "PBL",
+	[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL", + [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_M3_FAST] = "M3_FAST", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS_ERR", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_STATE_DISABLE] = "DISABLE", + [MHI_PM_STATE_POR] = "POR", + [MHI_PM_STATE_M0] = "M0", + [MHI_PM_STATE_M2] = "M2", + [MHI_PM_STATE_M3_ENTER] = "M?->M3", + [MHI_PM_STATE_M3] = "M3", + [MHI_PM_STATE_M3_EXIT] = "M3->M0", + [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error", + [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect", + [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process", + [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect", +}; + +const char *to_mhi_pm_state_str(enum mhi_pm_state state) +{ + int index = find_last_bit((unsigned long *)&state, 32); + + if (index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + int mhi_init_mmio(struct mhi_controller *mhi_cntrl) { u32 val; @@ -364,6 +420,11 @@ static int parse_config(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl->buffer_len) mhi_cntrl->buffer_len = MHI_MAX_MTU; + /* By default, host is allowed to ring DB in both M0 and M2 states */ + mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; + if (config->m2_no_db) + mhi_cntrl->db_access &= ~MHI_PM_M2; + return 0; error_ev_cfg: @@ -403,8 +464,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, } INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); spin_lock_init(&mhi_cntrl->transition_lock); spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker); init_waitqueue_head(&mhi_cntrl->state_event); mhi_cmd = mhi_cntrl->mhi_cmd; diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index e32621eefa2b..2527dc383fd8 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -267,6 +267,79 @@ enum mhi_cmd_type { MHI_CMD_START_CHAN = 18, }; +/* No operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) + +/* Channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16)) + +/* Channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16)) + +/* Channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_START_CHAN << 16)) + +#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) + +/* Event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (ptr) +#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define 
MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) +#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) + +/* Transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (ptr) +#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +/* RSC transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) +#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + /* MHI transfer completion events */ enum mhi_ev_ccs { MHI_EV_CC_INVALID = 0x0, @@ -292,6 +365,81 @@ enum mhi_ch_state { #define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ mode != MHI_DB_BRST_ENABLE) +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ + "INVALID_EE" : mhi_ee_str[ee]) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) + +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW) + +enum dev_st_transition { + DEV_ST_TRANSITION_PBL, + DEV_ST_TRANSITION_READY, + DEV_ST_TRANSITION_SBL, + DEV_ST_TRANSITION_MISSION_MODE, + DEV_ST_TRANSITION_MAX, +}; + +extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; +#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : dev_state_tran_str[state]) + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +/* internal power states */ +enum mhi_pm_state { + MHI_PM_STATE_DISABLE, + MHI_PM_STATE_POR, + MHI_PM_STATE_M0, + MHI_PM_STATE_M2, + MHI_PM_STATE_M3_ENTER, + MHI_PM_STATE_M3, + MHI_PM_STATE_M3_EXIT, + MHI_PM_STATE_FW_DL_ERR, + MHI_PM_STATE_SYS_ERR_DETECT, + MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SHUTDOWN_PROCESS, + MHI_PM_STATE_LD_ERR_FATAL_DETECT, + MHI_PM_STATE_MAX +}; + +#define MHI_PM_DISABLE BIT(0) +#define MHI_PM_POR BIT(1) +#define MHI_PM_M0 BIT(2) +#define MHI_PM_M2 BIT(3) +#define MHI_PM_M3_ENTER BIT(4) +#define MHI_PM_M3 BIT(5) +#define MHI_PM_M3_EXIT BIT(6) +/* firmware download failure state */ +#define MHI_PM_FW_DL_ERR BIT(7) +#define MHI_PM_SYS_ERR_DETECT BIT(8) +#define MHI_PM_SYS_ERR_PROCESS BIT(9) +#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +/* link not accessible */ +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ + mhi_cntrl->db_access) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + #define NR_OF_CMD_RINGS 1 #define CMD_EL_PER_RING 128 #define PRIMARY_CMD_RING 0 @@ -314,6 +462,16 @@ struct db_cfg { dma_addr_t db_val); }; +struct mhi_pm_transitions { + enum mhi_pm_state from_state; + u32 to_states; +}; + +struct state_transition { + struct list_head node; + enum dev_st_transition state; +}; + struct mhi_ring { dma_addr_t dma_handle; dma_addr_t iommu_base; @@ -405,6 +563,23 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); int mhi_destroy_device(struct device *dev, void *data); void mhi_create_devices(struct mhi_controller *mhi_cntrl); +/* Power management APIs */ +enum mhi_pm_state __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state); +const char *to_mhi_pm_state_str(enum mhi_pm_state state); +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_pm_sys_err_worker(struct work_struct *work); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); + /* Register access methods */ void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, void __iomem *db_addr, dma_addr_t db_val); diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 75d91e5fcd65..404c88de4c47 100644 --- a/drivers/bus/mhi/core/main.c +++ 
b/drivers/bus/mhi/core/main.c @@ -133,6 +133,15 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) return (ret) ? MHI_EE_MAX : exec; } +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} + int mhi_destroy_device(struct device *dev, void *data) { struct mhi_device *mhi_dev; diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c new file mode 100644 index 000000000000..0f0b576a8bf7 --- /dev/null +++ b/drivers/bus/mhi/core/pm.c @@ -0,0 +1,678 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* + * Not all MHI state transitions are synchronous. Transitions like Linkdown, + * SYS_ERR, and shutdown can happen anytime asynchronously. This function will + * transition to a new state only if we're allowed to. + * + * Priority increases as we go down. For instance, from any state in L0, the + * transition can be made to states in L1, L2 and L3. A notable exception to + * this rule is state DISABLE. From DISABLE state we can only transition to + * POR state. Also, while in L2 state, user cannot jump back to previous + * L1 or L0 states. + * + * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 <--> M0 + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + */ +static struct mhi_pm_transitions const dev_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + }, +}; + +enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state state) +{ + unsigned long 
cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) + return cur_state; + + if (unlikely(dev_state_transitions[index].from_state != cur_state)) + return cur_state; + + if (unlikely(!(dev_state_transitions[index].to_states & state))) + return cur_state; + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, + MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* Handle device ready state transition */ +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + struct mhi_event *mhi_event; + enum mhi_pm_state cur_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 reset = 1, ready = 0; + int ret, i; + + /* Wait for RESET to be cleared and READY bit to be set by the device */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &reset) || + mhi_read_reg_field(mhi_cntrl, base, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &ready) || + (!reset && ready), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + /* Check if device entered error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + dev_err(dev, "Device link is not accessible\n"); + return -EIO; + } + + /* Timeout if device did not transition to ready state */ + if (reset || !ready) { + dev_err(dev, "Device Ready timeout\n"); + return -ETIMEDOUT; + } + + dev_dbg(dev, "Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + dev_err(dev, "Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + dev_err(dev, "Device registers not accessible\n"); + goto error_mmio; + } + + /* Configure MMIO registers */ + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + dev_err(dev, "Error configuring MMIO registers\n"); + goto error_mmio; + } + + /* Add elements to all SW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if this is an offload or HW event */ + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* Update all cores */ + smp_wmb(); + + /* Ring the event ring db */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Set MHI to M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int 
i; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + dev_err(dev, "Unable to transition to M0 state\n"); + return -EIO; + } + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + + /* Ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* Only ring primary cmd ring if ring is not empty */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + } + + /* Ring channel DB registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->db_cfg.reset_req) + mhi_chan->db_cfg.db_mode = true; + + /* Only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + write_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + return 0; +} + +/* + * After receiving the MHI state change event from the device indicating the + * transition to M1 state, the host can transition the device to M2 state + * to keep it in a low power state.
+ */ +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + /* If there are any pending resources, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || + atomic_read(&mhi_cntrl->dev_wake))) { + dev_dbg(dev, + "Exiting M2, pending_pkts: %d dev_wake: %d\n", + atomic_read(&mhi_cntrl->pending_pkts), + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +/* MHI M3 completion handler */ +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + dev_err(dev, "Unable to transition to M3 state\n"); + return -EIO; + } + + wake_up_all(&mhi_cntrl->state_event); + + return 0; +} + +/* Handle device Mission Mode transition */ +static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i, ret; + + dev_dbg(dev, "Processing Mission Mode transition\n"); + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + return -EIO; + + wake_up_all(&mhi_cntrl->state_event); + + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); + + /* Force MHI to be in M0 state before continuing */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + ret = -EIO; + goto error_mission_mode; + } + + /* Add elements to all HW event rings */ + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* Update to all cores */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* + * The MHI devices are only created when the client device switches its + * Execution Environment (EE) to either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + + read_lock_bh(&mhi_cntrl->pm_lock); + +error_mission_mode: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +/* Handle SYS_ERR and Shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + enum mhi_pm_state transition_state) +{ + enum mhi_pm_state cur_state, prev_state; 
+ struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + to_mhi_pm_state_str(transition_state)); + + /* We must notify MHI control driver so it can clean up first */ + if (transition_state == MHI_PM_SYS_ERR_PROCESS) { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); + } + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state == transition_state) { + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* Wake up threads waiting for state transition */ + wake_up_all(&mhi_cntrl->state_event); + + if (cur_state != transition_state) { + dev_err(dev, "Failed to transition to state: %s from: %s\n", + to_mhi_pm_state_str(transition_state), + to_mhi_pm_state_str(cur_state)); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* Trigger MHI RESET so that the device will not access host memory */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + dev_dbg(dev, "Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &in_reset) || + !in_reset, timeout); + if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + dev_err(dev, "Device failed to exit MHI Reset state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + dev_dbg(dev, + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + dev_dbg(dev, "Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + flush_work(&mhi_cntrl->st_worker); + flush_work(&mhi_cntrl->fw_worker); + + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events 
*/ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + if (cur_state == MHI_PM_SYS_ERR_PROCESS) { + mhi_ready_state_transition(mhi_cntrl); + } else { + /* Move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + dev_err(dev, "Error moving from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + } + + dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Queue a new work item and schedule work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum dev_st_transition state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + schedule_work(&mhi_cntrl->st_worker); + + return 0; +} + +/* SYS_ERR worker */ +void mhi_pm_sys_err_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + syserr_worker); + + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); +} + +/* Device State Transition worker */ +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + st_worker); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling state transition: %s\n", + TO_DEV_STATE_TRANS_STR(itr->state)); + + switch (itr->state) { + case DEV_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (MHI_IN_PBL(mhi_cntrl->ee)) + wake_up_all(&mhi_cntrl->state_event); + break; + case DEV_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + /* + * The MHI devices are only created when the client + * device switches its Execution Environment (EE) to + * either SBL or AMSS states + */ + mhi_create_devices(mhi_cntrl); + break; + case DEV_ST_TRANSITION_MISSION_MODE: + mhi_pm_mission_mode_transition(mhi_cntrl); + break; + case DEV_ST_TRANSITION_READY: + mhi_ready_state_transition(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + /* Wake up the device */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state 
== MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 099d1643b072..fc0cd4af646c 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -105,6 +105,31 @@ enum mhi_ee_type { MHI_EE_MAX, }; +/** + * enum mhi_state - MHI states + * @MHI_STATE_RESET: Reset state + * @MHI_STATE_READY: Ready state + * @MHI_STATE_M0: M0 state + * @MHI_STATE_M1: M1 state + * @MHI_STATE_M2: M2 state + * @MHI_STATE_M3: M3 state + * @MHI_STATE_M3_FAST: M3 Fast state + * @MHI_STATE_BHI: BHI state + * @MHI_STATE_SYS_ERR: System Error state + */ +enum mhi_state { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_M3_FAST = 0x6, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + /** * enum mhi_ch_ee_mask - Execution environment mask for channel * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE @@ -266,6 +291,7 @@ struct mhi_controller_config { * @pm_state: MHI power management state * @db_access: DB access states * @ee: MHI device execution environment + * @dev_state: MHI device state * @dev_wake: Device wakeup count * @pending_pkts: Pending packets for the controller * @transition_list: List of MHI state transitions @@ -298,6 +324,7 @@ struct mhi_controller { void __iomem *regs; void __iomem *bhi; void __iomem *wake_db; + dma_addr_t iova_start; dma_addr_t iova_stop; const char *fw_image; @@ -324,6 +351,7 @@ struct mhi_controller { u32 pm_state; u32 db_access; enum mhi_ee_type ee; + enum mhi_state dev_state; atomic_t dev_wake; atomic_t pending_pkts; struct list_head transition_list; @@ -391,6 +419,22 @@ struct mhi_result { int transaction_status; }; +/** + * struct mhi_buf - MHI Buffer description + * @buf: Virtual address of the buffer + * @name: Buffer label. For an offload channel, the configuration name must be: + * ECA - Event context array data + * CCA - Channel context array data + * @dma_addr: IOMMU address of the buffer + * @len: # of bytes + */ +struct mhi_buf { + void *buf; + const char *name; + dma_addr_t dma_addr; + size_t len; +}; + /** * struct mhi_driver - Structure representing a MHI client driver * @probe: CB function for client driver probe function @@ -442,4 +486,12 @@ int mhi_driver_register(struct mhi_driver *mhi_drv); */ void mhi_driver_unregister(struct mhi_driver *mhi_drv); +/** + * mhi_set_mhi_state - Set MHI device state + * @mhi_cntrl: MHI controller + * @state: State to set + */ +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_state state); + #endif /* _MHI_H_ */ -- cgit v1.2.3-58-ga151 From 3000f85b8f47b2c860add5cce4c201c83bde6468 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:45 +0530 Subject: bus: mhi: core: Add support for basic PM operations This commit adds support for basic MHI PM operations such as mhi_async_power_up, mhi_sync_power_up, and mhi_power_down. These routines place the MHI bus into the respective power domain states and call the state_transition APIs when necessary. The MHI controller driver is expected to call these PM routines for MHI powerup and powerdown.
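As a rough editorial sketch (not part of the patch), a controller driver built on these entry points would typically bracket device operation like this, assuming the controller has already been registered; the example_* names are hypothetical:

static int example_powerup(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Optional: keep the device context allocated across power cycles */
	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		return ret;

	/* Power up and wait until the device reaches mission mode */
	ret = mhi_sync_power_up(mhi_cntrl);
	if (ret)
		mhi_unprepare_after_power_down(mhi_cntrl);

	return ret;
}

static void example_powerdown(struct mhi_controller *mhi_cntrl)
{
	/* Pass false instead if the link is no longer accessible */
	mhi_power_down(mhi_cntrl, true);
	mhi_unprepare_after_power_down(mhi_cntrl);
}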
This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/989 Signed-off-by: Sujeev Dias Signed-off-by: Siddartha Mohanadoss [mani: split the pm patch and cleaned up for upstream] Signed-off-by: Manivannan Sadhasivam Reviewed-by: Jeffrey Hugo Tested-by: Jeffrey Hugo Link: https://lore.kernel.org/r/20200220095854.4804-8-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/Makefile | 2 +- drivers/bus/mhi/core/boot.c | 87 +++++++++++ drivers/bus/mhi/core/init.c | 322 ++++++++++++++++++++++++++++++++++++++++ drivers/bus/mhi/core/internal.h | 34 +++++ drivers/bus/mhi/core/main.c | 87 +++++++++++ drivers/bus/mhi/core/pm.c | 219 +++++++++++++++++++++++++++ include/linux/mhi.h | 51 +++++++ 7 files changed, 801 insertions(+), 1 deletion(-) create mode 100644 drivers/bus/mhi/core/boot.c (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile index a0070f9cdfcd..66e2700c9032 100644 --- a/drivers/bus/mhi/core/Makefile +++ b/drivers/bus/mhi/core/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_MHI_BUS) := mhi.o -mhi-y := init.o main.o pm.o +mhi-y := init.o main.o pm.o boot.o diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c new file mode 100644 index 000000000000..94431500a2d1 --- /dev/null +++ b/drivers/bus/mhi/core/boot.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) { + int i; + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + for (i = 0; i < image_info->entries; i++, mhi_buf++) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + + kfree(image_info->mhi_buf); + kfree(image_info); } + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* Allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* Allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* Vector table is the last entry */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size, + &mhi_buf->dma_addr, + GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index c5fe49999906..109db1f5cdf2 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -73,6 +73,293 @@ const char
*to_mhi_pm_state_str(enum mhi_pm_state state) return mhi_pm_state_str[index]; } +/* MHI protocol requires the transfer ring to be aligned with ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size, + &ring->dma_handle, GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + + return 0; +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + + free_irq(mhi_cntrl->irq[0], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int i, ret; + + /* Setup BHI_INTVEC IRQ */ + ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, + mhi_intvec_threaded_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, + "bhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ret = request_irq(mhi_cntrl->irq[mhi_event->irq], + mhi_irq_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, + "mhi", mhi_event); + if (ret) { + dev_err(dev, "Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->irq], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + ring->base = NULL; + ring->iommu_base = 0; + } + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + u32 tmp; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + 
atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + /* Setup channel ctxt */ + mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* Skip if it is an offload channel */ + if (mhi_chan->offload_ch) + continue; + + tmp = chan_ctxt->chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + tmp &= ~CHAN_CTX_BRSTMODE_MASK; + tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); + tmp &= ~CHAN_CTX_POLLCFG_MASK; + tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); + chan_ctxt->chcfg = tmp; + + chan_ctxt->chtype = mhi_chan->type; + chan_ctxt->erindex = mhi_chan->er_index; + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; + } + + /* Setup event context */ + mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip if it is an offload event */ + if (mhi_event->offload_ev) + continue; + + tmp = er_ctxt->intmod; + tmp &= ~EV_CTX_INTMODC_MASK; + tmp &= ~EV_CTX_INTMODT_MASK; + tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); + er_ctxt->intmod = tmp; + + er_ctxt->ertype = MHI_ER_TYPE_VALID; + er_ctxt->msivec = mhi_event->irq; + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; + + /* + * If the read pointer equals the write pointer, then the + * ring is empty + */ + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = ring->iommu_base; + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = ring->len; + ring->ctxt_wp = &er_ctxt->wp; + } + + /* Setup cmd context */ + mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * + NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, + GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = ring->len; + ring->ctxt_wp = &cmd_ctxt->wp; + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, +
mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + int mhi_init_mmio(struct mhi_controller *mhi_cntrl) { u32 val; @@ -553,6 +840,41 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_unregister_controller); +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + + mhi_cntrl->pre_init = true; + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + mhi_deinit_dev_ctxt(mhi_cntrl); + mhi_cntrl->pre_init = false; +} +EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); + static void mhi_release_device(struct device *dev) { struct mhi_device *mhi_dev = to_mhi_device(dev); diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 2527dc383fd8..09faab85902c 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -563,6 +563,11 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); int mhi_destroy_device(struct device *dev, void *data); void mhi_create_devices(struct mhi_controller *mhi_cntrl); +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info); + /* Power management APIs */ enum mhi_pm_state __must_check mhi_tryset_pm_state( struct mhi_controller *mhi_cntrl, @@ -604,5 +609,34 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, /* Initialization methods */ int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); + +/* Memory allocation methods */ +static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + dma_addr_t *dma_handle, + gfp_t gfp) +{ + void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle, + gfp); + + return buf; +} + +static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle); +} + +/* ISR handlers */ +irqreturn_t mhi_irq_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); +irqreturn_t mhi_intvec_handler(int irq_number, void *dev); 
#endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 404c88de4c47..201551b3cb5b 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -142,6 +142,11 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) return ret ? MHI_STATE_MAX : state; } +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + int mhi_destroy_device(struct device *dev, void *data) { struct mhi_device *mhi_dev; @@ -248,3 +253,85 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) put_device(&mhi_dev->dev); } } + +irqreturn_t mhi_irq_handler(int irq_number, void *dev) +{ + struct mhi_event *mhi_event = dev; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + /* Only proceed if event ring has pending events */ + if (ev_ring->rp == dev_rp) + return IRQ_HANDLED; + + /* For client managed event ring, notify pending data */ + if (mhi_event->cl_manage) { + struct mhi_chan *mhi_chan = mhi_event->mhi_chan; + struct mhi_device *mhi_dev = mhi_chan->mhi_dev; + + if (mhi_dev) + mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); + } else { + tasklet_schedule(&mhi_event->task); + } + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + enum mhi_state state = MHI_STATE_MAX; + enum mhi_pm_state pm_state = 0; + enum mhi_ee_type ee = 0; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_cntrl->ee; + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + } + + if (state == MHI_STATE_SYS_ERR) { + dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* If device in RDDM don't bother processing SYS error */ + if (mhi_cntrl->ee == MHI_EE_RDDM) { + if (mhi_cntrl->ee != ee) { + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + wake_up_all(&mhi_cntrl->state_event); + } + goto exit_intvec; + } + + if (pm_state == MHI_PM_SYS_ERR_DETECT) { + wake_up_all(&mhi_cntrl->state_event); + + /* For fatal errors, we let controller decide next step */ + if (MHI_IN_PBL(ee)) + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); + else + schedule_work(&mhi_cntrl->syserr_worker); + } + +exit_intvec: + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handler(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + + /* Wake up events waiting for state change */ + wake_up_all(&mhi_cntrl->state_event); + + return IRQ_WAKE_THREAD; +} diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 0f0b576a8bf7..bfe0371f6e75 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -138,6 +138,17 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) } } +/* NOP for backward compatibility, host allowed to ring DB in M2 state */ +static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) +{ +} + +static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) +{ + mhi_cntrl->wake_get(mhi_cntrl, false); + mhi_cntrl->wake_put(mhi_cntrl, true); +} + /* Handle device ready state transition */ int mhi_ready_state_transition(struct mhi_controller 
*mhi_cntrl) { @@ -676,3 +687,211 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) return 0; } + +/* Assert device wake db */ +static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + + /* + * If force flag is set, then increment the wake count value and + * ring wake db + */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* + * If resources are already requested, then just increment + * the wake count value and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* De-assert device wake db */ +static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, + bool override) +{ + unsigned long flags; + + /* + * Only continue if there is a single resource, else just decrement + * and return + */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + enum mhi_ee_type current_ee; + enum dev_st_transition next_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 val; + int ret; + + dev_info(dev, "Requested to power ON\n"); + + if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings) + return -EINVAL; + + /* Supply default wake routines if not provided by controller driver */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || + !mhi_cntrl->wake_toggle) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? 
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + + if (!mhi_cntrl->pre_init) { + /* Setup device context */ + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) + goto error_dev_ctxt; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) + goto error_setup_irq; + + /* Setup BHI offset & INTVEC */ + write_lock_irq(&mhi_cntrl->pm_lock); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + goto error_bhi_offset; + } + + mhi_cntrl->bhi = mhi_cntrl->regs + val; + + /* Setup BHIE offset */ + if (mhi_cntrl->fbc_download) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_err(dev, "Error reading BHIE offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhie = mhi_cntrl->regs + val; + } + + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* Confirm that the device is in valid exec env */ + if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + dev_err(dev, "Not a valid EE for power on\n"); + ret = -EIO; + goto error_bhi_offset; + } + + /* Transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? + DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; + + if (next_state == DEV_ST_TRANSITION_PBL) + schedule_work(&mhi_cntrl->fw_worker); + + mhi_queue_state_transition(mhi_cntrl, next_state); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + dev_info(dev, "Power on setup success\n"); + + return 0; + +error_bhi_offset: + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + if (!mhi_cntrl->pre_init) + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum mhi_pm_state cur_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + /* If it's not a graceful shutdown, force MHI to linkdown state */ + if (!graceful) { + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_LD_ERR_FATAL_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) + dev_dbg(dev, "Failed to move to state: %s from: %s\n", + to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + mhi_deinit_free_irq(mhi_cntrl); + + if (!mhi_cntrl->pre_init) { + /* Free all allocated resources */ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + mhi_deinit_dev_ctxt(mhi_cntrl); + } +} +EXPORT_SYMBOL_GPL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_MISSION_MODE(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -EIO; +} +EXPORT_SYMBOL(mhi_sync_power_up); diff --git a/include/linux/mhi.h b/include/linux/mhi.h index fc0cd4af646c..630643f6b4a4 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -81,6 +81,17 @@ enum mhi_ch_type { MHI_CH_TYPE_INBOUND_COALESCED = 3, }; +/** + * struct image_info - Firmware and RDDM table + * @mhi_buf - Buffer for firmware and RDDM table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + /** * enum mhi_ee_type - Execution environment types * @MHI_EE_PBL: Primary Bootloader @@ -266,6 +277,7 @@ struct mhi_controller_config { * @mhi_dev: MHI device instance for the controller * @regs: Base address of MHI MMIO register space (required) * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space * @wake_db: MHI WAKE doorbell register address * @iova_start: IOMMU starting address for data (required) * @iova_stop: IOMMU stop address for data (required) @@ -273,6 +285,7 @@ struct mhi_controller_config { * @edl_image: Firmware image name for emergency download mode (optional) * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) + * @fbc_image: Points to firmware image buffer * @mhi_chan: Points to the channel configuration table * @lpm_chans: List of channels that require LPM notifications * @irq: base irq # to request (required) @@ -323,6 +336,7 @@ struct mhi_controller { struct mhi_device *mhi_dev; void __iomem *regs; void __iomem *bhi; + void __iomem *bhie; void __iomem *wake_db; dma_addr_t iova_start; @@ -331,6 +345,7 @@ struct mhi_controller { const char *edl_image; size_t sbl_size; size_t seg_len; + struct image_info *fbc_image; struct mhi_chan *mhi_chan; struct list_head lpm_chans; int *irq; @@ -494,4 +509,40 @@ void mhi_driver_unregister(struct mhi_driver *mhi_drv); void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state); +/** + * mhi_prepare_for_power_up - Do pre-initialization before power up. + * This is optional; call it before power up if + * the controller does not want the bus framework to + * automatically free any allocated memory during the + * shutdown process. + * @mhi_cntrl: MHI controller + */ +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_async_power_up - Start MHI power up sequence + * @mhi_cntrl: MHI controller + */ +int mhi_async_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_sync_power_up - Start MHI power up sequence and wait till the device + * enters a valid EE state + * @mhi_cntrl: MHI controller + */ +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_power_down - Start MHI power down sequence + * @mhi_cntrl: MHI controller + * @graceful: Link is still accessible, so do a graceful shutdown process + */ +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); + +/** + * mhi_unprepare_after_power_down - Free any allocated memory after power down + * @mhi_cntrl: MHI controller + */ +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); + #endif /* _MHI_H_ */ -- cgit v1.2.3-58-ga151 From 6fdfdd27328ceef39f4b8daec3510874ad68e753 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:47 +0530 Subject: bus: mhi: core: Add support for downloading RDDM image during panic MHI protocol supports downloading RDDM (RAM Dump) image from the device through BHIE. This is useful for debugging as the RDDM image can capture the firmware state.
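As an editorial aside, one plausible way a controller driver could use this facility is to collect the dump from a panic notifier; everything below except the exported MHI calls is a hypothetical sketch:

static struct mhi_controller *example_mhi_cntrl;	/* saved at probe time */

static int example_panic_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	/* in_panic = true polls registers directly; no sleeping, no locks taken */
	if (example_mhi_cntrl && example_mhi_cntrl->rddm_image)
		mhi_download_rddm_img(example_mhi_cntrl, true);

	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_notify,
};

/* Registered once at probe time:
 * atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
 */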
This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/989 Signed-off-by: Sujeev Dias Signed-off-by: Siddartha Mohanadoss [mani: split the data transfer patch and cleaned up for upstream] Signed-off-by: Manivannan Sadhasivam Reviewed-by: Jeffrey Hugo Tested-by: Jeffrey Hugo Link: https://lore.kernel.org/r/20200220095854.4804-10-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/boot.c | 152 ++++++++++++++++++++++++++++++++++++++++ drivers/bus/mhi/core/init.c | 39 +++++++++++ drivers/bus/mhi/core/internal.h | 2 + drivers/bus/mhi/core/pm.c | 32 +++++++++ include/linux/mhi.h | 24 +++++++ 5 files changed, 249 insertions(+) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 26422a7da35b..220faa886eb3 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -18,6 +18,158 @@ #include #include "internal.h" +/* Setup RDDM vector table for RDDM transfer and program RXVEC */ +void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) +{ + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 sequence_id; + unsigned int i; + + for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = mhi_buf->len; + } + + dev_dbg(dev, "BHIe programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; + + if (unlikely(!sequence_id)) + sequence_id = 1; + + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + + dev_dbg(dev, "Address: %p and len: 0x%lx sequence: %u\n", + &mhi_buf->dma_addr, mhi_buf->len, sequence_id); +} + +/* Collect RDDM buffer during kernel panic */ +static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 rx_status; + enum mhi_ee_type ee; + const u32 delayus = 2000; + u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + const u32 rddm_timeout_us = 200000; + int rddm_retry = rddm_timeout_us / delayus; + void __iomem *base = mhi_cntrl->bhie; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* + * This should only execute during a kernel panic; we expect all + * other cores to shut down while we're collecting the RDDM buffer. After + * returning from this function, we expect the device to reset. + * + * Normally, we read/write pm_state only after grabbing the + * pm_lock; since we're in a panic, we skip it here. Also, there is no + * guarantee that this state change will take effect since + * we're setting it w/o grabbing the pm_lock + */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + /* The update should take effect immediately */ + smp_wmb(); + + /* + * Make sure the device is not already in RDDM.
In case the device asserts + * and a kernel panic follows, the device will already be in RDDM. + * Do not trigger SYS ERR again and proceed with waiting for + * image download completion. + */ + ee = mhi_get_exec_env(mhi_cntrl); + if (ee != MHI_EE_RDDM) { + dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + dev_dbg(dev, "Waiting for device to enter RDDM\n"); + while (rddm_retry--) { + ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_RDDM) + break; + + udelay(delayus); + } + + if (rddm_retry <= 0) { + /* Hardware reset to force the device to enter RDDM */ + dev_dbg(dev, + "Did not enter RDDM, do a host req reset\n"); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, + MHI_SOC_RESET_REQ_OFFSET, + MHI_SOC_RESET_REQ); + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + } + + dev_dbg(dev, "Waiting for image download completion, current EE: %s\n", + TO_MHI_EXEC_STR(ee)); + + while (retry--) { + ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status); + if (ret) + return -EIO; + + if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) + return 0; + + udelay(delayus); + } + + ee = mhi_get_exec_env(mhi_cntrl); + ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); + + dev_err(dev, "Did not complete RDDM transfer\n"); + dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee)); + dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); + + return -EIO; +} + +/* Download RDDM image from device */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) +{ + void __iomem *base = mhi_cntrl->bhie; + u32 rx_status; + + if (in_panic) + return __mhi_download_rddm_in_panic(mhi_cntrl); + + /* Wait for the image download to complete */ + wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL_GPL(mhi_download_rddm_img); + static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, const struct mhi_buf *mhi_buf) { diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index fffcbcdeb6c6..e81cdd0207e2 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -843,6 +843,8 @@ EXPORT_SYMBOL_GPL(mhi_unregister_controller); int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) { + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 bhie_off; int ret; mutex_lock(&mhi_cntrl->pm_mutex); @@ -851,12 +853,44 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) if (ret) goto error_dev_ctxt; + /* + * Allocate the RDDM table if specified; this table is used for debugging purposes + */ + if (mhi_cntrl->rddm_size) { + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + + /* + * This controller supports RDDM, so we need to manually clear + * BHIE RX registers since POR values are undefined.
+ */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, + &bhie_off); + if (ret) { + dev_err(dev, "Error getting BHIE offset\n"); + goto bhie_error; + } + + memset_io(mhi_cntrl->regs + bhie_off + BHIE_RXVECADDR_LOW_OFFS, + 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + + 4); + + if (mhi_cntrl->rddm_image) + mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + } + mhi_cntrl->pre_init = true; mutex_unlock(&mhi_cntrl->pm_mutex); return 0; +bhie_error: + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + error_dev_ctxt: mutex_unlock(&mhi_cntrl->pm_mutex); @@ -871,6 +905,11 @@ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) mhi_cntrl->fbc_image = NULL; } + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + mhi_deinit_dev_ctxt(mhi_cntrl); mhi_cntrl->pre_init = false; } diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 0f6246c6162e..e1d3f2ca4922 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -614,6 +614,8 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info); /* Memory allocation methods */ static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index bfe0371f6e75..2ba2f6aba9d5 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -450,6 +450,16 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, /* We must notify MHI control driver so it can clean up first */ if (transition_state == MHI_PM_SYS_ERR_PROCESS) { + /* + * If the controller supports RDDM, we do not process the + * SYS error state; instead, we jump directly to the + * RDDM state + */ + if (mhi_cntrl->rddm_image) { + dev_dbg(dev, + "Controller supports RDDM, so skip SYS_ERR\n"); + return; + } mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); } @@ -895,3 +905,25 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO; } EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Check if device is already in RDDM */ + if (mhi_cntrl->ee == MHI_EE_RDDM) + return 0; + + dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + /* Wait for RDDM event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = ret ?
0 : -EIO; + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 630643f6b4a4..d3453a1de835 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -283,9 +283,11 @@ struct mhi_controller_config { * @iova_stop: IOMMU stop address for data (required) * @fw_image: Firmware image name for normal booting (required) * @edl_image: Firmware image name for emergency download mode (optional) + * @rddm_size: RAM dump size that the host should allocate for debugging purposes * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer * @mhi_chan: Points to the channel configuration table * @lpm_chans: List of channels that require LPM notifications * @irq: base irq # to request (required) @@ -343,9 +345,11 @@ struct mhi_controller { dma_addr_t iova_stop; const char *fw_image; const char *edl_image; + size_t rddm_size; size_t sbl_size; size_t seg_len; struct image_info *fbc_image; + struct image_info *rddm_image; struct mhi_chan *mhi_chan; struct list_head lpm_chans; int *irq; @@ -545,4 +549,24 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); */ void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); +/** + * mhi_download_rddm_img - Download ramdump image from device for + * debugging purposes. + * @mhi_cntrl: MHI controller + * @in_panic: Download RDDM image during kernel panic + */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); + +/** + * mhi_force_rddm_mode - Force device into RDDM mode + * @mhi_cntrl: MHI controller + */ +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); + +/** + * mhi_get_mhi_state - Get MHI state of the device + * @mhi_cntrl: MHI controller + */ +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); + #endif /* _MHI_H_ */ -- cgit v1.2.3-58-ga151 From 1d3173a3bae7039b765a0956e3e4bf846dbaacb8 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:48 +0530 Subject: bus: mhi: core: Add support for processing events from client device This commit adds support for processing the MHI data and control events from the client device. The client device can report various events such as EE events and state change events by interrupting the host through an IRQ and adding events to the event rings allocated by the host during initialization.
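As an editorial sketch (not part of the patch), a client driver's transfer callback invoked from this event processing path might consume the delivered struct mhi_result as follows; the example_* names are hypothetical placeholders:

static void example_dl_xfer_cb(struct mhi_device *mhi_dev,
			       struct mhi_result *result)
{
	/* -EOVERFLOW is reported when the device sent more than the buffer held */
	if (result->transaction_status) {
		dev_err(&mhi_dev->dev, "Transfer failed: %d\n",
			result->transaction_status);
		return;
	}

	/* buf_addr and bytes_xferd describe the completed buffer */
	example_consume(result->buf_addr, result->bytes_xferd);	/* driver-specific handling */
}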
This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/988 Signed-off-by: Sujeev Dias Signed-off-by: Siddartha Mohanadoss [mani: splitted the data transfer patch and cleaned up for upstream] Signed-off-by: Manivannan Sadhasivam Reviewed-by: Jeffrey Hugo Tested-by: Jeffrey Hugo Link: https://lore.kernel.org/r/20200220095854.4804-11-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/init.c | 18 ++ drivers/bus/mhi/core/internal.h | 10 + drivers/bus/mhi/core/main.c | 468 ++++++++++++++++++++++++++++++++++++++++ include/linux/mhi.h | 14 ++ 4 files changed, 510 insertions(+) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index e81cdd0207e2..3f77397eefea 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -543,6 +543,18 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, mhi_event->data_type = event_cfg->data_type; + switch (mhi_event->data_type) { + case MHI_ER_DATA: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + default: + dev_err(dev, "Event Ring type not supported\n"); + goto error_ev_cfg; + } + mhi_event->hw_ring = event_cfg->hardware_event; if (mhi_event->hw_ring) mhi_cntrl->hw_ev_rings++; @@ -772,6 +784,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mhi_event->mhi_cntrl = mhi_cntrl; spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL) + tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); } mhi_chan = mhi_cntrl->mhi_chan; diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index e1d3f2ca4922..37f9780d5bdc 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -500,6 +500,8 @@ struct mhi_buf_info { dma_addr_t p_addr; size_t len; enum dma_data_direction dir; + bool used; /* Indicates whether the buffer is used or not */ + bool pre_mapped; /* Already pre-mapped by client */ }; struct mhi_event { @@ -637,6 +639,14 @@ static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle); } +/* Event processing methods */ +void mhi_ctrl_ev_task(unsigned long data); +void mhi_ev_task(unsigned long data); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); + /* ISR handlers */ irqreturn_t mhi_irq_handler(int irq_number, void *dev); irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 201551b3cb5b..56d46d32726c 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -147,6 +147,16 @@ static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) return (addr - ring->iommu_base) + ring->base; } +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + /* smp update */ + smp_wmb(); +} + int mhi_destroy_device(struct device *dev, void *data) { struct mhi_device *mhi_dev; @@ -335,3 +345,461 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev) return 
IRQ_WAKE_THREAD; } + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + dma_addr_t ctxt_wp; + + /* Update the WP */ + ring->wp += ring->el_size; + ctxt_wp = *ring->ctxt_wp + ring->el_size; + + if (ring->wp >= (ring->base + ring->len)) { + ring->wp = ring->base; + ctxt_wp = ring->iommu_base; + } + + *ring->ctxt_wp = ctxt_wp; + + /* Update the RP */ + ring->rp += ring->el_size; + if (ring->rp >= (ring->base + ring->len)) + ring->rp = ring->base; + + /* Update to all cores */ + smp_wmb(); +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result; + unsigned long flags = 0; + u32 ev_code; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * If it's a DB Event then we need to grab the lock + * with preemption disabled and as a write because we + * have to update db register and there are chances that + * another thread could be doing the same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + /* device rp after servicing the TREs */ + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + result.dir = mhi_chan->dir; + + /* local rp */ + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* If it's the last TRE, get length from the event */ + if (local_rp == ev_tre) + xfer_len = MHI_TRE_GET_EV_LEN(event); + else + xfer_len = buf_info->len; + + result.buf_addr = buf_info->cb_buf; + result.bytes_xferd = xfer_len; + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_dec(&mhi_cntrl->pending_pkts); + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + mhi_chan->db_cfg.db_mode = 1; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + default: + dev_err(dev, "Unknown event 0x%x\n", ev_code); + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static int parse_rsc_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_buf_info *buf_info; + struct mhi_result result; + int ev_code; + u32 cookie; /* offset to local descriptor */ + u16 xfer_len; + + 
buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + ev_code = MHI_TRE_GET_EV_CODE(event); + cookie = MHI_TRE_GET_EV_COOKIE(event); + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* Received out of bound cookie */ + WARN_ON(cookie >= buf_ring->len); + + buf_info = buf_ring->base + cookie; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + result.bytes_xferd = xfer_len; + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_rsc_event; + + WARN_ON(!buf_info->used); + + /* notify the client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* + * Note: We're arbitrarily incrementing RP even though, completion + * packet we processed might not be the same one, reason we can do this + * is because device guaranteed to cache descriptors in order it + * receive, so even though completion event is different we can re-use + * all descriptors in between. + * Example: + * Transfer Ring has descriptors: A, B, C, D + * Last descriptor host queue is D (WP) and first descriptor + * host queue is A (RP). + * The completion event we just serviced is descriptor C. + * Then we can safely queue descriptors to replace A, B, and C + * even though host did not receive any completions. + */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_chan *mhi_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 chan; + int count = 0; + + /* + * This is a quick check to avoid unnecessary event processing + * in case MHI is already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + switch (type) { + case MHI_PKT_TYPE_BW_REQ_EVENT: + { + struct mhi_link_info *link_info; + + link_info = &mhi_cntrl->mhi_link_info; + write_lock_irq(&mhi_cntrl->pm_lock); + link_info->target_link_speed = + MHI_TRE_GET_EV_LINKSPEED(local_rp); + link_info->target_link_width = + MHI_TRE_GET_EV_LINKWIDTH(local_rp); + write_unlock_irq(&mhi_cntrl->pm_lock); + dev_dbg(dev, "Received BW_REQ event\n"); + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); + break; + } + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_state new_state; + + 
new_state = MHI_TRE_GET_EV_STATE(local_rp); + + dev_dbg(dev, "State change event to state: %s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum mhi_pm_state new_state; + + dev_dbg(dev, "System error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + break; + } + default: + dev_err(dev, "Invalid state: %s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum dev_st_transition st = DEV_ST_TRANSITION_MAX; + enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); + + dev_dbg(dev, "Received EE event: %s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = DEV_ST_TRANSITION_SBL; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = DEV_ST_TRANSITION_MISSION_MODE; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + dev_err(dev, + "Unhandled EE event: 0x%x\n", type); + } + if (st != DEV_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + + break; + } + case MHI_PKT_TYPE_TX_EVENT: + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + break; + default: + dev_err(dev, "Unhandled event type: %d\n", type); + break; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) + return -EIO; + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return count; +} + +void mhi_ev_task(unsigned long 
data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + + /* process all pending events */ + spin_lock_bh(&mhi_event->lock); + mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + spin_unlock_bh(&mhi_event->lock); +} + +void mhi_ctrl_ev_task(unsigned long data) +{ + struct mhi_event *mhi_event = (struct mhi_event *)data; + struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + enum mhi_pm_state pm_state = 0; + int ret; + + /* + * We can check PM state w/o a lock here because there is no way + * PM state can change from reg access valid to no access while this + * thread is being executed. + */ + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + /* + * We may have a pending event but not allowed to + * process it since we are probably in a suspended state, + * so trigger a resume. + */ + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + + return; + } + + /* Process ctrl events */ + ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); + + /* + * We received an IRQ but no events to process, maybe device went to + * SYS_ERR state? Check the state to confirm. + */ + if (!ret) { + write_lock_irq(&mhi_cntrl->pm_lock); + state = mhi_get_mhi_state(mhi_cntrl); + if (state == MHI_STATE_SYS_ERR) { + dev_dbg(dev, "System error detected\n"); + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irq(&mhi_cntrl->pm_lock); + if (pm_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + } +} diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d3453a1de835..bf8921ee0805 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -31,6 +31,7 @@ struct mhi_buf_info; * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover) * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state + * @MHI_CB_BW_REQ: Received a bandwidth switch request from device */ enum mhi_callback { MHI_CB_IDLE, @@ -41,6 +42,7 @@ enum mhi_callback { MHI_CB_EE_MISSION_MODE, MHI_CB_SYS_ERROR, MHI_CB_FATAL_ERROR, + MHI_CB_BW_REQ, }; /** @@ -92,6 +94,16 @@ struct image_info { u32 entries; }; +/** + * struct mhi_link_info - BW requirement + * target_link_speed - Link speed as defined by TLS bits in LinkControl reg + * target_link_width - Link width as defined by NLW bits in LinkStatus reg + */ +struct mhi_link_info { + unsigned int target_link_speed; + unsigned int target_link_width; +}; + /** * enum mhi_ee_type - Execution environment types * @MHI_EE_PBL: Primary Bootloader @@ -312,6 +324,7 @@ struct mhi_controller_config { * @transition_list: List of MHI state transitions * @transition_lock: Lock for protecting MHI state transition list * @wlock: Lock for protecting device wakeup + * @mhi_link_info: Device bandwidth info * @st_worker: State transition worker * @fw_worker: Firmware download worker * @syserr_worker: System error worker @@ -376,6 +389,7 @@ struct mhi_controller { struct list_head transition_list; spinlock_t transition_lock; spinlock_t wlock; + struct mhi_link_info mhi_link_info; struct work_struct st_worker; struct work_struct fw_worker; struct work_struct syserr_worker; -- cgit v1.2.3-58-ga151 From 189ff97cca53e3fe2d8b38d64105040ce17fc62d Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:49 +0530 Subject: bus: mhi: core: Add support for data transfer Add support
for transferring data between external modem and host processor using MHI protocol. This is based on the patch submitted by Sujeev Dias: https://lkml.org/lkml/2018/7/9/988 Signed-off-by: Sujeev Dias Signed-off-by: Siddartha Mohanadoss [mani: splitted the data transfer patch and cleaned up for upstream] Signed-off-by: Manivannan Sadhasivam Reviewed-by: Jeffrey Hugo Tested-by: Jeffrey Hugo Link: https://lore.kernel.org/r/20200220095854.4804-12-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/init.c | 130 +++++++- drivers/bus/mhi/core/internal.h | 22 ++ drivers/bus/mhi/core/main.c | 715 +++++++++++++++++++++++++++++++++++++++- drivers/bus/mhi/core/pm.c | 40 +++ include/linux/mhi.h | 80 +++++ 5 files changed, 979 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 3f77397eefea..7bfffa9a5ad1 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -491,6 +491,73 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) return 0; } +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + vfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + chan_ctxt->rbase = 0; +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + u32 tmp; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = vzalloc(buf_ring->len); + + if (!buf_ring->base) { + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); + return -ENOMEM; + } + + tmp = chan_ctxt->chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); + chan_ctxt->chcfg = tmp; + + chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = tre_ring->len; + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = 1; + + /* Update to all cores */ + smp_wmb(); + + return 0; +} + static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, struct mhi_controller_config *config) { @@ -799,6 +866,14 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, rwlock_init(&mhi_chan->lock); } + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + /* Register controller with MHI bus */ mhi_dev = mhi_alloc_device(mhi_cntrl); if (IS_ERR(mhi_dev)) { @@ -969,6 +1044,14 @@ static int 
mhi_driver_probe(struct device *dev) struct mhi_event *mhi_event; struct mhi_chan *ul_chan = mhi_dev->ul_chan; struct mhi_chan *dl_chan = mhi_dev->dl_chan; + int ret; + + /* Bring device out of LPM */ + ret = mhi_device_get_sync(mhi_dev); + if (ret) + return ret; + + ret = -EINVAL; if (ul_chan) { /* @@ -976,13 +1059,18 @@ static int mhi_driver_probe(struct device *dev) * be provided */ if (ul_chan->lpm_notify && !mhi_drv->status_cb) - return -EINVAL; + goto exit_probe; /* For non-offload channels then xfer_cb should be provided */ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) - return -EINVAL; + goto exit_probe; ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + if (ul_chan->auto_start) { + ret = mhi_prepare_channel(mhi_cntrl, ul_chan); + if (ret) + goto exit_probe; + } } if (dl_chan) { @@ -991,11 +1079,11 @@ static int mhi_driver_probe(struct device *dev) * be provided */ if (dl_chan->lpm_notify && !mhi_drv->status_cb) - return -EINVAL; + goto exit_probe; /* For non-offload channels then xfer_cb should be provided */ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) - return -EINVAL; + goto exit_probe; mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; @@ -1005,19 +1093,36 @@ static int mhi_driver_probe(struct device *dev) * notify pending data */ if (mhi_event->cl_manage && !mhi_drv->status_cb) - return -EINVAL; + goto exit_probe; dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; } /* Call the user provided probe function */ - return mhi_drv->probe(mhi_dev, mhi_dev->id); + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + if (ret) + goto exit_probe; + + if (dl_chan && dl_chan->auto_start) + mhi_prepare_channel(mhi_cntrl, dl_chan); + + mhi_device_put(mhi_dev); + + return ret; + +exit_probe: + mhi_unprepare_from_transfer(mhi_dev); + + mhi_device_put(mhi_dev); + + return ret; } static int mhi_driver_remove(struct device *dev) { struct mhi_device *mhi_dev = to_mhi_device(dev); struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_chan *mhi_chan; enum mhi_ch_state ch_state[] = { MHI_CH_STATE_DISABLED, @@ -1049,6 +1154,10 @@ static int mhi_driver_remove(struct device *dev) mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; write_unlock_irq(&mhi_chan->lock); + /* Reset the non-offload channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + mutex_unlock(&mhi_chan->mutex); } @@ -1063,11 +1172,20 @@ static int mhi_driver_remove(struct device *dev) mutex_lock(&mhi_chan->mutex); + if (ch_state[dir] == MHI_CH_STATE_ENABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; mutex_unlock(&mhi_chan->mutex); } + read_lock_bh(&mhi_cntrl->pm_lock); + while (mhi_dev->dev_wake) + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_cntrl->pm_lock); + return 0; } diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 37f9780d5bdc..18066302e6e2 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -587,6 +587,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd); /* Register access methods */ void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, @@ -618,6 +620,14 @@ int mhi_init_irq_setup(struct 
mhi_controller *mhi_cntrl); void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); /* Memory allocation methods */ static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, @@ -652,4 +662,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev); irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); irqreturn_t mhi_intvec_handler(int irq_number, void *dev); +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum mhi_flags flags); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + #endif /* _MHI_INT_H */ diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 56d46d32726c..fa1c9000fc6c 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -142,11 +142,83 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) return ret ? MHI_STATE_MAX : state; } +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, + buf_info->v_addr, buf_info->len, + buf_info->dir); + if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr, + buf_info->p_addr); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) { + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + } else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + + return nr_el; +} + static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) { return (addr - ring->iommu_base) + ring->base; } +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + ring->wp += ring->el_size; + if (ring->wp >= (ring->base + ring->len)) + ring->wp = ring->base; + /* smp update */ + 
smp_wmb(); +} + static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { @@ -416,14 +488,12 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, /* Get the TRB this event points to */ ev_tre = mhi_to_virtual(tre_ring, ptr); - /* device rp after servicing the TREs */ dev_rp = ev_tre + 1; if (dev_rp >= (tre_ring->base + tre_ring->len)) dev_rp = tre_ring->base; result.dir = mhi_chan->dir; - /* local rp */ local_rp = tre_ring->rp; while (local_rp != dev_rp) { buf_info = buf_ring->rp; @@ -433,6 +503,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, else xfer_len = buf_info->len; + /* Unmap if it's not pre-mapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + result.buf_addr = buf_info->cb_buf; result.bytes_xferd = xfer_len; mhi_del_ring_element(mhi_cntrl, buf_ring); @@ -444,6 +518,23 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, if (mhi_chan->dir == DMA_TO_DEVICE) atomic_dec(&mhi_cntrl->pending_pkts); + + /* + * Recycle the buffer if buffer is pre-allocated, + * if there is an error, not much we can do apart + * from dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, + mhi_chan->dir, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + dev_err(dev, + "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } } break; } /* CC_EOT */ @@ -803,3 +894,623 @@ void mhi_ctrl_ev_task(unsigned long data) schedule_work(&mhi_cntrl->syserr_worker); } } + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *tmp = ring->wp + ring->el_size; + + if (tmp >= (ring->base + ring->len)) + tmp = ring->base; + + return (tmp == ring->rp); +} + +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + int ret; + + /* If MHI host pre-allocates buffers then client drivers cannot queue */ + if (mhi_chan->pre_alloc) + return -EINVAL; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + } + + /* Toggle wake to exit out of M2 */ + mhi_cntrl->wake_toggle(mhi_cntrl); + + /* Generate the TRE */ + buf_info = buf_ring->wp; + + buf_info->v_addr = skb->data; + buf_info->cb_buf = skb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + goto map_error; + + mhi_tre = tre_ring->wp; + + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + read_lock_bh(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_bh(&mhi_chan->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +map_error: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_queue_skb); + +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + + /* If MHI host pre-allocates buffers then client drivers cannot queue */ + if (mhi_chan->pre_alloc) + return -EINVAL; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + dev_err(dev, "MHI is not in activate state, PM state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + } + + /* Toggle wake to exit out of M2 */ + mhi_cntrl->wake_toggle(mhi_cntrl); + + /* Generate the TRE */ + buf_info = buf_ring->wp; + WARN_ON(buf_info->used); + buf_info->p_addr = mhi_buf->dma_addr; + buf_info->pre_mapped = true; + buf_info->cb_buf = mhi_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + + mhi_tre = tre_ring->wp; + + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + read_lock_bh(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_bh(&mhi_chan->lock); + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_queue_dma); + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum mhi_flags flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + buf_info->v_addr = buf; + buf_info->cb_buf = cb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = buf_len; + + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : + mhi_dev->dl_chan; + struct mhi_ring *tre_ring; + unsigned long flags; + int ret; + + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) + return -EIO; + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + } + + /* Toggle wake to exit out of M2 */ + mhi_cntrl->wake_toggle(mhi_cntrl); + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_inc(&mhi_cntrl->pending_pkts); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_queue_buf); + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum mhi_cmd_type cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int chan = 0; + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + default: + dev_err(dev, "Command not supported\n"); + break; + } + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan); + + /* no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + write_unlock_irq(&mhi_chan->lock); + mutex_unlock(&mhi_chan->mutex); + return; + } + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + goto error_invalid_state; + } + + mhi_cntrl->wake_toggle(mhi_cntrl); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); + if 
(ret) + goto error_invalid_state; + + /* even if it fails we will still reset */ + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) + dev_err(dev, + "Failed to receive cmd completion, still resetting\n"); + +error_invalid_state: + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + dev_dbg(dev, "chan:%d successfully resetted\n", mhi_chan->chan); + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan); + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + dev_err(dev, + "Current EE: %s Required EE Mask: 0x%x for chan: %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask, + mhi_chan->name); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + + /* If channel is not in disable state, do not allow it to start */ + if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) { + ret = -EIO; + dev_dbg(dev, "channel: %d is not in disabled state\n", + mhi_chan->chan); + goto error_init_chan; + } + + /* Check of client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) + goto error_init_chan; + } + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + ret = -EIO; + goto error_pm_state; + } + + mhi_cntrl->wake_toggle(mhi_cntrl); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); + if (ret) + goto error_pm_state; + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + ret = -EIO; + goto error_pm_state; + } + + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_ENABLED; + write_unlock_irq(&mhi_chan->lock); + + /* Pre-allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* Prepare transfer descriptors */ + ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, + len, MHI_EOT); + if (ret) { + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + mutex_unlock(&mhi_chan->mutex); + + dev_dbg(dev, "Chan: %d successfully moved to start state\n", + mhi_chan->chan); + + return 0; + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + mutex_unlock(&mhi_chan->mutex); + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) + +{ + struct mhi_tre 
*dev_rp, *local_rp; + struct mhi_ring *ev_ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + unsigned long flags; + + dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + dev_dbg(dev, "Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* Reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) + atomic_dec(&mhi_cntrl->pending_pkts); + + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* Nothing to reset, client doesn't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); +} + +/* Move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) + goto error_open_chan; + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } +} +EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +int mhi_poll(struct mhi_device *mhi_dev, u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_poll); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 2ba2f6aba9d5..52690cb5c89c 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -927,3 +927,43 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) return ret; } EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_dev->dev_wake++; + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + mhi_dev->dev_wake++; + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_dev->dev_wake--; + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl); + mhi_cntrl->runtime_put(mhi_cntrl); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL_GPL(mhi_device_put); diff --git a/include/linux/mhi.h b/include/linux/mhi.h index bf8921ee0805..79cb9f898544 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -336,6 +337,8 @@ struct mhi_controller_config { * @wake_toggle: CB function to assert and de-assert device wake (optional) * @runtime_get: CB function to controller runtime resume (required) * @runtimet_put: CB function to decrement pm usage (required) + * @map_single: CB function to create TRE buffer + * @unmap_single: CB function to destroy TRE buffer * @buffer_len: Bounce buffer length * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) @@ -403,6 +406,10 @@ struct mhi_controller { void (*wake_toggle)(struct mhi_controller *mhi_cntrl); int (*runtime_get)(struct mhi_controller *mhi_cntrl); void (*runtime_put)(struct mhi_controller *mhi_cntrl); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); size_t buffer_len; bool bounce_buf; @@ -583,4 +590,77 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); */ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); +/** + * mhi_device_get - Disable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - Disable device low power mode. 
Synchronously + * take the controller out of suspended state + * @mhi_dev: Device associated with the channel + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - Re-enable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - Setup channel for data transfer + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer - Unprepare the channels + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_poll - Poll for any available data in DL direction + * @mhi_dev: Device associated with the channels + * @budget: # of events to process + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_queue_dma - Send or receive DMA mapped buffers from client device + * over MHI channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @mhi_buf: Buffer for holding the DMA mapped data + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_queue_buf - Send or receive raw buffers from client device over MHI + * channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @buf: Buffer for holding the data + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_queue_skb - Send or receive SKBs from client device over MHI channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @skb: Buffer for holding SKBs + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags); + #endif /* _MHI_H_ */ -- cgit v1.2.3-58-ga151 From e6b0de469c5babfe29a86be289408ba2070ea44a Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 20 Feb 2020 15:28:50 +0530 Subject: bus: mhi: core: Add uevent support for module autoloading Add uevent support to MHI bus so that the client drivers can be autoloaded by udev when the MHI devices gets created. The client drivers are expected to provide MODULE_DEVICE_TABLE with the MHI id_table struct so that the alias can be exported. 
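To illustrate what this means for a client driver (this example is not part of the series; the channel name "SANDBOX" and the table name are made up), the driver advertises the channels it services roughly like so:

	static const struct mhi_device_id sandbox_mhi_match[] = {
		{ .chan = "SANDBOX" },	/* hypothetical channel name */
		{},
	};
	MODULE_DEVICE_TABLE(mhi, sandbox_mhi_match);

file2alias then emits a "mhi:SANDBOX" alias for the module, which matches the MODALIAS=mhi:<chan_name> string sent by the mhi_uevent() helper added below, letting udev modprobe the driver when the device is created.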
Signed-off-by: Manivannan Sadhasivam Reviewed-by: Jeffrey Hugo Tested-by: Jeffrey Hugo Link: https://lore.kernel.org/r/20200220095854.4804-13-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/init.c | 9 +++++++++ include/linux/mod_devicetable.h | 1 + scripts/mod/devicetable-offsets.c | 3 +++ scripts/mod/file2alias.c | 10 ++++++++++ 4 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 7bfffa9a5ad1..5fb756ca335e 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -1210,6 +1210,14 @@ void mhi_driver_unregister(struct mhi_driver *mhi_drv) } EXPORT_SYMBOL_GPL(mhi_driver_unregister); +static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, + mhi_dev->chan_name); +} + static int mhi_match(struct device *dev, struct device_driver *drv) { struct mhi_device *mhi_dev = to_mhi_device(dev); @@ -1236,6 +1244,7 @@ struct bus_type mhi_bus_type = { .name = "mhi", .dev_name = "mhi", .match = mhi_match, + .uevent = mhi_uevent, }; static int __init mhi_init(void) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index be15e997fe39..f10e779a3fd0 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -821,6 +821,7 @@ struct wmi_device_id { const void *context; }; +#define MHI_DEVICE_MODALIAS_FMT "mhi:%s" #define MHI_NAME_SIZE 32 /** diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index 054405b90ba4..fe3f4a95cb21 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -231,5 +231,8 @@ int main(void) DEVID(wmi_device_id); DEVID_FIELD(wmi_device_id, guid_string); + DEVID(mhi_device_id); + DEVID_FIELD(mhi_device_id, chan); + return 0; } diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index c91eba751804..cae6a4e471b5 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1335,6 +1335,15 @@ static int do_wmi_entry(const char *filename, void *symval, char *alias) return 1; } +/* Looks like: mhi:S */ +static int do_mhi_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD_ADDR(symval, mhi_device_id, chan); + sprintf(alias, MHI_DEVICE_MODALIAS_FMT, *chan); + + return 1; +} + /* Does namelen bytes of name exactly match the symbol? */ static bool sym_is(const char *name, unsigned namelen, const char *symbol) { @@ -1407,6 +1416,7 @@ static const struct devtable devtable[] = { {"typec", SIZE_typec_device_id, do_typec_entry}, {"tee", SIZE_tee_client_device_id, do_tee_entry}, {"wmi", SIZE_wmi_device_id, do_wmi_entry}, + {"mhi", SIZE_mhi_device_id, do_mhi_entry}, }; /* Create MODULE_ALIAS() statements. -- cgit v1.2.3-58-ga151 From ff895103a84abc85a5f43ecabc7f67cf36e1348f Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 17 Mar 2020 17:32:23 -0400 Subject: tracing: Save off entry when peeking at next entry In order to have the iterator read the buffer even when it's still updating, it requires that the ring buffer iterator saves each event in a separate location outside the ring buffer such that its use is immutable. There's one use case that saves off the event returned from the ring buffer iterator and calls it again to look at the next event, before going back to use the first event.
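Roughly, that pattern (modeled on trace_print_lat_context() in the diff below; trimmed here for illustration) is:

	struct trace_entry *entry, *next_entry;
	u64 next_ts;

	entry = iter->ent;			/* save off the current event */
	next_entry = trace_find_next_entry(iter, NULL, &next_ts);
	/* ... "entry" is dereferenced again afterwards; with a single-copy
	 * iterator, the peek above may have clobbered what it points at */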
As the ring buffer iterator will only have a single copy, this use case will no longer be supported. Instead, have the one use case create its own buffer to store the first event when looking at the next event. This way, when looking at the first event again, it won't be corrupted by the second read. Link: http://lkml.kernel.org/r/20200317213415.722539921@goodmis.org Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 2 ++ kernel/trace/trace.c | 40 +++++++++++++++++++++++++++++++++++++++- kernel/trace/trace_output.c | 15 ++++++--------- 3 files changed, 47 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 6c7a10a6d71e..5c6943354049 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -85,6 +85,8 @@ struct trace_iterator { struct mutex mutex; struct ring_buffer_iter **buffer_iter; unsigned long iter_flags; + void *temp; /* temp holder */ + unsigned int temp_size; /* trace_seq for __print_flags() and __print_symbolic() etc. */ struct trace_seq tmp_seq; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 02be4ddd4ad5..819e31d0d66c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3466,7 +3466,31 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { - return __find_next_entry(iter, ent_cpu, NULL, ent_ts); + /* __find_next_entry will reset ent_size */ + int ent_size = iter->ent_size; + struct trace_entry *entry; + + /* + * The __find_next_entry() may call peek_next_entry(), which may + * call ring_buffer_peek() that may make the contents of iter->ent + * undefined. Need to copy iter->ent now. + */ + if (iter->ent && iter->ent != iter->temp) { + if (!iter->temp || iter->temp_size < iter->ent_size) { + kfree(iter->temp); + iter->temp = kmalloc(iter->ent_size, GFP_KERNEL); + if (!iter->temp) + return NULL; + } + memcpy(iter->temp, iter->ent, iter->ent_size); + iter->temp_size = iter->ent_size; + iter->ent = iter->temp; + } + entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); + /* Put back the original ent_size */ + iter->ent_size = ent_size; + + return entry; } /* Find the next real entry, and increment the iterator to the next entry */ @@ -4197,6 +4221,18 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) if (!iter->buffer_iter) goto release; + /* + * trace_find_next_entry() may need to save off iter->ent. + * It will place it into the iter->temp buffer. As most + * events are less than 128, allocate a buffer of that size. + * If one is greater, then trace_find_next_entry() will + * allocate a new buffer to adjust for the bigger iter->ent. + * It's not critical if it fails to get allocated here. + */ + iter->temp = kmalloc(128, GFP_KERNEL); + if (iter->temp) + iter->temp_size = 128; + /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading.
@@ -4269,6 +4305,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); + kfree(iter->temp); kfree(iter->buffer_iter); release: seq_release_private(inode, file); @@ -4344,6 +4381,7 @@ static int tracing_release(struct inode *inode, struct file *file) mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); + kfree(iter->temp); kfree(iter->trace); kfree(iter->buffer_iter); seq_release_private(inode, file); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index e25a7da79c6b..9a121e147102 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -617,22 +617,19 @@ int trace_print_context(struct trace_iterator *iter) int trace_print_lat_context(struct trace_iterator *iter) { + struct trace_entry *entry, *next_entry; struct trace_array *tr = iter->tr; - /* trace_find_next_entry will reset ent_size */ - int ent_size = iter->ent_size; struct trace_seq *s = &iter->seq; - u64 next_ts; - struct trace_entry *entry = iter->ent, - *next_entry = trace_find_next_entry(iter, NULL, - &next_ts); unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE); + u64 next_ts; - /* Restore the original ent_size */ - iter->ent_size = ent_size; - + next_entry = trace_find_next_entry(iter, NULL, &next_ts); if (!next_entry) next_ts = iter->ts; + /* trace_find_next_entry() may change iter->ent */ + entry = iter->ent; + if (verbose) { char comm[TASK_COMM_LEN]; -- cgit v1.2.3-58-ga151 From bc1a72afdc4a91844928831cac85731566e03bc6 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 17 Mar 2020 17:32:25 -0400 Subject: ring-buffer: Rename ring_buffer_read() to ring_buffer_iter_advance() When the ring buffer was first created, the iterator followed the normal producer/consumer operations where it had both a peek() operation, that just returned the event at the current location, and a read(), that would return the event at the current location and also increment the iterator such that the next peek() or read() will return the next event. The only current use of ring_buffer_read() is to move the iterator to the next location; nothing actually reads the event it returns. Rename this function to match its actual use case: ring_buffer_iter_advance(), where the "iter" part of the name makes the iterator association explicit. As the timestamp returned by ring_buffer_read() was never used, there's no reason this new version should bother returning it. It will also become a void function.
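For existing callers the conversion is mechanical; a sketch of the before and after (both forms appear verbatim in the diffs below):

	/* Before: the returned event and timestamp were ignored. */
	ring_buffer_read(buf_iter, NULL);

	/* After: the advance-only intent is explicit and nothing is returned. */
	ring_buffer_iter_advance(buf_iter);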
Link: http://lkml.kernel.org/r/20200317213416.018928618@goodmis.org Signed-off-by: Steven Rostedt (VMware) --- include/linux/ring_buffer.h | 3 +-- kernel/trace/ring_buffer.c | 23 ++++++----------------- kernel/trace/trace.c | 4 ++-- kernel/trace/trace_functions_graph.c | 2 +- 4 files changed, 10 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index df0124eabece..0ae603b79b0e 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -135,8 +135,7 @@ void ring_buffer_read_finish(struct ring_buffer_iter *iter); struct ring_buffer_event * ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); -struct ring_buffer_event * -ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); +void ring_buffer_iter_advance(struct ring_buffer_iter *iter); void ring_buffer_iter_reset(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1718520a2809..f57eeaa80e3e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -4318,35 +4318,24 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter) EXPORT_SYMBOL_GPL(ring_buffer_read_finish); /** - * ring_buffer_read - read the next item in the ring buffer by the iterator + * ring_buffer_iter_advance - advance the iterator to the next location * @iter: The ring buffer iterator - * @ts: The time stamp of the event read. * - * This reads the next event in the ring buffer and increments the iterator. + * Move the location of the iterator such that the next read will + * be the next location of the iterator. */ -struct ring_buffer_event * -ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) +void ring_buffer_iter_advance(struct ring_buffer_iter *iter) { - struct ring_buffer_event *event; struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; unsigned long flags; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); - again: - event = rb_iter_peek(iter, ts); - if (!event) - goto out; - - if (event->type_len == RINGBUF_TYPE_PADDING) - goto again; rb_advance_iter(iter); - out: - raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); - return event; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); } -EXPORT_SYMBOL_GPL(ring_buffer_read); +EXPORT_SYMBOL_GPL(ring_buffer_iter_advance); /** * ring_buffer_size - return the size of the ring buffer (in bytes) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 819e31d0d66c..47889123be7f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3378,7 +3378,7 @@ static void trace_iterator_increment(struct trace_iterator *iter) iter->idx++; if (buf_iter) - ring_buffer_read(buf_iter, NULL); + ring_buffer_iter_advance(buf_iter); } static struct trace_entry * @@ -3562,7 +3562,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) if (ts >= iter->array_buffer->time_start) break; entries++; - ring_buffer_read(buf_iter, NULL); + ring_buffer_iter_advance(buf_iter); } per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 7d71546ba00a..4a9c49c08ec9 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -482,7 +482,7 @@ get_return_for_leaf(struct trace_iterator *iter, /* this is a leaf, now advance the iterator */ if (ring_iter) - ring_buffer_read(ring_iter, NULL); + 
ring_buffer_iter_advance(ring_iter); return next; } -- cgit v1.2.3-58-ga151 From a9107de4b03604ce0d279315c91b31b8065ee4ea Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 11 Mar 2020 11:35:44 +0000 Subject: soundwire: stream: Add read_only_wordlength flag to port properties According to SoundWire Specification Version 1.2: "A Data Port number X (in the range 0-14) which supports only one value of WordLength may implement the WordLength field in the DPX_BlockCtrl1 Register as Read-Only, returning the fixed value of WordLength in response to reads." As the WSA881x interfaces in PDM mode, the only field in DPX_BlockCtrl1, "WordLength", is fixed and read-only. Writing to this register on the WSA881x SoundWire slave with the Qualcomm SoundWire controller results in an error. It is unclear how other controllers deal with writes to read-only registers, so this patch provides a way to avoid writes to the DPN_BlockCtrl1 register by adding a read_only_wordlength flag to struct sdw_dpn_prop. Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20200311113545.23773-2-srinivas.kandagatla@linaro.org Signed-off-by: Vinod Koul --- drivers/soundwire/stream.c | 16 +++++++++------- include/linux/soundwire/sdw.h | 2 ++ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 178ae92b8cc1..7fb89a94d9c0 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -167,13 +167,15 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus, return ret; } - /* Program DPN_BlockCtrl1 register */ - ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1)); - if (ret < 0) { - dev_err(&s_rt->slave->dev, - "DPN_BlockCtrl1 register write failed for port %d\n", - t_params->port_num); - return ret; + if (!dpn_prop->read_only_wordlength) { + /* Program DPN_BlockCtrl1 register */ + ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1)); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_BlockCtrl1 register write failed for port %d\n", + t_params->port_num); + return ret; + } } /* Program DPN_SampleCtrl1 register */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index b451bb622335..2dfe14ed3bb0 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -284,6 +284,7 @@ struct sdw_dpn_audio_mode { * @max_async_buffer: Number of samples that this port can buffer in * asynchronous modes * @block_pack_mode: Type of block port mode supported + * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register + * @port_encoding: Payload Channel Sample encoding schemes supported * @audio_modes: Audio modes supported */ @@ -307,6 +308,7 @@ struct sdw_dpn_prop { u32 modes; u32 max_async_buffer; bool block_pack_mode; + bool read_only_wordlength; u32 port_encoding; struct sdw_dpn_audio_mode *audio_modes; }; -- cgit v1.2.3-58-ga151 From 835d722ba10ac924adba1e8a46f2d80955222b4b Mon Sep 17 00:00:00 2001 From: Mike Leach Date: Fri, 20 Mar 2020 10:52:52 -0600 Subject: coresight: cti: Initial CoreSight CTI Driver This introduces a baseline CTI driver and associated configuration files. It uses the platform-agnostic naming standard for CoreSight devices, along with a generic platform probing method that currently supports device tree descriptions, but allows for the ACPI bindings to be added once these have been defined for the CTI devices.
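The driver's enable and disable operations are reference counted; a hypothetical in-kernel caller (a sketch only, using the helpers declared in coresight-cti.h below) would pair them like this:

	int ret;

	ret = cti_enable(drvdata->csdev);	/* hw programmed on first enable */
	if (ret)
		return ret;
	/* ... cross triggering in use ... */
	cti_disable(drvdata->csdev);		/* hw disabled when count hits 0 */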
Driver will probe for the device on the AMBA bus, and load the CTI driver on CoreSight ID match to CTI IDs in tables. Initial sysfs support for enable / disable provided. Default CTI interconnection data is generated based on hardware register signal counts, with no additional connection information. Signed-off-by: Mike Leach Reviewed-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200320165303.13681-2-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/Kconfig | 12 + drivers/hwtracing/coresight/Makefile | 3 + .../hwtracing/coresight/coresight-cti-platform.c | 53 +++ drivers/hwtracing/coresight/coresight-cti-sysfs.c | 83 ++++ drivers/hwtracing/coresight/coresight-cti.c | 446 +++++++++++++++++++++ drivers/hwtracing/coresight/coresight-cti.h | 186 +++++++++ drivers/hwtracing/coresight/coresight.c | 3 + include/linux/coresight.h | 23 ++ 8 files changed, 809 insertions(+) create mode 100644 drivers/hwtracing/coresight/coresight-cti-platform.c create mode 100644 drivers/hwtracing/coresight/coresight-cti-sysfs.c create mode 100644 drivers/hwtracing/coresight/coresight-cti.c create mode 100644 drivers/hwtracing/coresight/coresight-cti.h (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 6ff30e25af55..45d3822c8c8c 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -110,4 +110,16 @@ config CORESIGHT_CPU_DEBUG properly, please refer Documentation/trace/coresight-cpu-debug.rst for detailed description and the example for usage. +config CORESIGHT_CTI + bool "CoreSight Cross Trigger Interface (CTI) driver" + depends on ARM || ARM64 + help + This driver provides support for CoreSight CTI and CTM components. + These provide hardware triggering events between CoreSight trace + source and sink components. These can be used to halt trace or + inject events into the trace stream. CTI also provides a software + control to trigger the same halt events. This can provide fast trace + halt compared to disabling sources and sinks normally in driver + software. + endif diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index 3c0ac421e211..0e3e72f0f510 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile @@ -17,3 +17,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o +obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o \ + coresight-cti-platform.o \ + coresight-cti-sysfs.o diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c new file mode 100644 index 000000000000..665be86c585d --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cti-platform.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, The Linaro Limited. All rights reserved. + */ + +#include + +#include "coresight-cti.h" + +/* get the hardware configuration & connection data. 
*/ +int cti_plat_get_hw_data(struct device *dev, + struct cti_drvdata *drvdata) +{ + int rc = 0; + struct cti_device *cti_dev = &drvdata->ctidev; + + /* if no connections, just add a single default based on max IN-OUT */ + if (cti_dev->nr_trig_con == 0) + rc = cti_add_default_connection(dev, drvdata); + return rc; +} + +struct coresight_platform_data * +coresight_cti_get_platform_data(struct device *dev) +{ + int ret = -ENOENT; + struct coresight_platform_data *pdata = NULL; + struct fwnode_handle *fwnode = dev_fwnode(dev); + struct cti_drvdata *drvdata = dev_get_drvdata(dev); + + if (IS_ERR_OR_NULL(fwnode)) + goto error; + + /* + * Alloc platform data but leave it zero init. CTI does not use the + * same connection infrastructuree as trace path components but an + * empty struct enables us to use the standard coresight component + * registration code. + */ + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + ret = -ENOMEM; + goto error; + } + + /* get some CTI specifics */ + ret = cti_plat_get_hw_data(dev, drvdata); + + if (!ret) + return pdata; +error: + return ERR_PTR(ret); +} diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c new file mode 100644 index 000000000000..6d2790568071 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 Linaro Limited, All rights reserved. + * Author: Mike Leach + */ + +#include + +#include "coresight-cti.h" + +/* basic attributes */ +static ssize_t enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int enable_req; + bool enabled, powered; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + enable_req = atomic_read(&drvdata->config.enable_req_count); + spin_lock(&drvdata->spinlock); + powered = drvdata->config.hw_powered; + enabled = drvdata->config.hw_enabled; + spin_unlock(&drvdata->spinlock); + + if (powered) + return sprintf(buf, "%d\n", enabled); + else + return sprintf(buf, "%d\n", !!enable_req); +} + +static ssize_t enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret = 0; + unsigned long val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + if (val) + ret = cti_enable(drvdata->csdev); + else + ret = cti_disable(drvdata->csdev); + if (ret) + return ret; + return size; +} +static DEVICE_ATTR_RW(enable); + +static ssize_t powered_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + bool powered; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + spin_lock(&drvdata->spinlock); + powered = drvdata->config.hw_powered; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%d\n", powered); +} +static DEVICE_ATTR_RO(powered); + +/* attribute and group sysfs tables. 
*/ +static struct attribute *coresight_cti_attrs[] = { + &dev_attr_enable.attr, + &dev_attr_powered.attr, + NULL, +}; + +static const struct attribute_group coresight_cti_group = { + .attrs = coresight_cti_attrs, +}; + +const struct attribute_group *coresight_cti_groups[] = { + &coresight_cti_group, + NULL, +}; diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c new file mode 100644 index 000000000000..c71b72d12534 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cti.c @@ -0,0 +1,446 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 Linaro Limited, All rights reserved. + * Author: Mike Leach + */ + +#include "coresight-cti.h" + +/** + * CTI devices can be associated with a PE, or be connected to CoreSight + * hardware. We have a list of all CTIs irrespective of CPU bound or + * otherwise. + * + * We assume that the non-CPU CTIs are always powered as we do with sinks etc. + * + * We leave the client to figure out if all the CTIs are interconnected with + * the same CTM, in general this is the case but does not always have to be. + */ + +/* net of CTI devices connected via CTM */ +LIST_HEAD(ect_net); + +/* protect the list */ +static DEFINE_MUTEX(ect_mutex); + +#define csdev_to_cti_drvdata(csdev) \ + dev_get_drvdata(csdev->dev.parent) + +/* + * CTI naming. CTI bound to cores will have the name cti_cpu where + * N is the CPU ID. System CTIs will have the name cti_sys where I + * is an index allocated by order of discovery. + * + * CTI device name list - for CTI not bound to cores. + */ +DEFINE_CORESIGHT_DEVLIST(cti_sys_devs, "cti_sys"); + +/* write set of regs to hardware - call with spinlock claimed */ +void cti_write_all_hw_regs(struct cti_drvdata *drvdata) +{ + struct cti_config *config = &drvdata->config; + int i; + + CS_UNLOCK(drvdata->base); + + /* disable CTI before writing registers */ + writel_relaxed(0, drvdata->base + CTICONTROL); + + /* write the CTI trigger registers */ + for (i = 0; i < config->nr_trig_max; i++) { + writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i)); + writel_relaxed(config->ctiouten[i], + drvdata->base + CTIOUTEN(i)); + } + + /* other regs */ + writel_relaxed(config->ctigate, drvdata->base + CTIGATE); + writel_relaxed(config->asicctl, drvdata->base + ASICCTL); + writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET); + + /* re-enable CTI */ + writel_relaxed(1, drvdata->base + CTICONTROL); + + CS_LOCK(drvdata->base); +} + +static void cti_enable_hw_smp_call(void *info) +{ + struct cti_drvdata *drvdata = info; + + cti_write_all_hw_regs(drvdata); +} + +/* write regs to hardware and enable */ +static int cti_enable_hw(struct cti_drvdata *drvdata) +{ + struct cti_config *config = &drvdata->config; + struct device *dev = &drvdata->csdev->dev; + int rc = 0; + + pm_runtime_get_sync(dev->parent); + spin_lock(&drvdata->spinlock); + + /* no need to do anything if enabled or unpowered*/ + if (config->hw_enabled || !config->hw_powered) + goto cti_state_unchanged; + + /* claim the device */ + rc = coresight_claim_device(drvdata->base); + if (rc) + goto cti_err_not_enabled; + + if (drvdata->ctidev.cpu >= 0) { + rc = smp_call_function_single(drvdata->ctidev.cpu, + cti_enable_hw_smp_call, + drvdata, 1); + if (rc) + goto cti_err_not_enabled; + } else { + cti_write_all_hw_regs(drvdata); + } + + config->hw_enabled = true; + atomic_inc(&drvdata->config.enable_req_count); + spin_unlock(&drvdata->spinlock); + return rc; + +cti_state_unchanged: + 
atomic_inc(&drvdata->config.enable_req_count); + + /* cannot enable due to error */ +cti_err_not_enabled: + spin_unlock(&drvdata->spinlock); + pm_runtime_put(dev->parent); + return rc; +} + +/* disable hardware */ +static int cti_disable_hw(struct cti_drvdata *drvdata) +{ + struct cti_config *config = &drvdata->config; + struct device *dev = &drvdata->csdev->dev; + + spin_lock(&drvdata->spinlock); + + /* check refcount - disable on 0 */ + if (atomic_dec_return(&drvdata->config.enable_req_count) > 0) + goto cti_not_disabled; + + /* no need to do anything if disabled or cpu unpowered */ + if (!config->hw_enabled || !config->hw_powered) + goto cti_not_disabled; + + CS_UNLOCK(drvdata->base); + + /* disable CTI */ + writel_relaxed(0, drvdata->base + CTICONTROL); + config->hw_enabled = false; + + coresight_disclaim_device_unlocked(drvdata->base); + CS_LOCK(drvdata->base); + spin_unlock(&drvdata->spinlock); + pm_runtime_put(dev); + return 0; + + /* not disabled this call */ +cti_not_disabled: + spin_unlock(&drvdata->spinlock); + return 0; +} + +/* + * Look at the HW DEVID register for some of the HW settings. + * DEVID[15:8] - max number of in / out triggers. + */ +#define CTI_DEVID_MAXTRIGS(devid_val) ((int) BMVAL(devid_val, 8, 15)) + +/* DEVID[19:16] - number of CTM channels */ +#define CTI_DEVID_CTMCHANNELS(devid_val) ((int) BMVAL(devid_val, 16, 19)) + +static void cti_set_default_config(struct device *dev, + struct cti_drvdata *drvdata) +{ + struct cti_config *config = &drvdata->config; + u32 devid; + + devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + config->nr_trig_max = CTI_DEVID_MAXTRIGS(devid); + + /* + * no current hardware should exceed this, but protect the driver + * in case of fault / out of spec hw + */ + if (config->nr_trig_max > CTIINOUTEN_MAX) { + dev_warn_once(dev, + "Limiting HW MaxTrig value(%d) to driver max(%d)\n", + config->nr_trig_max, CTIINOUTEN_MAX); + config->nr_trig_max = CTIINOUTEN_MAX; + } + + config->nr_ctm_channels = CTI_DEVID_CTMCHANNELS(devid); + + /* Most regs default to 0 as zalloc'ed except...*/ + config->trig_filter_enable = true; + config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0); + atomic_set(&config->enable_req_count, 0); +} + +/* + * Add a connection entry to the list of connections for this + * CTI device. + */ +int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata, + struct cti_trig_con *tc, + struct coresight_device *csdev, + const char *assoc_dev_name) +{ + struct cti_device *cti_dev = &drvdata->ctidev; + + tc->con_dev = csdev; + /* + * Prefer actual associated CS device dev name to supplied value - + * which is likely to be node name / other conn name. 
+ */ + if (csdev) + tc->con_dev_name = dev_name(&csdev->dev); + else if (assoc_dev_name != NULL) { + tc->con_dev_name = devm_kstrdup(dev, + assoc_dev_name, GFP_KERNEL); + if (!tc->con_dev_name) + return -ENOMEM; + } + list_add_tail(&tc->node, &cti_dev->trig_cons); + cti_dev->nr_trig_con++; + + /* add connection usage bit info to overall info */ + drvdata->config.trig_in_use |= tc->con_in->used_mask; + drvdata->config.trig_out_use |= tc->con_out->used_mask; + + return 0; +} + +/* create a trigger connection with appropriately sized signal groups */ +struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs, + int out_sigs) +{ + struct cti_trig_con *tc = NULL; + struct cti_trig_grp *in = NULL, *out = NULL; + + tc = devm_kzalloc(dev, sizeof(struct cti_trig_con), GFP_KERNEL); + if (!tc) + return tc; + + in = devm_kzalloc(dev, + offsetof(struct cti_trig_grp, sig_types[in_sigs]), + GFP_KERNEL); + if (!in) + return NULL; + + out = devm_kzalloc(dev, + offsetof(struct cti_trig_grp, sig_types[out_sigs]), + GFP_KERNEL); + if (!out) + return NULL; + + tc->con_in = in; + tc->con_out = out; + tc->con_in->nr_sigs = in_sigs; + tc->con_out->nr_sigs = out_sigs; + return tc; +} + +/* + * Add a default connection if nothing else is specified. + * single connection based on max in/out info, no assoc device + */ +int cti_add_default_connection(struct device *dev, struct cti_drvdata *drvdata) +{ + int ret = 0; + int n_trigs = drvdata->config.nr_trig_max; + u32 n_trig_mask = GENMASK(n_trigs - 1, 0); + struct cti_trig_con *tc = NULL; + + /* + * Assume max trigs for in and out, + * all used, default sig types allocated + */ + tc = cti_allocate_trig_con(dev, n_trigs, n_trigs); + if (!tc) + return -ENOMEM; + + tc->con_in->used_mask = n_trig_mask; + tc->con_out->used_mask = n_trig_mask; + ret = cti_add_connection_entry(dev, drvdata, tc, NULL, "default"); + return ret; +} + +/** cti ect operations **/ +int cti_enable(struct coresight_device *csdev) +{ + struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev); + + return cti_enable_hw(drvdata); +} + +int cti_disable(struct coresight_device *csdev) +{ + struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev); + + return cti_disable_hw(drvdata); +} + +const struct coresight_ops_ect cti_ops_ect = { + .enable = cti_enable, + .disable = cti_disable, +}; + +const struct coresight_ops cti_ops = { + .ect_ops = &cti_ops_ect, +}; + +/* + * Free up CTI specific resources + * called by dev->release, need to call down to underlying csdev release. 
+ */ +static void cti_device_release(struct device *dev) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_drvdata *ect_item, *ect_tmp; + + mutex_lock(&ect_mutex); + + /* remove from the list */ + list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) { + if (ect_item == drvdata) { + list_del(&ect_item->node); + break; + } + } + mutex_unlock(&ect_mutex); + + if (drvdata->csdev_release) + drvdata->csdev_release(dev); +} + +static int cti_probe(struct amba_device *adev, const struct amba_id *id) +{ + int ret = 0; + void __iomem *base; + struct device *dev = &adev->dev; + struct cti_drvdata *drvdata = NULL; + struct coresight_desc cti_desc; + struct coresight_platform_data *pdata = NULL; + struct resource *res = &adev->res; + + /* driver data*/ + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) { + ret = -ENOMEM; + dev_info(dev, "%s, mem err\n", __func__); + goto err_out; + } + + /* Validity for the resource is already checked by the AMBA core */ + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + dev_err(dev, "%s, remap err\n", __func__); + goto err_out; + } + drvdata->base = base; + + dev_set_drvdata(dev, drvdata); + + /* default CTI device info */ + drvdata->ctidev.cpu = -1; + drvdata->ctidev.nr_trig_con = 0; + drvdata->ctidev.ctm_id = 0; + INIT_LIST_HEAD(&drvdata->ctidev.trig_cons); + + spin_lock_init(&drvdata->spinlock); + + /* initialise CTI driver config values */ + cti_set_default_config(dev, drvdata); + + pdata = coresight_cti_get_platform_data(dev); + if (IS_ERR(pdata)) { + dev_err(dev, "coresight_cti_get_platform_data err\n"); + ret = PTR_ERR(pdata); + goto err_out; + } + + /* default to powered - could change on PM notifications */ + drvdata->config.hw_powered = true; + + /* set up device name - will depend if cpu bound or otherwise */ + if (drvdata->ctidev.cpu >= 0) + cti_desc.name = devm_kasprintf(dev, GFP_KERNEL, "cti_cpu%d", + drvdata->ctidev.cpu); + else + cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev); + if (!cti_desc.name) { + ret = -ENOMEM; + goto err_out; + } + + /* set up coresight component description */ + cti_desc.pdata = pdata; + cti_desc.type = CORESIGHT_DEV_TYPE_ECT; + cti_desc.subtype.ect_subtype = CORESIGHT_DEV_SUBTYPE_ECT_CTI; + cti_desc.ops = &cti_ops; + cti_desc.groups = coresight_cti_groups; + cti_desc.dev = dev; + drvdata->csdev = coresight_register(&cti_desc); + if (IS_ERR(drvdata->csdev)) { + ret = PTR_ERR(drvdata->csdev); + goto err_out; + } + + /* add to list of CTI devices */ + mutex_lock(&ect_mutex); + list_add(&drvdata->node, &ect_net); + mutex_unlock(&ect_mutex); + + /* set up release chain */ + drvdata->csdev_release = drvdata->csdev->dev.release; + drvdata->csdev->dev.release = cti_device_release; + + /* all done - dec pm refcount */ + pm_runtime_put(&adev->dev); + dev_info(&drvdata->csdev->dev, "CTI initialized\n"); + return 0; + +err_out: + return ret; +} + +static struct amba_cs_uci_id uci_id_cti[] = { + { + /* CTI UCI data */ + .devarch = 0x47701a14, /* CTI v2 */ + .devarch_mask = 0xfff0ffff, + .devtype = 0x00000014, /* maj(0x4-debug) min(0x1-ECT) */ + } +}; + +static const struct amba_id cti_ids[] = { + CS_AMBA_ID(0x000bb906), /* Coresight CTI (SoC 400), C-A72, C-A57 */ + CS_AMBA_ID(0x000bb922), /* CTI - C-A8 */ + CS_AMBA_ID(0x000bb9a8), /* CTI - C-A53 */ + CS_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */ + CS_AMBA_UCI_ID(0x000bb9da, uci_id_cti), /* CTI - C-A35 */ + CS_AMBA_UCI_ID(0x000bb9ed, uci_id_cti), /* Coresight CTI (SoC 600) */ + { 
0, 0}, +}; + +static struct amba_driver cti_driver = { + .drv = { + .name = "coresight-cti", + .owner = THIS_MODULE, + .suppress_bind_attrs = true, + }, + .probe = cti_probe, + .id_table = cti_ids, +}; +builtin_amba_driver(cti_driver); diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h new file mode 100644 index 000000000000..d0ac90f49544 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cti.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Linaro Limited, All rights reserved. + * Author: Mike Leach + */ + +#ifndef _CORESIGHT_CORESIGHT_CTI_H +#define _CORESIGHT_CORESIGHT_CTI_H + +#include +#include +#include "coresight-priv.h" + +/* + * Device registers + * 0x000 - 0x144: CTI programming and status + * 0xEDC - 0xEF8: CTI integration test. + * 0xF00 - 0xFFC: Coresight management registers. + */ +/* CTI programming registers */ +#define CTICONTROL 0x000 +#define CTIINTACK 0x010 +#define CTIAPPSET 0x014 +#define CTIAPPCLEAR 0x018 +#define CTIAPPPULSE 0x01C +#define CTIINEN(n) (0x020 + (4 * n)) +#define CTIOUTEN(n) (0x0A0 + (4 * n)) +#define CTITRIGINSTATUS 0x130 +#define CTITRIGOUTSTATUS 0x134 +#define CTICHINSTATUS 0x138 +#define CTICHOUTSTATUS 0x13C +#define CTIGATE 0x140 +#define ASICCTL 0x144 +/* Integration test registers */ +#define ITCHINACK 0xEDC /* WO CTI CSSoc 400 only*/ +#define ITTRIGINACK 0xEE0 /* WO CTI CSSoc 400 only*/ +#define ITCHOUT 0xEE4 /* WO RW-600 */ +#define ITTRIGOUT 0xEE8 /* WO RW-600 */ +#define ITCHOUTACK 0xEEC /* RO CTI CSSoc 400 only*/ +#define ITTRIGOUTACK 0xEF0 /* RO CTI CSSoc 400 only*/ +#define ITCHIN 0xEF4 /* RO */ +#define ITTRIGIN 0xEF8 /* RO */ +/* management registers */ +#define CTIDEVAFF0 0xFA8 +#define CTIDEVAFF1 0xFAC + +/* + * CTI CSSoc 600 has a max of 32 trigger signals per direction. + * CTI CSSoc 400 has 8 IO triggers - other CTIs can be impl def. + * Max of in and out defined in the DEVID register. + * - pick up actual number used from .dts parameters if present. + */ +#define CTIINOUTEN_MAX 32 + +/** + * Group of related trigger signals + * + * @nr_sigs: number of signals in the group. + * @used_mask: bitmask representing the signal indexes in the group. + * @sig_types: array of types for the signals, length nr_sigs. + */ +struct cti_trig_grp { + int nr_sigs; + u32 used_mask; + int sig_types[]; +}; + +/** + * Trigger connection - connection between a CTI and other (coresight) device + * lists input and output trigger signals for the device + * + * @con_in: connected CTIIN signals for the device. + * @con_out: connected CTIOUT signals for the device. + * @con_dev: coresight device connected to the CTI, NULL if not CS device + * @con_dev_name: name of connected device (CS or CPU) + * @node: entry node in list of connections. + */ +struct cti_trig_con { + struct cti_trig_grp *con_in; + struct cti_trig_grp *con_out; + struct coresight_device *con_dev; + const char *con_dev_name; + struct list_head node; +}; + +/** + * struct cti_device - description of CTI device properties. + * + * @nt_trig_con: Number of external devices connected to this device. + * @ctm_id: which CTM this device is connected to (by default it is + * assumed there is a single CTM per SoC, ID 0). + * @trig_cons: list of connections to this device. + * @cpu: CPU ID if associated with CPU, -1 otherwise. 
+ */ +struct cti_device { + int nr_trig_con; + u32 ctm_id; + struct list_head trig_cons; + int cpu; +}; + +/** + * struct cti_config - configuration of the CTI device hardware + * + * @nr_trig_max: Max number of trigger signals implemented on device. + * (max of trig_in or trig_out) - from ID register. + * @nr_ctm_channels: number of available CTM channels - from ID register. + * @enable_req_count: CTI is enabled alongside >=1 associated devices. + * @hw_enabled: true if hw is currently enabled. + * @hw_powered: true if associated cpu powered on, or no cpu. + * @trig_in_use: bitfield of in triggers registered as in use. + * @trig_out_use: bitfield of out triggers registered as in use. + * @trig_out_filter: bitfield of out triggers that are blocked if filter + * enabled. Typically this would be dbgreq / restart on + * a core CTI. + * @trig_filter_enable: 1 if filtering enabled. + * @xtrig_rchan_sel: channel selection for xtrigger connection show. + * @ctiappset: CTI Software application channel set. + * @ctiinout_sel: register selector for INEN and OUTEN regs. + * @ctiinen: enable input trigger to a channel. + * @ctiouten: enable output trigger from a channel. + * @ctigate: gate channel output from CTI to CTM. + * @asicctl: asic control register. + */ +struct cti_config { + /* hardware description */ + int nr_ctm_channels; + int nr_trig_max; + + /* cti enable control */ + atomic_t enable_req_count; + bool hw_enabled; + bool hw_powered; + + /* registered triggers and filtering */ + u32 trig_in_use; + u32 trig_out_use; + u32 trig_out_filter; + bool trig_filter_enable; + u8 xtrig_rchan_sel; + + /* cti cross trig programmable regs */ + u32 ctiappset; + u8 ctiinout_sel; + u32 ctiinen[CTIINOUTEN_MAX]; + u32 ctiouten[CTIINOUTEN_MAX]; + u32 ctigate; + u32 asicctl; +}; + +/** + * struct cti_drvdata - specifics for the CTI device + * @base: Memory mapped base address for this component.. + * @csdev: Standard CoreSight device information. + * @ctidev: Extra information needed by the CTI/CTM framework. + * @spinlock: Control data access to one at a time. + * @config: Configuration data for this CTI device. + * @node: List entry of this device in the list of CTI devices. + * @csdev_release: release function for underlying coresight_device. 
+ */ +struct cti_drvdata { + void __iomem *base; + struct coresight_device *csdev; + struct cti_device ctidev; + spinlock_t spinlock; + struct cti_config config; + struct list_head node; + void (*csdev_release)(struct device *dev); +}; + +/* private cti driver fns & vars */ +extern const struct attribute_group *coresight_cti_groups[]; +int cti_add_default_connection(struct device *dev, + struct cti_drvdata *drvdata); +int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata, + struct cti_trig_con *tc, + struct coresight_device *csdev, + const char *assoc_dev_name); +struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs, + int out_sigs); +int cti_enable(struct coresight_device *csdev); +int cti_disable(struct coresight_device *csdev); +struct coresight_platform_data * +coresight_cti_get_platform_data(struct device *dev); + +#endif /* _CORESIGHT_CORESIGHT_CTI_H */ diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index ef20f74c85fa..1a5fdf2710ff 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -955,6 +955,9 @@ static struct device_type coresight_dev_type[] = { { .name = "helper", }, + { + .name = "ect", + }, }; static void coresight_device_release(struct device *dev) diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 44e552de419c..b3e582d96a34 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -41,6 +41,7 @@ enum coresight_dev_type { CORESIGHT_DEV_TYPE_LINKSINK, CORESIGHT_DEV_TYPE_SOURCE, CORESIGHT_DEV_TYPE_HELPER, + CORESIGHT_DEV_TYPE_ECT, }; enum coresight_dev_subtype_sink { @@ -68,6 +69,12 @@ enum coresight_dev_subtype_helper { CORESIGHT_DEV_SUBTYPE_HELPER_CATU, }; +/* Embedded Cross Trigger (ECT) sub-types */ +enum coresight_dev_subtype_ect { + CORESIGHT_DEV_SUBTYPE_ECT_NONE, + CORESIGHT_DEV_SUBTYPE_ECT_CTI, +}; + /** * union coresight_dev_subtype - further characterisation of a type * @sink_subtype: type of sink this component is, as defined @@ -78,6 +85,8 @@ enum coresight_dev_subtype_helper { * by @coresight_dev_subtype_source. * @helper_subtype: type of helper this component is, as defined * by @coresight_dev_subtype_helper. 
+ * @ect_subtype: type of cross trigger this component is, as + * defined by @coresight_dev_subtype_ect */ union coresight_dev_subtype { /* We have some devices which acts as LINK and SINK */ @@ -87,6 +96,7 @@ union coresight_dev_subtype { }; enum coresight_dev_subtype_source source_subtype; enum coresight_dev_subtype_helper helper_subtype; + enum coresight_dev_subtype_ect ect_subtype; }; /** @@ -196,6 +206,7 @@ static struct coresight_dev_list (var) = { \ #define sink_ops(csdev) csdev->ops->sink_ops #define link_ops(csdev) csdev->ops->link_ops #define helper_ops(csdev) csdev->ops->helper_ops +#define ect_ops(csdev) csdev->ops->ect_ops /** * struct coresight_ops_sink - basic operations for a sink @@ -262,11 +273,23 @@ struct coresight_ops_helper { int (*disable)(struct coresight_device *csdev, void *data); }; +/** + * struct coresight_ops_ect - Ops for an embedded cross trigger device + * + * @enable : Enable the device + * @disable : Disable the device + */ +struct coresight_ops_ect { + int (*enable)(struct coresight_device *csdev); + int (*disable)(struct coresight_device *csdev); +}; + struct coresight_ops { const struct coresight_ops_sink *sink_ops; const struct coresight_ops_link *link_ops; const struct coresight_ops_source *source_ops; const struct coresight_ops_helper *helper_ops; + const struct coresight_ops_ect *ect_ops; }; #ifdef CONFIG_CORESIGHT -- cgit v1.2.3-58-ga151 From 177af8285b59a3887e4430d2c782598083cddcd7 Mon Sep 17 00:00:00 2001 From: Mike Leach Date: Fri, 20 Mar 2020 10:52:59 -0600 Subject: coresight: cti: Enable CTI associated with devices The CoreSight subsystem enables a path of devices from source to sink. Any CTI devices associated with the path devices must be enabled at the same time. This patch adds an associated coresight_device element to the main coresight device structure, and uses this to create associations between the CTI and other devices based on the device tree data. The associated device element is used to enable CTI in conjunction with the path elements. CTI devices are reference counted so where a single CTI is associated with multiple elements on the path, it will be enabled on the first associated device enable, and disabled with the last associated device disable. Signed-off-by: Mike Leach Reviewed-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200320165303.13681-9-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-cti.c | 125 +++++++++++++++++++++++++++ drivers/hwtracing/coresight/coresight-cti.h | 1 + drivers/hwtracing/coresight/coresight-priv.h | 12 +++ drivers/hwtracing/coresight/coresight.c | 71 +++++++++++++-- include/linux/coresight.h | 4 + 5 files changed, 208 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c index b8c94027fed0..2fc68760efbe 100644 --- a/drivers/hwtracing/coresight/coresight-cti.c +++ b/drivers/hwtracing/coresight/coresight-cti.c @@ -4,6 +4,7 @@ * Author: Mike Leach */ +#include #include "coresight-cti.h" /** @@ -441,6 +442,127 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op, return err; } +/* + * Look for a matching connection device name in the list of connections. + * If found then swap in the csdev name, set trig con association pointer + * and return found. 
+ */ +static bool +cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name, + struct coresight_device *csdev) +{ + struct cti_trig_con *tc; + + list_for_each_entry(tc, &ctidev->trig_cons, node) { + if (tc->con_dev_name) { + if (!strcmp(node_name, tc->con_dev_name)) { + /* match: so swap in csdev name & dev */ + tc->con_dev_name = dev_name(&csdev->dev); + tc->con_dev = csdev; + return true; + } + } + } + return false; +} + +/* + * Search the cti list to add an associated CTI into the supplied CS device + * This will set the association if CTI declared before the CS device. + * (called from coresight_register() with coresight_mutex locked). + */ +void cti_add_assoc_to_csdev(struct coresight_device *csdev) +{ + struct cti_drvdata *ect_item; + struct cti_device *ctidev; + const char *node_name = NULL; + + /* protect the list */ + mutex_lock(&ect_mutex); + + /* exit if current is an ECT device.*/ + if ((csdev->type == CORESIGHT_DEV_TYPE_ECT) || list_empty(&ect_net)) + goto cti_add_done; + + /* if we didn't find the csdev previously we used the fwnode name */ + node_name = cti_plat_get_node_name(dev_fwnode(csdev->dev.parent)); + if (!node_name) + goto cti_add_done; + + /* for each CTI in list... */ + list_for_each_entry(ect_item, &ect_net, node) { + ctidev = &ect_item->ctidev; + if (cti_match_fixup_csdev(ctidev, node_name, csdev)) { + /* + * if we found a matching csdev then update the ECT + * association pointer for the device with this CTI. + */ + csdev->ect_dev = ect_item->csdev; + break; + } + } +cti_add_done: + mutex_unlock(&ect_mutex); +} +EXPORT_SYMBOL_GPL(cti_add_assoc_to_csdev); + +/* + * Removing the associated devices is easier. + * A CTI will not have a value for csdev->ect_dev. + */ +void cti_remove_assoc_from_csdev(struct coresight_device *csdev) +{ + struct cti_drvdata *ctidrv; + struct cti_trig_con *tc; + struct cti_device *ctidev; + + mutex_lock(&ect_mutex); + if (csdev->ect_dev) { + ctidrv = csdev_to_cti_drvdata(csdev->ect_dev); + ctidev = &ctidrv->ctidev; + list_for_each_entry(tc, &ctidev->trig_cons, node) { + if (tc->con_dev == csdev->ect_dev) { + tc->con_dev = NULL; + break; + } + } + csdev->ect_dev = NULL; + } + mutex_unlock(&ect_mutex); +} +EXPORT_SYMBOL_GPL(cti_remove_assoc_from_csdev); + +/* + * Update the cross references where the associated device was found + * while we were building the connection info. This will occur if the + * assoc device was registered before the CTI. 
+ */ +static void cti_update_conn_xrefs(struct cti_drvdata *drvdata) +{ + struct cti_trig_con *tc; + struct cti_device *ctidev = &drvdata->ctidev; + + list_for_each_entry(tc, &ctidev->trig_cons, node) { + if (tc->con_dev) + /* set tc->con_dev->ect_dev */ + coresight_set_assoc_ectdev_mutex(tc->con_dev, + drvdata->csdev); + } +} + +static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata) +{ + struct cti_trig_con *tc; + struct cti_device *ctidev = &drvdata->ctidev; + + list_for_each_entry(tc, &ctidev->trig_cons, node) { + if (tc->con_dev) { + coresight_set_assoc_ectdev_mutex(tc->con_dev, + NULL); + } + } +} + /** cti ect operations **/ int cti_enable(struct coresight_device *csdev) { @@ -475,6 +597,7 @@ static void cti_device_release(struct device *dev) struct cti_drvdata *ect_item, *ect_tmp; mutex_lock(&ect_mutex); + cti_remove_conn_xrefs(drvdata); /* remove from the list */ list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) { @@ -566,6 +689,8 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) /* add to list of CTI devices */ mutex_lock(&ect_mutex); list_add(&drvdata->node, &ect_net); + /* set any cross references */ + cti_update_conn_xrefs(drvdata); mutex_unlock(&ect_mutex); /* set up release chain */ diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h index f664b4bb4644..ca277633b04f 100644 --- a/drivers/hwtracing/coresight/coresight-cti.h +++ b/drivers/hwtracing/coresight/coresight-cti.h @@ -216,6 +216,7 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op, u32 channel_idx); struct coresight_platform_data * coresight_cti_get_platform_data(struct device *dev); +const char *cti_plat_get_node_name(struct fwnode_handle *fwnode); /* cti powered and enabled */ static inline bool cti_active(struct cti_config *cfg) diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 357ffef7b825..890f9a5c97c6 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -162,6 +162,16 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; } static inline int etm_writel_cp14(u32 off, u32 val) { return 0; } #endif +#ifdef CONFIG_CORESIGHT_CTI +extern void cti_add_assoc_to_csdev(struct coresight_device *csdev); +extern void cti_remove_assoc_from_csdev(struct coresight_device *csdev); + +#else +static inline void cti_add_assoc_to_csdev(struct coresight_device *csdev) {} +static inline void +cti_remove_assoc_from_csdev(struct coresight_device *csdev) {} +#endif + /* * Macros and inline functions to handle CoreSight UCI data and driver * private data in AMBA ID table entries, and extract data values. 
@@ -204,5 +214,7 @@ static inline void *coresight_get_uci_data(const struct amba_id *id) void coresight_release_platform_data(struct coresight_platform_data *pdata); struct coresight_device * coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode); +void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev, + struct coresight_device *ect_csdev); #endif diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 39a5d9f7a395..c71553c09f8e 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -216,6 +216,44 @@ void coresight_disclaim_device(void __iomem *base) CS_LOCK(base); } +/* enable or disable an associated CTI device of the supplied CS device */ +static int +coresight_control_assoc_ectdev(struct coresight_device *csdev, bool enable) +{ + int ect_ret = 0; + struct coresight_device *ect_csdev = csdev->ect_dev; + + if (!ect_csdev) + return 0; + + if (enable) { + if (ect_ops(ect_csdev)->enable) + ect_ret = ect_ops(ect_csdev)->enable(ect_csdev); + } else { + if (ect_ops(ect_csdev)->disable) + ect_ret = ect_ops(ect_csdev)->disable(ect_csdev); + } + + /* output warning if ECT enable is preventing trace operation */ + if (ect_ret) + dev_info(&csdev->dev, "Associated ECT device (%s) %s failed\n", + dev_name(&ect_csdev->dev), + enable ? "enable" : "disable"); + return ect_ret; +} + +/* + * Set the associated ect / cti device while holding the coresight_mutex + * to avoid a race with coresight_enable that may try to use this value. + */ +void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev, + struct coresight_device *ect_csdev) +{ + mutex_lock(&coresight_mutex); + csdev->ect_dev = ect_csdev; + mutex_unlock(&coresight_mutex); +} + static int coresight_enable_sink(struct coresight_device *csdev, u32 mode, void *data) { @@ -228,9 +266,14 @@ static int coresight_enable_sink(struct coresight_device *csdev, if (!sink_ops(csdev)->enable) return -EINVAL; - ret = sink_ops(csdev)->enable(csdev, mode, data); + ret = coresight_control_assoc_ectdev(csdev, true); if (ret) return ret; + ret = sink_ops(csdev)->enable(csdev, mode, data); + if (ret) { + coresight_control_assoc_ectdev(csdev, false); + return ret; + } csdev->enable = true; return 0; @@ -246,6 +289,7 @@ static void coresight_disable_sink(struct coresight_device *csdev) ret = sink_ops(csdev)->disable(csdev); if (ret) return; + coresight_control_assoc_ectdev(csdev, false); csdev->enable = false; } @@ -269,8 +313,15 @@ static int coresight_enable_link(struct coresight_device *csdev, if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0) return outport; - if (link_ops(csdev)->enable) - ret = link_ops(csdev)->enable(csdev, inport, outport); + if (link_ops(csdev)->enable) { + ret = coresight_control_assoc_ectdev(csdev, true); + if (!ret) { + ret = link_ops(csdev)->enable(csdev, inport, outport); + if (ret) + coresight_control_assoc_ectdev(csdev, false); + } + } + if (!ret) csdev->enable = true; @@ -300,8 +351,10 @@ static void coresight_disable_link(struct coresight_device *csdev, nr_conns = 1; } - if (link_ops(csdev)->disable) + if (link_ops(csdev)->disable) { link_ops(csdev)->disable(csdev, inport, outport); + coresight_control_assoc_ectdev(csdev, false); + } for (i = 0; i < nr_conns; i++) if (atomic_read(&csdev->refcnt[i]) != 0) @@ -322,9 +375,14 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode) if (!csdev->enable) { if (source_ops(csdev)->enable) { - ret = source_ops(csdev)->enable(csdev, 
NULL, mode); + ret = coresight_control_assoc_ectdev(csdev, true); if (ret) return ret; + ret = source_ops(csdev)->enable(csdev, NULL, mode); + if (ret) { + coresight_control_assoc_ectdev(csdev, false); + return ret; + }; } csdev->enable = true; } @@ -347,6 +405,7 @@ static bool coresight_disable_source(struct coresight_device *csdev) if (atomic_dec_return(csdev->refcnt) == 0) { if (source_ops(csdev)->disable) source_ops(csdev)->disable(csdev, NULL); + coresight_control_assoc_ectdev(csdev, false); csdev->enable = false; } return !csdev->enable; @@ -964,6 +1023,7 @@ static void coresight_device_release(struct device *dev) { struct coresight_device *csdev = to_coresight_device(dev); + cti_remove_assoc_from_csdev(csdev); fwnode_handle_put(csdev->dev.fwnode); kfree(csdev->refcnt); kfree(csdev); @@ -1246,6 +1306,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) coresight_fixup_device_conns(csdev); coresight_fixup_orphan_conns(csdev); + cti_add_assoc_to_csdev(csdev); mutex_unlock(&coresight_mutex); diff --git a/include/linux/coresight.h b/include/linux/coresight.h index b3e582d96a34..193cc9dbf448 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -163,6 +163,8 @@ struct coresight_connection { * activated but not yet enabled. Enabling for a _sink_ * appens when a source has been selected for that it. * @ea: Device attribute for sink representation under PMU directory. + * @ect_dev: Associated cross trigger device. Not part of the trace data + * path or connections. */ struct coresight_device { struct coresight_platform_data *pdata; @@ -176,6 +178,8 @@ struct coresight_device { /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ struct dev_ext_attribute *ea; + /* cross trigger handling */ + struct coresight_device *ect_dev; }; /* -- cgit v1.2.3-58-ga151 From 141f15c66d9472c642f38aae010ed68289036d7c Mon Sep 17 00:00:00 2001 From: Denis Osterland-Heim Date: Sat, 21 Mar 2020 08:15:53 +0000 Subject: leds: pwm: remove header The header is only used by leds_pwm.c, so move contents to leds_pwm.c and remove it. Apply minor changes suggested by checkpatch. Remove deprecated and unused pwm_id member. 
Suggested-by: Pavel Machek Signed-off-by: Denis Osterland-Heim Signed-off-by: Pavel Machek --- drivers/leds/leds-pwm.c | 14 +++++++++++++- include/linux/leds_pwm.h | 22 ---------------------- 2 files changed, 13 insertions(+), 23 deletions(-) delete mode 100644 include/linux/leds_pwm.h (limited to 'include/linux') diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 9111cdede0ee..e1848e80aeb4 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -16,9 +16,21 @@ #include #include #include -#include #include +struct led_pwm { + const char *name; + const char *default_trigger; + u8 active_low; + unsigned int max_brightness; + unsigned int pwm_period_ns; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + struct led_pwm_data { struct led_classdev cdev; struct pwm_device *pwm; diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h deleted file mode 100644 index 93d101d28943..000000000000 --- a/include/linux/leds_pwm.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * PWM LED driver data - see drivers/leds/leds-pwm.c - */ -#ifndef __LINUX_LEDS_PWM_H -#define __LINUX_LEDS_PWM_H - -struct led_pwm { - const char *name; - const char *default_trigger; - unsigned pwm_id __deprecated; - u8 active_low; - unsigned max_brightness; - unsigned pwm_period_ns; -}; - -struct led_pwm_platform_data { - int num_leds; - struct led_pwm *leds; -}; - -#endif -- cgit v1.2.3-58-ga151 From c84ef3c5e65ccf99a7a91a4d731ebb5d6331a178 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Tue, 3 Mar 2020 19:59:26 +0530 Subject: f2fs: Add a new CP flag to help fsck fix resize SPO issues Add and set a new CP flag CP_RESIZEFS_FLAG during online resize FS to help fsck fix the metadata mismatch that may happen due to SPO during resize, where SB got updated but CP data couldn't be written yet. 
fsck errors - Info: CKPT version = 6ed7bccb Wrong user_block_count(2233856) [f2fs_do_mount:3365] Checkpoint is polluted Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 8 ++++++-- include/linux/f2fs_fs.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 46fc9c1542fe..852890b72d6a 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1298,10 +1298,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) else __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); - if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || - is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) __set_ckpt_flags(ckpt, CP_FSCK_FLAG); + if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) + __set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); + else + __clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); + if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) __set_ckpt_flags(ckpt, CP_DISABLED_FLAG); else diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index ac3f4888b3df..3c383ddd92dd 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -125,6 +125,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_RESIZEFS_FLAG 0x00004000 #define CP_DISABLED_QUICK_FLAG 0x00002000 #define CP_DISABLED_FLAG 0x00001000 #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 -- cgit v1.2.3-58-ga151 From 086b2d78375cffe58f5341359bebec0650793811 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 18 Mar 2020 20:55:20 +0100 Subject: PM: remove s390 specific callbacks ARCH_SAVE_PAGE_KEYS has been introduced in order to be able to save and restore s390 specific storage keys into a hibernation image. With hibernation support removed from s390 there is no point in keeping the callbacks. Acked-by: Christian Borntraeger Acked-by: Peter Oberparleiter Signed-off-by: Heiko Carstens Signed-off-by: Vasily Gorbik --- include/linux/suspend.h | 34 ---------------------------------- kernel/power/Kconfig | 3 --- kernel/power/snapshot.c | 18 ------------------ 3 files changed, 55 deletions(-) (limited to 'include/linux') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2b2055b035ee..4fcc6fd0cbd6 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -566,38 +566,4 @@ static inline void queue_up_suspend_work(void) {} #endif /* !CONFIG_PM_AUTOSLEEP */ -#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS -/* - * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture - * to save/restore additional information to/from the array of page - * frame numbers in the hibernation image. For s390 this is used to - * save and restore the storage key for each page that is included - * in the hibernation image. 
- */ -unsigned long page_key_additional_pages(unsigned long pages); -int page_key_alloc(unsigned long pages); -void page_key_free(void); -void page_key_read(unsigned long *pfn); -void page_key_memorize(unsigned long *pfn); -void page_key_write(void *address); - -#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ - -static inline unsigned long page_key_additional_pages(unsigned long pages) -{ - return 0; -} - -static inline int page_key_alloc(unsigned long pages) -{ - return 0; -} - -static inline void page_key_free(void) {} -static inline void page_key_read(unsigned long *pfn) {} -static inline void page_key_memorize(unsigned long *pfn) {} -static inline void page_key_write(void *address) {} - -#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ - #endif /* _LINUX_SUSPEND_H */ diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 7cbfbeacd68a..c208566c844b 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -80,9 +80,6 @@ config HIBERNATION For more information take a look at . -config ARCH_SAVE_PAGE_KEYS - bool - config PM_STD_PARTITION string "Default resume partition" depends on HIBERNATION diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index ddade80ad276..e99d13b0b8fc 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1744,9 +1744,6 @@ int hibernate_preallocate_memory(void) count += highmem; count -= totalreserve_pages; - /* Add number of pages required for page keys (s390 only). */ - size += page_key_additional_pages(saveable); - /* Compute the maximum number of saveable pages to leave in memory. */ max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); @@ -2075,8 +2072,6 @@ static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm) buf[j] = memory_bm_next_pfn(bm); if (unlikely(buf[j] == BM_END_OF_MAP)) break; - /* Save page key for data page (s390 only). */ - page_key_read(buf + j); } } @@ -2226,9 +2221,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) if (unlikely(buf[j] == BM_END_OF_MAP)) break; - /* Extract and buffer page key for data page (s390 only). */ - page_key_memorize(buf + j); - if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) memory_bm_set_bit(bm, buf[j]); else @@ -2623,11 +2615,6 @@ int snapshot_write_next(struct snapshot_handle *handle) if (error) return error; - /* Allocate buffer for page keys. */ - error = page_key_alloc(nr_copy_pages); - if (error) - return error; - hibernate_restore_protection_begin(); } else if (handle->cur <= nr_meta_pages + 1) { error = unpack_orig_pfns(buffer, ©_bm); @@ -2649,8 +2636,6 @@ int snapshot_write_next(struct snapshot_handle *handle) } } else { copy_last_highmem_page(); - /* Restore page key for data page (s390 only). */ - page_key_write(handle->buffer); hibernate_restore_protect_page(handle->buffer); handle->buffer = get_buffer(&orig_bm, &ca); if (IS_ERR(handle->buffer)) @@ -2673,9 +2658,6 @@ int snapshot_write_next(struct snapshot_handle *handle) void snapshot_write_finalize(struct snapshot_handle *handle) { copy_last_highmem_page(); - /* Restore page key for data page (s390 only). 
*/ - page_key_write(handle->buffer); - page_key_free(); hibernate_restore_protect_page(handle->buffer); /* Do that only if we have loaded the image entirely */ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { -- cgit v1.2.3-58-ga151 From 6473ea760ca1707ade74de5b57e74189d14f8e10 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:09 +0200 Subject: fsnotify: tidy up FS_ and FAN_ constants Order by value, so the free value ranges are easier to find. Link: https://lore.kernel.org/r/20200319151022.31456-2-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify_backend.h | 11 +++++------ include/uapi/linux/fanotify.h | 4 ++-- 2 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1915bdba2fad..db3cabb4600e 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -49,16 +49,15 @@ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ -#define FS_ISDIR 0x40000000 /* event occurred against dir */ -#define FS_IN_ONESHOT 0x80000000 /* only send event once */ - -#define FS_DN_RENAME 0x10000000 /* file renamed */ -#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ - /* This inode cares about things that happen to its children. Always set for * dnotify and inotify. */ #define FS_EVENT_ON_CHILD 0x08000000 +#define FS_DN_RENAME 0x10000000 /* file renamed */ +#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ +#define FS_ISDIR 0x40000000 /* event occurred against dir */ +#define FS_IN_ONESHOT 0x80000000 /* only send event once */ + #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) /* diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index b9effa6f8503..2a1844edda47 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -25,9 +25,9 @@ #define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ #define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */ -#define FAN_ONDIR 0x40000000 /* event occurred against dir */ +#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */ -#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */ +#define FAN_ONDIR 0x40000000 /* Event occurred against dir */ /* helper events */ #define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */ -- cgit v1.2.3-58-ga151 From eae36a2b8324c9dd0a66b2ae32abd4a456e49c39 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:10 +0200 Subject: fsnotify: factor helpers fsnotify_dentry() and fsnotify_file() Most of the code in fsnotify hooks is boiler plate of one or the other. Link: https://lore.kernel.org/r/20200319151022.31456-3-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify.h | 99 ++++++++++++++++++------------------------------ 1 file changed, 37 insertions(+), 62 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index a2d5d175d3c1..f54936aa0365 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -41,16 +41,36 @@ static inline int fsnotify_parent(const struct path *path, } /* - * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when - * an event is on a path. 
+ * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when + * an event is on a file/dentry. */ -static inline int fsnotify_path(struct inode *inode, const struct path *path, - __u32 mask) +static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) { - int ret = fsnotify_parent(path, NULL, mask); + struct inode *inode = d_inode(dentry); + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(NULL, dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +static inline int fsnotify_file(struct file *file, __u32 mask) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + int ret; + + if (file->f_mode & FMODE_NONOTIFY) + return 0; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + ret = fsnotify_parent(path, NULL, mask); if (ret) return ret; + return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); } @@ -58,19 +78,16 @@ static inline int fsnotify_path(struct inode *inode, const struct path *path, static inline int fsnotify_perm(struct file *file, int mask) { int ret; - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); __u32 fsnotify_mask = 0; - if (file->f_mode & FMODE_NONOTIFY) - return 0; if (!(mask & (MAY_READ | MAY_OPEN))) return 0; + if (mask & MAY_OPEN) { fsnotify_mask = FS_OPEN_PERM; if (file->f_flags & __FMODE_EXEC) { - ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM); + ret = fsnotify_file(file, FS_OPEN_EXEC_PERM); if (ret) return ret; @@ -79,10 +96,7 @@ static inline int fsnotify_perm(struct file *file, int mask) fsnotify_mask = FS_ACCESS_PERM; } - if (S_ISDIR(inode->i_mode)) - fsnotify_mask |= FS_ISDIR; - - return fsnotify_path(inode, path, fsnotify_mask); + return fsnotify_file(file, fsnotify_mask); } /* @@ -229,15 +243,7 @@ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) */ static inline void fsnotify_access(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - __u32 mask = FS_ACCESS; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - if (!(file->f_mode & FMODE_NONOTIFY)) - fsnotify_path(inode, path, mask); + fsnotify_file(file, FS_ACCESS); } /* @@ -245,15 +251,7 @@ static inline void fsnotify_access(struct file *file) */ static inline void fsnotify_modify(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - __u32 mask = FS_MODIFY; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - if (!(file->f_mode & FMODE_NONOTIFY)) - fsnotify_path(inode, path, mask); + fsnotify_file(file, FS_MODIFY); } /* @@ -261,16 +259,12 @@ static inline void fsnotify_modify(struct file *file) */ static inline void fsnotify_open(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); __u32 mask = FS_OPEN; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; if (file->f_flags & __FMODE_EXEC) mask |= FS_OPEN_EXEC; - fsnotify_path(inode, path, mask); + fsnotify_file(file, mask); } /* @@ -278,16 +272,10 @@ static inline void fsnotify_open(struct file *file) */ static inline void fsnotify_close(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - fmode_t mode = file->f_mode; - __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; + __u32 mask = (file->f_mode & FMODE_WRITE) ? 
FS_CLOSE_WRITE : + FS_CLOSE_NOWRITE; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - if (!(file->f_mode & FMODE_NONOTIFY)) - fsnotify_path(inode, path, mask); + fsnotify_file(file, mask); } /* @@ -295,14 +283,7 @@ static inline void fsnotify_close(struct file *file) */ static inline void fsnotify_xattr(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; - __u32 mask = FS_ATTRIB; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify_parent(NULL, dentry, mask); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_dentry(dentry, FS_ATTRIB); } /* @@ -311,7 +292,6 @@ static inline void fsnotify_xattr(struct dentry *dentry) */ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { - struct inode *inode = dentry->d_inode; __u32 mask = 0; if (ia_valid & ATTR_UID) @@ -332,13 +312,8 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) if (ia_valid & ATTR_MODE) mask |= FS_ATTRIB; - if (mask) { - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify_parent(NULL, dentry, mask); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); - } + if (mask) + fsnotify_dentry(dentry, mask); } #endif /* _LINUX_FS_NOTIFY_H */ -- cgit v1.2.3-58-ga151 From a1aae0570a2b806937120921db2c5d3100ca55fc Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:11 +0200 Subject: fsnotify: funnel all dirent events through fsnotify_name() Factor out fsnotify_name() from fsnotify_dirent(), so it can also serve link and rename events and use this helper to report all directory entry change events. Both helpers return void because no caller checks their return value. Link: https://lore.kernel.org/r/20200319151022.31456-4-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify.h | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f54936aa0365..751da17e003d 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -18,16 +18,24 @@ #include /* - * Notify this @dir inode about a change in the directory entry @dentry. + * Notify this @dir inode about a change in a child directory entry. + * The directory entry may have turned positive or negative or its inode may + * have changed (i.e. renamed over). * * Unlike fsnotify_parent(), the event will be reported regardless of the * FS_EVENT_ON_CHILD mask on the parent inode. */ -static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry, - __u32 mask) +static inline void fsnotify_name(struct inode *dir, __u32 mask, + struct inode *child, + const struct qstr *name, u32 cookie) { - return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, - &dentry->d_name, 0); + fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); +} + +static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, + __u32 mask) +{ + fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } /* Notify this dentry's parent about a child's events. 
*/ @@ -136,10 +144,8 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, mask |= FS_ISDIR; } - fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, - fs_cookie); - fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, - fs_cookie); + fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); + fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie); if (target) fsnotify_link_count(target); @@ -194,12 +200,13 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) * Note: We have to pass also the linked inode ptr as some filesystems leave * new_dentry->d_inode NULL and instantiate inode pointer later */ -static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) +static inline void fsnotify_link(struct inode *dir, struct inode *inode, + struct dentry *new_dentry) { fsnotify_link_count(inode); audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0); + fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0); } /* -- cgit v1.2.3-58-ga151 From aa93bdc5500cc93ba31afeda1a61610d117947ad Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:12 +0200 Subject: fsnotify: use helpers to access data by data_type Create helpers to access path and inode from different data types. Link: https://lore.kernel.org/r/20200319151022.31456-5-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 18 ++++++++---------- fs/notify/fsnotify.c | 5 +++-- fs/notify/inotify/inotify_fsnotify.c | 8 +++----- include/linux/fsnotify_backend.h | 34 ++++++++++++++++++++++++++++++---- kernel/audit_fsnotify.c | 13 ++----------- kernel/audit_watch.c | 16 ++-------------- 6 files changed, 48 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 5778d1347b35..19ec7a4f4d50 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -151,7 +151,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, { __u32 marks_mask = 0, marks_ignored_mask = 0; __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS; - const struct path *path = data; + const struct path *path = fsnotify_data_path(data, data_type); struct fsnotify_mark *mark; int type; @@ -160,7 +160,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { /* Do we have path to open a file descriptor? 
*/ - if (data_type != FSNOTIFY_EVENT_PATH) + if (!path) return 0; /* Path type events are only relevant for files and dirs */ if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry)) @@ -269,11 +269,8 @@ static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask, { if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) return to_tell; - else if (data_type == FSNOTIFY_EVENT_INODE) - return (struct inode *)data; - else if (data_type == FSNOTIFY_EVENT_PATH) - return d_inode(((struct path *)data)->dentry); - return NULL; + + return (struct inode *)fsnotify_data_inode(data, data_type); } struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, @@ -284,6 +281,7 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, struct fanotify_event *event = NULL; gfp_t gfp = GFP_KERNEL_ACCOUNT; struct inode *id = fanotify_fid_inode(inode, mask, data, data_type); + const struct path *path = fsnotify_data_path(data, data_type); /* * For queues with unlimited length lost events are not expected and @@ -324,10 +322,10 @@ init: __maybe_unused if (id && FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { /* Report the event without a file identifier on encode error */ event->fh_type = fanotify_encode_fid(event, id, gfp, fsid); - } else if (data_type == FSNOTIFY_EVENT_PATH) { + } else if (path) { event->fh_type = FILEID_ROOT; - event->path = *((struct path *)data); - path_get(&event->path); + event->path = *path; + path_get(path); } else { event->fh_type = FILEID_INVALID; event->path.mnt = NULL; diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 46f225580009..a5d6467f89a0 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -318,6 +318,7 @@ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, const struct qstr *file_name, u32 cookie) { + const struct path *path = fsnotify_data_path(data, data_is); struct fsnotify_iter_info iter_info = {}; struct super_block *sb = to_tell->i_sb; struct mount *mnt = NULL; @@ -325,8 +326,8 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, int ret = 0; __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); - if (data_is == FSNOTIFY_EVENT_PATH) { - mnt = real_mount(((const struct path *)data)->mnt); + if (path) { + mnt = real_mount(path->mnt); mnt_or_sb_mask |= mnt->mnt_fsnotify_mask; } /* An event "on child" is not intended for a mount/sb mark */ diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index d510223d302c..6bb98522bbfd 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -61,6 +61,7 @@ int inotify_handle_event(struct fsnotify_group *group, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { + const struct path *path = fsnotify_data_path(data, data_type); struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct inotify_inode_mark *i_mark; struct inotify_event_info *event; @@ -73,12 +74,9 @@ int inotify_handle_event(struct fsnotify_group *group, return 0; if ((inode_mark->mask & FS_EXCL_UNLINK) && - (data_type == FSNOTIFY_EVENT_PATH)) { - const struct path *path = data; + path && d_unlinked(path->dentry)) + return 0; - if (d_unlinked(path->dentry)) - return 0; - } if (file_name) { len = file_name->len; alloc_len += len + 1; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index db3cabb4600e..5cc838db422a 100644 --- 
a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -212,10 +212,36 @@ struct fsnotify_group { }; }; -/* when calling fsnotify tell it if the data is a path or inode */ -#define FSNOTIFY_EVENT_NONE 0 -#define FSNOTIFY_EVENT_PATH 1 -#define FSNOTIFY_EVENT_INODE 2 +/* When calling fsnotify tell it if the data is a path or inode */ +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE, + FSNOTIFY_EVENT_PATH, + FSNOTIFY_EVENT_INODE, +}; + +static inline const struct inode *fsnotify_data_inode(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_INODE: + return data; + case FSNOTIFY_EVENT_PATH: + return d_inode(((const struct path *)data)->dentry); + default: + return NULL; + } +} + +static inline const struct path *fsnotify_data_path(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_PATH: + return data; + default: + return NULL; + } +} enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index f0d243318452..3596448bfdab 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -160,23 +160,14 @@ static int audit_mark_handle_event(struct fsnotify_group *group, { struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct audit_fsnotify_mark *audit_mark; - const struct inode *inode = NULL; + const struct inode *inode = fsnotify_data_inode(data, data_type); audit_mark = container_of(inode_mark, struct audit_fsnotify_mark, mark); BUG_ON(group != audit_fsnotify_group); - switch (data_type) { - case (FSNOTIFY_EVENT_PATH): - inode = ((const struct path *)data)->dentry->d_inode; - break; - case (FSNOTIFY_EVENT_INODE): - inode = (const struct inode *)data; - break; - default: - BUG(); + if (WARN_ON(!inode)) return 0; - } if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) { if (audit_compare_dname_path(dname, audit_mark->path, AUDIT_NAME_FULL)) diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 4508d5e0cf69..dcfbb44c6720 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -473,25 +473,13 @@ static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); - const struct inode *inode; + const struct inode *inode = fsnotify_data_inode(data, data_type); struct audit_parent *parent; parent = container_of(inode_mark, struct audit_parent, mark); BUG_ON(group != audit_watch_group); - - switch (data_type) { - case (FSNOTIFY_EVENT_PATH): - inode = d_backing_inode(((const struct path *)data)->dentry); - break; - case (FSNOTIFY_EVENT_INODE): - inode = (const struct inode *)data; - break; - default: - BUG(); - inode = NULL; - break; - } + WARN_ON(!inode); if (mask & (FS_CREATE|FS_MOVED_TO) && inode) audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0); -- cgit v1.2.3-58-ga151 From 017de65fe58f2b0ca428b5830609520ded5898b9 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:13 +0200 Subject: fsnotify: simplify arguments passing to fsnotify_parent() Instead of passing both dentry and path and having to figure out which one to use, pass data/data_type to simplify the code. 
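As a rough illustration (not part of the patch), a caller reporting an event on a file under the new calling convention might look like the sketch below; the helper name is hypothetical, and the pattern mirrors fsnotify_file() above:

	/* Hypothetical caller: report FS_ACCESS on an open file. The path is
	 * passed as data with FSNOTIFY_EVENT_PATH, so the parent watch and
	 * the inode watch receive the same payload. */
	static void example_notify_access(struct file *file)
	{
		const struct path *path = &file->f_path;

		if (fsnotify_parent(path->dentry, FS_ACCESS, path,
				    FSNOTIFY_EVENT_PATH))
			return;

		fsnotify(file_inode(file), FS_ACCESS, path,
			 FSNOTIFY_EVENT_PATH, NULL, 0);
	}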
Link: https://lore.kernel.org/r/20200319151022.31456-6-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fsnotify.c | 15 ++++----------- include/linux/fsnotify.h | 14 ++------------ include/linux/fsnotify_backend.h | 14 ++++++++------ 3 files changed, 14 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index a5d6467f89a0..193530f57963 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -143,15 +143,13 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) } /* Notify this dentry's parent about a child's events. */ -int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) +int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, + int data_type) { struct dentry *parent; struct inode *p_inode; int ret = 0; - if (!dentry) - dentry = path->dentry; - if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) return 0; @@ -168,12 +166,7 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask mask |= FS_EVENT_ON_CHILD; take_dentry_name_snapshot(&name, dentry); - if (path) - ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH, - &name.name, 0); - else - ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE, - &name.name, 0); + ret = fsnotify(p_inode, mask, data, data_type, &name.name, 0); release_dentry_name_snapshot(&name); } @@ -181,7 +174,7 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask return ret; } -EXPORT_SYMBOL_GPL(__fsnotify_parent); +EXPORT_SYMBOL_GPL(fsnotify_parent); static int send_to_group(struct inode *to_tell, __u32 mask, const void *data, diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 751da17e003d..860018f3e545 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -38,16 +38,6 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } -/* Notify this dentry's parent about a child's events. */ -static inline int fsnotify_parent(const struct path *path, - struct dentry *dentry, __u32 mask) -{ - if (!dentry) - dentry = path->dentry; - - return __fsnotify_parent(path, dentry, mask); -} - /* * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when * an event is on a file/dentry. 
@@ -59,7 +49,7 @@ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - fsnotify_parent(NULL, dentry, mask); + fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE); fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } @@ -75,7 +65,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - ret = fsnotify_parent(path, NULL, mask); + ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); if (ret) return ret; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 5cc838db422a..337c87cf34d6 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -376,9 +376,10 @@ struct fsnotify_mark { /* called from the vfs helpers */ /* main fsnotify call to send events */ -extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, - const struct qstr *name, u32 cookie); -extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); +extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, + int data_type, const struct qstr *name, u32 cookie); +extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, + int data_type); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); @@ -533,13 +534,14 @@ static inline void fsnotify_init_event(struct fsnotify_event *event, #else -static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, - const struct qstr *name, u32 cookie) +static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, + int data_type, const struct qstr *name, u32 cookie) { return 0; } -static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) +static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, + const void *data, int data_type) { return 0; } -- cgit v1.2.3-58-ga151 From 05635c14a292de0e1a221dc31c04aba3913f03c8 Mon Sep 17 00:00:00 2001 From: Jungseung Lee Date: Wed, 18 Mar 2020 21:06:14 +0900 Subject: mtd: spi-nor: Add SR 4bit block protection support Currently we are supporting block protection only for flash chips with 3 block protection bits (BP0-2) in the SR register. Enable block protection support for flashes with 4 block protection bits (BP0-3). Add a flash_info flag for flashes that describe 4 block protection bits. Add another flash_info flag for flashes in which BP3 bit is not adjacent to the BP0-2 bits. Tested with a n25q512ax3 (BP0-3) and w25q128 (BP0-2). 
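For illustration only, a flash_info table entry opting in to the new flags could look like the sketch below; the part name and INFO() geometry are made up, and per the flag definitions that follow, SPI_NOR_BP3_SR_BIT6 must be combined with SPI_NOR_4BIT_BP:

	/* Hypothetical entry: a part with BP0-3, where BP3 lives in status
	 * register bit 6 rather than adjacent to BP0-2. */
	{ "example-nor", INFO(0x001940, 0, 64 * 1024, 1024,
			      SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
			      SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },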
Signed-off-by: Jungseung Lee Reviewed-by: Michael Walle Tested-by: Michael Walle Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/core.c | 66 ++++++++++++++++++++++++++++++++------------- drivers/mtd/spi-nor/core.h | 10 +++++++ include/linux/mtd/spi-nor.h | 2 ++ 3 files changed, 60 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 8146d82afe61..cc68ea84318e 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -1536,13 +1536,34 @@ erase_err: return ret; } +static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor) +{ + u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6) + return mask | SR_BP3_BIT6; + + if (nor->flags & SNOR_F_HAS_4BIT_BP) + return mask | SR_BP3; + + return mask; +} + +static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor) +{ + if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) + return SR_TB_BIT6; + else + return SR_TB_BIT5; +} + static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor) { unsigned int bp_slots, bp_slots_needed; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; + u8 mask = spi_nor_get_sr_bp_mask(nor); /* Reserved one for "protect none" and one for "protect all". */ - bp_slots = (mask >> SR_BP_SHIFT) + 1 - 2; + bp_slots = (1 << hweight8(mask)) - 2; bp_slots_needed = ilog2(nor->info->n_sectors); if (bp_slots_needed > bp_slots) @@ -1557,12 +1578,14 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs, { struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 tb_mask = SR_TB_BIT5; - u8 bp = (sr & mask) >> SR_BP_SHIFT; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); + u8 bp, val = sr & mask; - if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) - tb_mask = SR_TB_BIT6; + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6) + val = (val & ~SR_BP3_BIT6) | SR_BP3; + + bp = val >> SR_BP_SHIFT; if (!bp) { /* No protection */ @@ -1620,7 +1643,8 @@ static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, /* * Lock a region of the flash. Compatible with ST Micro and similar flash. - * Supports the block protection bits BP{0,1,2} in the status register + * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status + * register * (SR). 
Does not support these features found in newer SR bitfields: * - SEC: sector/block protect - only handle SEC=0 (block protect) * - CMP: complement protect - only support CMP=0 (range is not complemented) @@ -1655,8 +1679,8 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; int ret, status_old, status_new; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 tb_mask = SR_TB_BIT5; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); u8 pow, val; loff_t lock_len; bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; @@ -1693,9 +1717,6 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) else lock_len = ofs + len; - if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) - tb_mask = SR_TB_BIT6; - if (lock_len == mtd->size) { val = mask; } else { @@ -1703,6 +1724,9 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) pow = ilog2(lock_len) - ilog2(min_prot_len) + 1; val = pow << SR_BP_SHIFT; + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3) + val = (val & ~SR_BP3) | SR_BP3_BIT6; + if (val & ~mask) return -EINVAL; @@ -1740,8 +1764,8 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) struct mtd_info *mtd = &nor->mtd; u64 min_prot_len; int ret, status_old, status_new; - u8 mask = SR_BP2 | SR_BP1 | SR_BP0; - u8 tb_mask = SR_TB_BIT5; + u8 mask = spi_nor_get_sr_bp_mask(nor); + u8 tb_mask = spi_nor_get_sr_tb_mask(nor); u8 pow, val; loff_t lock_len; bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; @@ -1778,9 +1802,6 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) else lock_len = ofs; - if (nor->flags & SNOR_F_HAS_SR_TB_BIT6) - tb_mask = SR_TB_BIT6; - if (lock_len == 0) { val = 0; /* fully unlocked */ } else { @@ -1788,6 +1809,9 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) pow = ilog2(lock_len) - ilog2(min_prot_len) + 1; val = pow << SR_BP_SHIFT; + if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3) + val = (val & ~SR_BP3) | SR_BP3_BIT6; + /* Some power-of-two sizes are not supported */ if (val & ~mask) return -EINVAL; @@ -3147,6 +3171,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (info->flags & USE_CLSR) nor->flags |= SNOR_F_USE_CLSR; + if (info->flags & SPI_NOR_4BIT_BP) { + nor->flags |= SNOR_F_HAS_4BIT_BP; + if (info->flags & SPI_NOR_BP3_SR_BIT6) + nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; + } + if (info->flags & SPI_NOR_NO_ERASE) mtd->flags |= MTD_NO_ERASE; diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 3ce826b35ad1..6f2f6b27173f 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -24,6 +24,8 @@ enum spi_nor_option_flags { SNOR_F_HAS_16BIT_SR = BIT(9), SNOR_F_NO_READ_CR = BIT(10), SNOR_F_HAS_SR_TB_BIT6 = BIT(11), + SNOR_F_HAS_4BIT_BP = BIT(12), + SNOR_F_HAS_SR_BP3_BIT6 = BIT(13), }; struct spi_nor_read_command { @@ -301,6 +303,14 @@ struct flash_info { * status register. Must be used with * SPI_NOR_HAS_TB. */ +#define SPI_NOR_4BIT_BP BIT(17) /* + * Flash SR has 4 bit fields (BP0-3) + * for block protection. + */ +#define SPI_NOR_BP3_SR_BIT6 BIT(18) /* + * BP3 is bit 6 of status register. + * Must be used with SPI_NOR_4BIT_BP. + */ /* Part specific fixup hooks. 
*/ const struct spi_nor_fixups *fixups; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index e656858b50a5..1e2af0ec1f03 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -111,7 +111,9 @@ #define SR_BP0 BIT(2) /* Block protect 0 */ #define SR_BP1 BIT(3) /* Block protect 1 */ #define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_BP3 BIT(5) /* Block protect 3 */ #define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */ +#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */ #define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */ #define SR_SRWD BIT(7) /* SR write protect */ /* Spansion/Cypress specific status bits */ -- cgit v1.2.3-58-ga151 From dfc2d2594e4a79204a3967585245f00644b8f838 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:15 +0200 Subject: fsnotify: replace inode pointer with an object id The event inode field is used only for comparison in queue merges and cannot be dereferenced after handle_event(), because it does not hold a refcount on the inode. Replace it with an abstract id to do the same thing. Link: https://lore.kernel.org/r/20200319151022.31456-8-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 4 ++-- fs/notify/inotify/inotify_fsnotify.c | 4 ++-- fs/notify/inotify/inotify_user.c | 2 +- include/linux/fsnotify_backend.h | 7 +++---- 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 19ec7a4f4d50..6a202aaf941f 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn, old = FANOTIFY_E(old_fsn); new = FANOTIFY_E(new_fsn); - if (old_fsn->inode != new_fsn->inode || old->pid != new->pid || + if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid || old->fh_type != new->fh_type || old->fh_len != new->fh_len) return false; @@ -312,7 +312,7 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, if (!event) goto out; init: __maybe_unused - fsnotify_init_event(&event->fse, inode); + fsnotify_init_event(&event->fse, (unsigned long)inode); event->mask = mask; if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) event->pid = get_pid(task_pid(current)); diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 6bb98522bbfd..2ebc89047153 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn, if (old->mask & FS_IN_IGNORED) return false; if ((old->mask == new->mask) && - (old_fsn->inode == new_fsn->inode) && + (old_fsn->objectid == new_fsn->objectid) && (old->name_len == new->name_len) && (!old->name_len || !strcmp(old->name, new->name))) return true; @@ -116,7 +116,7 @@ int inotify_handle_event(struct fsnotify_group *group, mask &= ~IN_ISDIR; fsn_event = &event->fse; - fsnotify_init_event(fsn_event, inode); + fsnotify_init_event(fsn_event, (unsigned long)inode); event->mask = mask; event->wd = i_mark->wd; event->sync_cookie = cookie; diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 107537a543fd..81ffc8629fc4 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) return ERR_PTR(-ENOMEM); } group->overflow_event = &oevent->fse; - 
fsnotify_init_event(group->overflow_event, NULL); + fsnotify_init_event(group->overflow_event, 0); oevent->mask = FS_Q_OVERFLOW; oevent->wd = -1; oevent->sync_cookie = 0; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 337c87cf34d6..c72cbea20ef7 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -132,8 +132,7 @@ struct fsnotify_ops { */ struct fsnotify_event { struct list_head list; - /* inode may ONLY be dereferenced during handle_event(). */ - struct inode *inode; /* either the inode the event happened to or its parent */ + unsigned long objectid; /* identifier for queue merges */ }; /* @@ -526,10 +525,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); static inline void fsnotify_init_event(struct fsnotify_event *event, - struct inode *inode) + unsigned long objectid) { INIT_LIST_HEAD(&event->list); - event->inode = inode; + event->objectid = objectid; } #else -- cgit v1.2.3-58-ga151 From 821747386cb6cd75593a8854208b8af188b4caed Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 24 Mar 2020 11:40:44 +0530 Subject: bus: mhi: core: Pass module owner during client driver registration The module owner field can be used to prevent the removal of kernel modules when there are any device files associated with it opened in userspace. Hence, modify the API to pass module owner field. For convenience, module_mhi_driver() macro is used which takes care of passing the module owner through THIS_MODULE of the module of the driver and also avoiding the use of specifying the default MHI client driver register/unregister routines. Suggested-by: Greg Kroah-Hartman Signed-off-by: Manivannan Sadhasivam Reviewed-by: Bjorn Andersson Link: https://lore.kernel.org/r/20200324061050.14845-2-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/init.c | 5 +++-- include/linux/mhi.h | 21 +++++++++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 5fb756ca335e..eb7f556a8531 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -1189,7 +1189,7 @@ static int mhi_driver_remove(struct device *dev) return 0; } -int mhi_driver_register(struct mhi_driver *mhi_drv) +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) { struct device_driver *driver = &mhi_drv->driver; @@ -1197,12 +1197,13 @@ int mhi_driver_register(struct mhi_driver *mhi_drv) return -EINVAL; driver->bus = &mhi_bus_type; + driver->owner = owner; driver->probe = mhi_driver_probe; driver->remove = mhi_driver_remove; return driver_register(driver); } -EXPORT_SYMBOL_GPL(mhi_driver_register); +EXPORT_SYMBOL_GPL(__mhi_driver_register); void mhi_driver_unregister(struct mhi_driver *mhi_drv) { diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 79cb9f898544..d83e7772681b 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -514,11 +514,28 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, */ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl); +/* + * module_mhi_driver() - Helper macro for drivers that don't do + * anything special other than using default mhi_driver_register() and + * mhi_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. 
+ */ +#define module_mhi_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_driver_register, \ + mhi_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_driver_register(mhi_drv) \ + __mhi_driver_register(mhi_drv, THIS_MODULE) + /** - * mhi_driver_register - Register driver with MHI framework + * __mhi_driver_register - Register driver with MHI framework * @mhi_drv: Driver associated with the device + * @owner: The module owner */ -int mhi_driver_register(struct mhi_driver *mhi_drv); +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner); /** * mhi_driver_unregister - Unregister a driver for mhi_devices -- cgit v1.2.3-58-ga151 From c0560f51cf77472f4ed113539b0a02ca6cda7961 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Tue, 24 Mar 2020 09:27:56 -0600 Subject: vfio: allow external user to get vfio group from device An external user calls vfio_group_get_external_user_from_dev() with a device pointer to get the VFIO group associated with this device. The VFIO group is checked to be viable and to have an IOMMU set. Then the container user counter is increased and a VFIO group reference is held to prevent the VFIO group from disposal before the external user exits. When the external user finishes using the VFIO group, it calls vfio_group_put_external_user() to release the VFIO group and decrement the container user counter. Suggested-by: Alex Williamson Signed-off-by: Yan Zhao Signed-off-by: Alex Williamson --- drivers/vfio/vfio.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/vfio.h | 2 ++ 2 files changed, 40 insertions(+) (limited to 'include/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index c8482624ca34..97b972bfb735 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1720,6 +1720,44 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep) } EXPORT_SYMBOL_GPL(vfio_group_get_external_user); +/** + * External user API, exported by symbols to be linked dynamically. + * The external user passes in a device pointer + * to verify that: + * - A VFIO group is associated with the device; + * - IOMMU is set for the group. + * If both checks passed, vfio_group_get_external_user_from_dev() + * increments the container user counter to prevent the VFIO group + * from disposal before external user exits and returns the pointer + * to the VFIO group. + * + * When the external user finishes using the VFIO group, it calls + * vfio_group_put_external_user() to release the VFIO group and + * decrement the container user counter. + * + * @dev [in] : device + * Return error PTR or pointer to VFIO group.
+ */ + +struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev) +{ + struct vfio_group *group; + int ret; + + group = vfio_group_get_from_dev(dev); + if (!group) + return ERR_PTR(-ENODEV); + + ret = vfio_group_add_container_user(group); + if (ret) { + vfio_group_put(group); + return ERR_PTR(ret); + } + + return group; +} +EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev); + void vfio_group_put_external_user(struct vfio_group *group) { vfio_group_try_dissolve_container(group); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index e42a711a2800..fb71e0ac0e76 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -94,6 +94,8 @@ extern void vfio_unregister_iommu_driver( */ extern struct vfio_group *vfio_group_get_external_user(struct file *filep); extern void vfio_group_put_external_user(struct vfio_group *group); +extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device + *dev); extern bool vfio_external_group_match_file(struct vfio_group *group, struct file *filep); extern int vfio_external_user_iommu_id(struct vfio_group *group); -- cgit v1.2.3-58-ga151 From 8d46c0cca5f4dc0538173d62cd36b1119b5105bc Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Tue, 24 Mar 2020 09:27:57 -0600 Subject: vfio: introduce vfio_dma_rw to read/write a range of IOVAs vfio_dma_rw will read/write a range of user space memory pointed to by IOVA into/from a kernel buffer without requiring the user space memory to be pinned. TODO: mark the user space memory pointed to by the IOVAs dirty if it is written in vfio_dma_rw(). Cc: Kevin Tian Signed-off-by: Yan Zhao Signed-off-by: Alex Williamson --- drivers/vfio/vfio.c | 49 ++++++++++++++++++++++++++ drivers/vfio/vfio_iommu_type1.c | 76 +++++++++++++++++++++++++++++++++++++++++ include/linux/vfio.h | 5 +++ 3 files changed, 130 insertions(+) (limited to 'include/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 97b972bfb735..6997f711b925 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1999,6 +1999,55 @@ err_unpin_pages: } EXPORT_SYMBOL(vfio_unpin_pages); + +/* + * This interface allows the CPUs to perform some sort of virtual DMA on + * behalf of the device. + * + * CPUs read/write from/into a range of IOVAs pointing to user space memory + * into/from a kernel buffer. + * + * As the read/write of user space memory is conducted via the CPUs and is + * not a real device DMA, it is not necessary to pin the user space memory. + * + * The caller needs to call vfio_group_get_external_user() or + * vfio_group_get_external_user_from_dev() prior to calling this interface, + * so as to prevent the VFIO group from disposal in the middle of the call. + * But it can keep the reference to the VFIO group for several calls into + * this interface. + * After finishing using the VFIO group, the caller needs to release the + * VFIO group by calling vfio_group_put_external_user(). + * + * @group [in] : VFIO group + * @user_iova [in] : base IOVA of a user space buffer + * @data [in] : pointer to kernel buffer + * @len [in] : kernel buffer length + * @write : indicate read or write + * Return error code on failure or 0 on success.
+ */ +int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, + void *data, size_t len, bool write) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret = 0; + + if (!group || !data || len <= 0) + return -EINVAL; + + container = group->container; + driver = container->iommu_driver; + + if (likely(driver && driver->ops->dma_rw)) + ret = driver->ops->dma_rw(container->iommu_data, + user_iova, data, len, write); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_dma_rw); + static int vfio_register_iommu_notifier(struct vfio_group *group, unsigned long *events, struct notifier_block *nb) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index a177bf2c6683..9fdfae1cb17a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -2305,6 +2306,80 @@ static int vfio_iommu_type1_unregister_notifier(void *iommu_data, return blocking_notifier_chain_unregister(&iommu->notifier, nb); } +static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, + dma_addr_t user_iova, void *data, + size_t count, bool write, + size_t *copied) +{ + struct mm_struct *mm; + unsigned long vaddr; + struct vfio_dma *dma; + bool kthread = current->mm == NULL; + size_t offset; + + *copied = 0; + + dma = vfio_find_dma(iommu, user_iova, 1); + if (!dma) + return -EINVAL; + + if ((write && !(dma->prot & IOMMU_WRITE)) || + !(dma->prot & IOMMU_READ)) + return -EPERM; + + mm = get_task_mm(dma->task); + + if (!mm) + return -EPERM; + + if (kthread) + use_mm(mm); + else if (current->mm != mm) + goto out; + + offset = user_iova - dma->iova; + + if (count > dma->size - offset) + count = dma->size - offset; + + vaddr = dma->vaddr + offset; + + if (write) + *copied = __copy_to_user((void __user *)vaddr, data, + count) ? 0 : count; + else + *copied = __copy_from_user(data, (void __user *)vaddr, + count) ? 0 : count; + if (kthread) + unuse_mm(mm); +out: + mmput(mm); + return *copied ? 
0 : -EFAULT; +} + +static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova, + void *data, size_t count, bool write) +{ + struct vfio_iommu *iommu = iommu_data; + int ret = 0; + size_t done; + + mutex_lock(&iommu->lock); + while (count > 0) { + ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data, + count, write, &done); + if (ret) + break; + + count -= done; + data += done; + user_iova += done; + } + + mutex_unlock(&iommu->lock); + return ret; +} + static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { .name = "vfio-iommu-type1", .owner = THIS_MODULE, @@ -2317,6 +2392,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { .unpin_pages = vfio_iommu_type1_unpin_pages, .register_notifier = vfio_iommu_type1_register_notifier, .unregister_notifier = vfio_iommu_type1_unregister_notifier, + .dma_rw = vfio_iommu_type1_dma_rw, }; static int __init vfio_iommu_type1_init(void) diff --git a/include/linux/vfio.h b/include/linux/vfio.h index fb71e0ac0e76..34b2fdf4de6e 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -82,6 +82,8 @@ struct vfio_iommu_driver_ops { struct notifier_block *nb); int (*unregister_notifier)(void *iommu_data, struct notifier_block *nb); + int (*dma_rw)(void *iommu_data, dma_addr_t user_iova, + void *data, size_t count, bool write); }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); @@ -109,6 +111,9 @@ extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage); +extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, + void *data, size_t len, bool write); + /* each type has independent events */ enum vfio_notify_type { VFIO_IOMMU_NOTIFY = 0, -- cgit v1.2.3-58-ga151 From 40280cf7e8ca7d31bb0a9d626f36f458fec32815 Mon Sep 17 00:00:00 2001 From: Yan Zhao Date: Tue, 24 Mar 2020 09:27:57 -0600 Subject: vfio: avoid inefficient operations on VFIO group in vfio_pin/unpin_pages vfio_group_pin_pages() and vfio_group_unpin_pages() are introduced to avoid the inefficient search/check/ref/deref operations on the VFIO group that are otherwise performed in each call into vfio_pin_pages() and vfio_unpin_pages(). The VFIO group is taken as an argument directly. The callers combine the search/check/ref/deref operations associated with the VFIO group by calling vfio_group_get_external_user()/vfio_group_get_external_user_from_dev() beforehand, and vfio_group_put_external_user() afterwards. Suggested-by: Alex Williamson Signed-off-by: Yan Zhao Signed-off-by: Alex Williamson --- drivers/vfio/vfio.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/vfio.h | 6 ++++ 2 files changed, 97 insertions(+) (limited to 'include/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 6997f711b925..210fcf426643 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1999,6 +1999,97 @@ err_unpin_pages: } EXPORT_SYMBOL(vfio_unpin_pages); +/* + * Pin a set of guest IOVA PFNs and return their associated host PFNs for a + * VFIO group. + * + * The caller needs to call vfio_group_get_external_user() or + * vfio_group_get_external_user_from_dev() prior to calling this interface, + * so as to prevent the VFIO group from disposal in the middle of the call. + * But it can keep the reference to the VFIO group for several calls into + * this interface. + * After finishing using the VFIO group, the caller needs to release the + * VFIO group by calling vfio_group_put_external_user().
+ * + * @group [in] : VFIO group + * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned. + * @npage [in] : count of elements in user_iova_pfn array. + * This count should not be greater than + * VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @phys_pfn [out] : array of host PFNs + * Return error or number of pages pinned. + */ +int vfio_group_pin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage, + int prot, unsigned long *phys_pfn) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + if (!group || !user_iova_pfn || !phys_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->pin_pages)) + ret = driver->ops->pin_pages(container->iommu_data, + user_iova_pfn, npage, + prot, phys_pfn); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_group_pin_pages); + +/* + * Unpin a set of guest IOVA PFNs for a VFIO group. + * + * The caller needs to call vfio_group_get_external_user() or + * vfio_group_get_external_user_from_dev() prior to calling this interface, + * so as to prevent the VFIO group from disposal in the middle of the call. + * But it can keep the reference to the VFIO group for several calls into + * this interface. + * After finishing using the VFIO group, the caller needs to release the + * VFIO group by calling vfio_group_put_external_user(). + * + * @group [in] : vfio group + * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned. + * @npage [in] : count of elements in user_iova_pfn array. + * This count should not be greater than + * VFIO_PIN_PAGES_MAX_ENTRIES. + * Return error or number of pages unpinned. + */ +int vfio_group_unpin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + if (!group || !user_iova_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->unpin_pages)) + ret = driver->ops->unpin_pages(container->iommu_data, + user_iova_pfn, npage); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_group_unpin_pages); + /* * This interface allows the CPUs to perform some sort of virtual DMA on diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 34b2fdf4de6e..be2bd358b952 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -111,6 +111,12 @@ extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage); +extern int vfio_group_pin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage, + int prot, unsigned long *phys_pfn); +extern int vfio_group_unpin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage); + extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, void *data, size_t len, bool write); -- cgit v1.2.3-58-ga151 From 5f3874c2a2310d9bd6969ca6764961d27a843b9d Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 24 Mar 2020 09:28:25 -0600 Subject: vfio: Include optional device match in vfio_device_ops callbacks Allow bus drivers to provide their own callback to match a device to the user-provided string.
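A minimal sketch of a bus driver supplying the optional callback (the device type and its fields are illustrative, not from this patch):

	struct example_device {
		struct device *dev;
		const char *alias;	/* driver-specific user-visible name */
	};

	/* Hypothetical match callback: accept either the device name or a
	 * driver-specific alias. Returns 0 for no match, >0 for a match. */
	static int example_vfio_match(void *device_data, char *buf)
	{
		struct example_device *edev = device_data;

		return !strcmp(dev_name(edev->dev), buf) ||
		       !strcmp(edev->alias, buf);
	}

	static const struct vfio_device_ops example_vfio_ops = {
		.name	= "example-vfio",
		.match	= example_vfio_match,
		/* .open, .release, .ioctl, ... as before */
	};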
Reviewed-by: Cornelia Huck Reviewed-by: Kevin Tian Signed-off-by: Alex Williamson --- drivers/vfio/vfio.c | 20 ++++++++++++++++---- include/linux/vfio.h | 4 ++++ 2 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index c8482624ca34..0bd77d6ea691 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -875,11 +875,23 @@ EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, char *buf) { - struct vfio_device *it, *device = NULL; + struct vfio_device *it, *device = ERR_PTR(-ENODEV); mutex_lock(&group->device_lock); list_for_each_entry(it, &group->device_list, group_next) { - if (!strcmp(dev_name(it->dev), buf)) { + int ret; + + if (it->ops->match) { + ret = it->ops->match(it->device_data, buf); + if (ret < 0) { + device = ERR_PTR(ret); + break; + } + } else { + ret = !strcmp(dev_name(it->dev), buf); + } + + if (ret) { device = it; vfio_device_get(device); break; @@ -1430,8 +1442,8 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) return -EPERM; device = vfio_device_get_from_name(group, buf); - if (!device) - return -ENODEV; + if (IS_ERR(device)) + return PTR_ERR(device); ret = device->ops->open(device->device_data); if (ret) { diff --git a/include/linux/vfio.h b/include/linux/vfio.h index e42a711a2800..029694b977f2 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -26,6 +26,9 @@ * operations documented below * @mmap: Perform mmap(2) on a region of the device file descriptor * @request: Request for the bus driver to release the device + * @match: Optional device name match callback (return: 0 for no-match, >0 for + * match, -errno for abort (ex. match with insufficient or incorrect + * additional args) */ struct vfio_device_ops { char *name; @@ -39,6 +42,7 @@ struct vfio_device_ops { unsigned long arg); int (*mmap)(void *device_data, struct vm_area_struct *vma); void (*request)(void *device_data, unsigned int count); + int (*match)(void *device_data, char *buf); }; extern struct iommu_group *vfio_iommu_group_get(struct device *dev); -- cgit v1.2.3-58-ga151 From f05a3849f6449f67843113778bf56e02f2b4ddf8 Mon Sep 17 00:00:00 2001 From: "Thomas Hellstrom (VMware)" Date: Tue, 24 Mar 2020 18:46:48 +0100 Subject: fs: Constify vma argument to vma_is_dax MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function is used by upcoming vma_is_special_huge() with which we want to use a const vma argument. Since for vma_is_dax() the vma argument is only dereferenced for reading, constify it. Cc: Andrew Morton Cc: Michal Hocko Cc: "Matthew Wilcox (Oracle)" Cc: "Kirill A. 
Shutemov" Cc: Ralph Campbell Cc: "Jérôme Glisse" Cc: "Christian König" Cc: Dan Williams Signed-off-by: Thomas Hellstrom (VMware) Reviewed-by: Roland Scheidegger Acked-by: Christian König --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 3cd4fe6b845e..2b38ce5b73ad 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3391,7 +3391,7 @@ static inline bool io_is_direct(struct file *filp) return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); } -static inline bool vma_is_dax(struct vm_area_struct *vma) +static inline bool vma_is_dax(const struct vm_area_struct *vma) { return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); } -- cgit v1.2.3-58-ga151 From 2484ca9b6a20451debb789d0a89af6f15de99826 Mon Sep 17 00:00:00 2001 From: "Thomas Hellstrom (VMware)" Date: Tue, 24 Mar 2020 18:47:17 +0100 Subject: mm: Introduce vma_is_special_huge MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For VM_PFNMAP and VM_MIXEDMAP vmas that want to support transhuge pages and -page table entries, introduce vma_is_special_huge() that takes the same codepaths as vma_is_dax(). The use of "special" follows the definition in memory.c, vm_normal_page(): "Special" mappings do not wish to be associated with a "struct page" (either it doesn't exist, or it exists but they don't want to touch it) For PAGE_SIZE pages, "special" is determined per page table entry to be able to deal with COW pages. But since we don't have huge COW pages, we can classify a vma as either "special huge" or "normal huge". Cc: Andrew Morton Cc: Michal Hocko Cc: "Matthew Wilcox (Oracle)" Cc: "Kirill A. Shutemov" Cc: Ralph Campbell Cc: "Jérôme Glisse" Cc: "Christian König" Cc: Dan Williams Signed-off-by: Thomas Hellstrom (VMware) Acked-by: Christian König Acked-by: Andrew Morton --- include/linux/mm.h | 17 +++++++++++++++++ mm/huge_memory.c | 6 +++--- 2 files changed, 20 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index c54fb96cb1e6..bdd79a72bb42 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2867,6 +2867,23 @@ extern long copy_huge_page_from_user(struct page *dst_page, const void __user *usr_src, unsigned int pages_per_huge_page, bool allow_pagefault); + +/** + * vma_is_special_huge - Are transhuge page-table entries considered special? + * @vma: Pointer to the struct vm_area_struct to consider + * + * Whether transhuge page-table entries are considered "special" following + * the definition in vm_normal_page(). + * + * Return: true if transhuge page-table entries should be considered special, + * false otherwise. 
+ */ +static inline bool vma_is_special_huge(const struct vm_area_struct *vma) +{ + return vma_is_dax(vma) || (vma->vm_file && + (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); +} + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #ifdef CONFIG_DEBUG_PAGEALLOC diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 24ad53b4dfc0..19c8d462ab08 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1802,7 +1802,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, tlb->fullmm); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - if (vma_is_dax(vma)) { + if (vma_is_special_huge(vma)) { if (arch_needs_pgtable_deposit()) zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); @@ -2066,7 +2066,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, */ pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); tlb_remove_pud_tlb_entry(tlb, pud, addr); - if (vma_is_dax(vma)) { + if (vma_is_special_huge(vma)) { spin_unlock(ptl); /* No zero page support yet */ } else { @@ -2175,7 +2175,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ if (arch_needs_pgtable_deposit()) zap_deposited_table(mm, pmd); - if (vma_is_dax(vma)) + if (vma_is_special_huge(vma)) return; page = pmd_page(_pmd); if (!PageDirty(page) && pmd_dirty(_pmd)) -- cgit v1.2.3-58-ga151 From 9a9731b18c9bb70c023f0b2c731726fd5167673e Mon Sep 17 00:00:00 2001 From: "Thomas Hellstrom (VMware)" Date: Tue, 24 Mar 2020 18:48:09 +0100 Subject: mm: Add vmf_insert_pfn_xxx_prot() for huge page-table entries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For graphics drivers needing to modify the page-protection, add huge page-table entries counterparts to vmf_insert_pfn_prot(). Cc: Andrew Morton Cc: Michal Hocko Cc: "Matthew Wilcox (Oracle)" Cc: "Kirill A. Shutemov" Cc: Ralph Campbell Cc: "Jérôme Glisse" Cc: "Christian König" Cc: Dan Williams Signed-off-by: Thomas Hellstrom (VMware) Acked-by: Christian König Acked-by: Andrew Morton --- include/linux/huge_mm.h | 41 +++++++++++++++++++++++++++++++++++++++-- mm/huge_memory.c | 38 ++++++++++++++++++++++++++++++++------ 2 files changed, 71 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 5aca3d1bdb32..f63b0882c1b3 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -47,8 +47,45 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); -vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); -vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, + pgprot_t pgprot, bool write); + +/** + * vmf_insert_pfn_pmd - insert a pmd size pfn + * @vmf: Structure describing the fault + * @pfn: pfn to insert + * @pgprot: page protection to use + * @write: whether it's a write fault + * + * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. + * + * Return: vm_fault_t value. 
+ */ +static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, + bool write) +{ + return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); +} +vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, + pgprot_t pgprot, bool write); + +/** + * vmf_insert_pfn_pud - insert a pud size pfn + * @vmf: Structure describing the fault + * @pfn: pfn to insert + * @pgprot: page protection to use + * @write: whether it's a write fault + * + * Insert a pud size pfn. See vmf_insert_pfn() for additional info. + * + * Return: vm_fault_t value. + */ +static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, + bool write) +{ + return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); +} + enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 19c8d462ab08..4036d5e0a6f3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -824,11 +824,24 @@ out_unlock: pte_free(mm, pgtable); } -vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) +/** + * vmf_insert_pfn_pmd_prot - insert a pmd size pfn + * @vmf: Structure describing the fault + * @pfn: pfn to insert + * @pgprot: page protection to use + * @write: whether it's a write fault + * + * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and + * also consult the vmf_insert_mixed_prot() documentation when + * @pgprot != @vmf->vma->vm_page_prot. + * + * Return: vm_fault_t value. + */ +vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, + pgprot_t pgprot, bool write) { unsigned long addr = vmf->address & PMD_MASK; struct vm_area_struct *vma = vmf->vma; - pgprot_t pgprot = vma->vm_page_prot; pgtable_t pgtable = NULL; /* @@ -856,7 +869,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); return VM_FAULT_NOPAGE; } -EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); +EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot); #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) @@ -902,11 +915,24 @@ out_unlock: spin_unlock(ptl); } -vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) +/** + * vmf_insert_pfn_pud_prot - insert a pud size pfn + * @vmf: Structure describing the fault + * @pfn: pfn to insert + * @pgprot: page protection to use + * @write: whether it's a write fault + * + * Insert a pud size pfn. See vmf_insert_pfn() for additional info and + * also consult the vmf_insert_mixed_prot() documentation when + * @pgprot != @vmf->vma->vm_page_prot. + * + * Return: vm_fault_t value. 
+ */ +vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, + pgprot_t pgprot, bool write) { unsigned long addr = vmf->address & PUD_MASK; struct vm_area_struct *vma = vmf->vma; - pgprot_t pgprot = vma->vm_page_prot; /* * If we had pud_special, we could avoid all these restrictions, @@ -927,7 +953,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); return VM_FAULT_NOPAGE; } -EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); +EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot); #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, -- cgit v1.2.3-58-ga151 From e6282fc6f889debe4d6eb6332dc6e49739faa5cb Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 24 Mar 2020 14:32:11 +0200 Subject: i2c: core: Provide generic definitions for bus frequencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are few maximum bus frequencies being used in the I²C core code. Provide generic definitions for bus frequencies and use them in the core. The drivers may use predefined constants where it is appropriate. Some of them are already using these under slightly different names. We will convert them later to use newly introduced defines. Note, the name of modes are chosen to follow well established naming scheme [1]. These definitions will also help to avoid typos in the numbers that may lead to subtle errors. [1]: https://en.wikipedia.org/wiki/I%C2%B2C#Differences_between_modes Acked-by: Mika Westerberg Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-acpi.c | 2 +- drivers/i2c/i2c-core-base.c | 8 ++++---- include/linux/i2c.h | 8 ++++++++ 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 8f3dbc97a057..7665685e3ca8 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -318,7 +318,7 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, lookup->min_speed = lookup->speed; if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0) - lookup->force_speed = 400000; + lookup->force_speed = I2C_MAX_FAST_MODE_FREQ; return AE_OK; } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index cefad0881942..9b2972c7faa2 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1612,13 +1612,13 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de ret = device_property_read_u32(dev, "clock-frequency", &t->bus_freq_hz); if (ret && use_defaults) - t->bus_freq_hz = 100000; + t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; ret = device_property_read_u32(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns); if (ret && use_defaults) { - if (t->bus_freq_hz <= 100000) + if (t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) t->scl_rise_ns = 1000; - else if (t->bus_freq_hz <= 400000) + else if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) t->scl_rise_ns = 300; else t->scl_rise_ns = 120; @@ -1626,7 +1626,7 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de ret = device_property_read_u32(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns); if (ret && use_defaults) { - if (t->bus_freq_hz <= 400000) + if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) t->scl_fall_ns = 300; else t->scl_fall_ns = 120; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 
f834687989f7..72e759328cee 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -39,6 +39,14 @@ enum i2c_slave_event; typedef int (*i2c_slave_cb_t)(struct i2c_client *client, enum i2c_slave_event event, u8 *val); +/* I2C Frequency Modes */ +#define I2C_MAX_STANDARD_MODE_FREQ 100000 +#define I2C_MAX_FAST_MODE_FREQ 400000 +#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000 +#define I2C_MAX_TURBO_MODE_FREQ 1400000 +#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000 +#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000 + struct module; struct property_entry; -- cgit v1.2.3-58-ga151 From adc6162b9a0c60a81cf6a107196924526cd186f6 Mon Sep 17 00:00:00 2001 From: Mason Yang Date: Wed, 18 Mar 2020 15:42:27 +0800 Subject: mtd: rawnand: Add support for manufacturer specific suspend/resume operation Patch nand_suspend() & nand_resume() to let manufacturers override the suspend/resume operations. Signed-off-by: Mason Yang Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1584517348-14486-2-git-send-email-masonccyang@mxic.com.tw --- drivers/mtd/nand/raw/nand_base.c | 17 +++++++++++++---- include/linux/mtd/rawnand.h | 4 ++++ 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index a13b91aa3780..985a15a735af 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -4326,16 +4326,22 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) /** * nand_suspend - [MTD Interface] Suspend the NAND flash * @mtd: MTD device structure + * + * Returns 0 for success or negative error code otherwise. */ static int nand_suspend(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); + int ret = 0; mutex_lock(&chip->lock); - chip->suspended = 1; + if (chip->suspend) + ret = chip->suspend(chip); + if (!ret) + chip->suspended = 1; mutex_unlock(&chip->lock); - return 0; + return ret; } /** @@ -4347,11 +4353,14 @@ static void nand_resume(struct mtd_info *mtd) struct nand_chip *chip = mtd_to_nand(mtd); mutex_lock(&chip->lock); - if (chip->suspended) + if (chip->suspended) { + if (chip->resume) + chip->resume(chip); chip->suspended = 0; - else + } else { pr_err("%s called for a chip which is not in suspended state\n", __func__); + } mutex_unlock(&chip->lock); } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 49ed50fb44ab..1e76196f9829 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1064,6 +1064,8 @@ struct nand_legacy { * @lock: lock protecting the suspended field. Also used to * serialize accesses to the NAND device. * @suspended: set to 1 when the device is suspended, 0 when it's not. + * @suspend: [REPLACEABLE] specific NAND device suspend operation + * @resume: [REPLACEABLE] specific NAND device resume operation * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1119,6 +1121,8 @@ struct nand_chip { struct mutex lock; unsigned int suspended : 1; + int (*suspend)(struct nand_chip *chip); + void (*resume)(struct nand_chip *chip); uint8_t *oob_poi; struct nand_controller *controller; -- cgit v1.2.3-58-ga151 From 995bb1092326b8ba8fa29456c334ac6a49765ccd Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Mon, 16 Mar 2020 13:14:32 -0700 Subject: extcon: Mark extcon_get_edev_name() function as exported symbol The extcon_get_edev_name() function lets a client driver request the extcon device's name.
If the extcon driver and the client driver are compiled as loadable modules, the extcon_get_edev_name() function symbol is not visible to the client driver. Hence mark the extcon_get_edev_name() function as an exported symbol. Signed-off-by: Mayank Rana Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon.c | 1 + include/linux/extcon.h | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index e055893fd5c3..2dfbfec572f9 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1406,6 +1406,7 @@ const char *extcon_get_edev_name(struct extcon_dev *edev) { return !edev ? NULL : edev->name; } +EXPORT_SYMBOL_GPL(extcon_get_edev_name); static int __init extcon_class_init(void) { diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 1b1d77ec2114..fd183fb9c20f 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -286,6 +286,11 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, { return ERR_PTR(-ENODEV); } + +static inline const char *extcon_get_edev_name(struct extcon_dev *edev) +{ + return NULL; +} #endif /* CONFIG_EXTCON */ /* -- cgit v1.2.3-58-ga151 From d19d2de61fb131abcd29f7c61d3f168f687bfd6e Mon Sep 17 00:00:00 2001 From: Chuanhong Guo Date: Sun, 15 Mar 2020 20:13:37 +0800 Subject: gpio: mmio: introduce BGPIOF_NO_SET_ON_INPUT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some gpio controllers ignore pin value writes when the pin is configured in input mode. As a result, bgpio_dir_out should set the pin to output before configuring the pin value, or gpio pin values can't be set up properly. Introduce two variants of bgpio_dir_out: bgpio_dir_out_val_first and bgpio_dir_out_dir_first, and assign direction_output according to a new flag: BGPIOF_NO_SET_ON_INPUT.
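As a rough illustration (not taken from this patch), a driver for such a controller might request the new behaviour when registering with the gpio-mmio helper; the register offsets, names and probe fragment below are hypothetical:

#include <linux/gpio/driver.h>

/*
 * Hypothetical probe fragment for a controller whose data register
 * ignores writes while a line is in input mode: BGPIOF_NO_SET_ON_INPUT
 * selects bgpio_dir_out_dir_first(), which switches the line to output
 * before writing the value.
 */
static int foo_gpio_setup(struct device *dev, struct gpio_chip *gc,
			  void __iomem *base)
{
	return bgpio_init(gc, dev, 4,
			  base + 0x0,	/* dat: pin value */
			  base + 0x4,	/* set */
			  base + 0x8,	/* clr */
			  base + 0xc,	/* dirout */
			  NULL,		/* dirin */
			  BGPIOF_NO_SET_ON_INPUT);
}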
Signed-off-by: Chuanhong Guo Tested-by: René van Dorst Reviewed-by: Sergio Paracuellos Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpio-mmio.c | 23 +++++++++++++++++++---- include/linux/gpio/driver.h | 1 + 2 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c index f729e3e9e983..b778f33cc6af 100644 --- a/drivers/gpio/gpio-mmio.c +++ b/drivers/gpio/gpio-mmio.c @@ -389,12 +389,10 @@ static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio) return GPIO_LINE_DIRECTION_IN; } -static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; - gc->set(gc, gpio, val); - spin_lock_irqsave(&gc->bgpio_lock, flags); gc->bgpio_dir |= bgpio_line2mask(gc, gpio); @@ -405,7 +403,21 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) gc->write_reg(gc->reg_dir_out, gc->bgpio_dir); spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} +static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio, + int val) +{ + bgpio_dir_out(gc, gpio, val); + gc->set(gc, gpio, val); + return 0; +} + +static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio, + int val) +{ + gc->set(gc, gpio, val); + bgpio_dir_out(gc, gpio, val); return 0; } @@ -538,7 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc, if (dirout || dirin) { gc->reg_dir_out = dirout; gc->reg_dir_in = dirin; - gc->direction_output = bgpio_dir_out; + if (flags & BGPIOF_NO_SET_ON_INPUT) + gc->direction_output = bgpio_dir_out_dir_first; + else + gc->direction_output = bgpio_dir_out_val_first; gc->direction_input = bgpio_dir_in; gc->get_direction = bgpio_get_dir; } else { diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 6ef05bccc0a6..ed65e00ee977 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -572,6 +572,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ +#define BGPIOF_NO_SET_ON_INPUT BIT(6) int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); -- cgit v1.2.3-58-ga151 From 9e2ba2c34f1922ca1e0c7d31b30ace5842c2e7d1 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:19 +0200 Subject: fanotify: send FAN_DIR_MODIFY event flavor with dir inode and name Dirent events are going to be supported in two flavors: 1. Directory fid info + mask that includes the specific event types (e.g. FAN_CREATE) and an optional FAN_ONDIR flag. 2. Directory fid info + name + mask that includes only FAN_DIR_MODIFY. To request the second event flavor, user needs to set the event type FAN_DIR_MODIFY in the mark mask. The first flavor is supported since kernel v5.1 for groups initialized with flag FAN_REPORT_FID. It is intended to be used for watching directories in "batch mode" - the watcher is notified when directory is changed and re-scans the directory content in response. This event flavor is stored more compactly in the event queue, so it is optimal for workloads with frequent directory changes. The second event flavor is intended to be used for watching large directories, where the cost of re-scan of the directory on every change is considered too high. 
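In userspace terms, the two flavors differ only in the mark mask. A minimal sketch (error handling omitted; only the FAN_DIR_MODIFY constant is new in this series, and requesting it is rejected with -EINVAL until name reporting is implemented later in the series):

#include <fcntl.h>
#include <sys/fanotify.h>

/* Sketch: requesting one of the two dirent event flavors. */
static int watch_dir(const char *path, int per_entry_names)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, 0);
	unsigned long long mask = per_entry_names ?
		FAN_DIR_MODIFY :	/* flavor 2: name events */
		FAN_CREATE | FAN_DELETE | FAN_MOVE | FAN_ONDIR;	/* flavor 1 */

	if (fd < 0 || fanotify_mark(fd, FAN_MARK_ADD, mask, AT_FDCWD, path))
		return -1;

	return fd;
}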
The watcher getting the event with the directory fid and entry name is expected to call fstatat(2) to query the content of the entry after the change. Legacy inotify events are reported with name and event mask (e.g. "foo", FAN_CREATE | FAN_ONDIR). That can lead users to the conclusion that there is *currently* an entry "foo" that is a sub-directory, when in fact "foo" may be negative or non-dir by the time user gets the event. To make it clear that the current state of the named entry is unknown, when reporting an event with name info, fanotify obfuscates the specific event types (e.g. create,delete,rename) and uses a common event type - FAN_DIR_MODIFY to describe the change. This should make it harder for users to make wrong assumptions and write buggy filesystem monitors. At this point, name info reporting is not yet implemented, so trying to set FAN_DIR_MODIFY in mark mask will return -EINVAL. Link: https://lore.kernel.org/r/20200319151022.31456-12-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 7 ++++--- fs/notify/fsnotify.c | 2 +- include/linux/fsnotify.h | 6 ++++++ include/linux/fsnotify_backend.h | 4 +++- include/uapi/linux/fanotify.h | 1 + 5 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 39eb71f7c413..74676228f784 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -235,9 +235,9 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, test_mask = event_mask & marks_mask & ~marks_ignored_mask; /* - * dirent modification events (create/delete/move) do not carry the - * child entry name/inode information. Instead, we report FAN_ONDIR - * for mkdir/rmdir so user can differentiate them from creat/unlink. + * For dirent modification events (create/delete/move) that do not carry + * the child entry name information, we report FAN_ONDIR for mkdir/rmdir + * so user can differentiate them from creat/unlink. * * For backward compatibility and consistency, do not report FAN_ONDIR * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR @@ -463,6 +463,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(FAN_CREATE != FS_CREATE); BUILD_BUG_ON(FAN_DELETE != FS_DELETE); + BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY); BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 193530f57963..72d332ce8e12 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -383,7 +383,7 @@ static __init int fsnotify_init(void) { int ret; - BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 25); + BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 860018f3e545..5ab28f6c7d26 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -30,6 +30,12 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask, const struct qstr *name, u32 cookie) { fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); + /* + * Send another flavor of the event without child inode data and + * without the specific event type (e.g. FS_CREATE|FS_IS_DIR). + * The name is relative to the dir inode the event is reported to. 
+ */ + fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index c72cbea20ef7..f0c506405b54 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -47,6 +47,7 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ +#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ /* This inode cares about things that happen to its children. Always set for @@ -66,7 +67,8 @@ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes. */ -#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) +#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \ + FS_DIR_MODIFY) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index 2a1844edda47..615fa2c87179 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -24,6 +24,7 @@ #define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ #define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ #define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */ +#define FAN_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */ -- cgit v1.2.3-58-ga151 From ee0c8e494cc3c135350cd5c4752e82af3feae1ab Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 20 Feb 2020 10:00:32 +0100 Subject: backlight: corgi: Convert to use GPIO descriptors The code in the Corgi backlight driver can be considerably simplified by moving to GPIO descriptors and lookup tables from the board files instead of passing GPIO numbers using the old API. Make sure to encode inversion semantics for the Akita and Spitz platforms inside the GPIO lookup table and drop the custom inversion semantics from the driver. All in-tree users are converted in this patch. 
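Condensed, the conversion pattern looks roughly like the following board-file/driver pair; the names, pin number and probe fragment are illustrative only, the real tables and driver changes are in the diff below:

#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>

/* Board file: polarity is encoded here, e.g. an active-low BL_CONT */
static struct gpiod_lookup_table bl_gpio_table = {
	.dev_id = "spi2.1",
	.table = {
		GPIO_LOOKUP("gpio-pxa", 42, "BL_CONT", GPIO_ACTIVE_LOW),
		{ },
	},
};
/* ... gpiod_add_lookup_table(&bl_gpio_table) runs before the device probes */

/* Driver: requests by function name, with no polarity knowledge */
static int bl_probe_sketch(struct device *dev)
{
	struct gpio_desc *cont;

	cont = devm_gpiod_get_optional(dev, "BL_CONT", GPIOD_OUT_LOW);
	if (IS_ERR(cont))
		return PTR_ERR(cont);

	/* Logical assertion; gpiolib applies the inversion */
	gpiod_set_value_cansleep(cont, 1);
	return 0;
}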
Signed-off-by: Linus Walleij Acked-by: Robert Jarzmik Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- arch/arm/mach-pxa/corgi.c | 12 +++++-- arch/arm/mach-pxa/spitz.c | 34 ++++++++++++++----- drivers/video/backlight/corgi_lcd.c | 68 ++++++++++--------------------------- include/linux/spi/corgi_lcd.h | 3 -- 4 files changed, 54 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index f2d73289230f..593c7f793da5 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -563,13 +563,20 @@ static void corgi_bl_kick_battery(void) } } +static struct gpiod_lookup_table corgi_lcdcon_gpio_table = { + .dev_id = "spi1.1", + .table = { + GPIO_LOOKUP("gpio-pxa", CORGI_GPIO_BACKLIGHT_CONT, + "BL_CONT", GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct corgi_lcd_platform_data corgi_lcdcon_info = { .init_mode = CORGI_LCD_MODE_VGA, .max_intensity = 0x2f, .default_intensity = 0x1f, .limit_mask = 0x0b, - .gpio_backlight_cont = CORGI_GPIO_BACKLIGHT_CONT, - .gpio_backlight_on = -1, .kick_battery = corgi_bl_kick_battery, }; @@ -609,6 +616,7 @@ static struct spi_board_info corgi_spi_devices[] = { static void __init corgi_init_spi(void) { pxa2xx_set_spi_info(1, &corgi_spi_info); + gpiod_add_lookup_table(&corgi_lcdcon_gpio_table); spi_register_board_info(ARRAY_AND_SIZE(corgi_spi_devices)); } #else diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index a4fdc399d152..371008e9bb02 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -525,13 +525,33 @@ static void spitz_bl_kick_battery(void) } } +static struct gpiod_lookup_table spitz_lcdcon_gpio_table = { + .dev_id = "spi2.1", + .table = { + GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_CONT, + "BL_CONT", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_ON, + "BL_ON", GPIO_ACTIVE_HIGH), + { }, + }, +}; + +static struct gpiod_lookup_table akita_lcdcon_gpio_table = { + .dev_id = "spi2.1", + .table = { + GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_CONT, + "BL_CONT", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_ON, + "BL_ON", GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct corgi_lcd_platform_data spitz_lcdcon_info = { .init_mode = CORGI_LCD_MODE_VGA, .max_intensity = 0x2f, .default_intensity = 0x1f, .limit_mask = 0x0b, - .gpio_backlight_cont = SPITZ_GPIO_BACKLIGHT_CONT, - .gpio_backlight_on = SPITZ_GPIO_BACKLIGHT_ON, .kick_battery = spitz_bl_kick_battery, }; @@ -574,12 +594,10 @@ static struct pxa2xx_spi_controller spitz_spi_info = { static void __init spitz_spi_init(void) { - struct corgi_lcd_platform_data *lcd_data = &spitz_lcdcon_info; - - if (machine_is_akita()) { - lcd_data->gpio_backlight_cont = AKITA_GPIO_BACKLIGHT_CONT; - lcd_data->gpio_backlight_on = AKITA_GPIO_BACKLIGHT_ON; - } + if (machine_is_akita()) + gpiod_add_lookup_table(&akita_lcdcon_gpio_table); + else + gpiod_add_lookup_table(&spitz_lcdcon_gpio_table); pxa2xx_set_spi_info(2, &spitz_spi_info); spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices)); diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index 68f7592c5060..25ef0cbd7583 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -90,9 +90,8 @@ struct corgi_lcd { int mode; char buf[2]; - int gpio_backlight_on; - int gpio_backlight_cont; - int gpio_backlight_cont_inverted; + struct gpio_desc 
*backlight_on; + struct gpio_desc *backlight_cont; void (*kick_battery)(void); }; @@ -403,13 +402,13 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity) corgi_ssp_lcdtg_send(lcd, DUTYCTRL_ADRS, intensity); /* Bit 5 via GPIO_BACKLIGHT_CONT */ - cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted; + cont = !!(intensity & 0x20); - if (gpio_is_valid(lcd->gpio_backlight_cont)) - gpio_set_value_cansleep(lcd->gpio_backlight_cont, cont); + if (lcd->backlight_cont) + gpiod_set_value_cansleep(lcd->backlight_cont, cont); - if (gpio_is_valid(lcd->gpio_backlight_on)) - gpio_set_value_cansleep(lcd->gpio_backlight_on, intensity); + if (lcd->backlight_on) + gpiod_set_value_cansleep(lcd->backlight_on, intensity); if (lcd->kick_battery) lcd->kick_battery(); @@ -482,48 +481,17 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd, struct corgi_lcd_platform_data *pdata) { struct spi_device *spi = lcd->spi_dev; - int err; - - lcd->gpio_backlight_on = -1; - lcd->gpio_backlight_cont = -1; - - if (gpio_is_valid(pdata->gpio_backlight_on)) { - err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on, - "BL_ON"); - if (err) { - dev_err(&spi->dev, - "failed to request GPIO%d for backlight_on\n", - pdata->gpio_backlight_on); - return err; - } - - lcd->gpio_backlight_on = pdata->gpio_backlight_on; - gpio_direction_output(lcd->gpio_backlight_on, 0); - } - if (gpio_is_valid(pdata->gpio_backlight_cont)) { - err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont, - "BL_CONT"); - if (err) { - dev_err(&spi->dev, - "failed to request GPIO%d for backlight_cont\n", - pdata->gpio_backlight_cont); - return err; - } - - lcd->gpio_backlight_cont = pdata->gpio_backlight_cont; - - /* spitz and akita use both GPIOs for backlight, and - * have inverted polarity of GPIO_BACKLIGHT_CONT - */ - if (gpio_is_valid(lcd->gpio_backlight_on)) { - lcd->gpio_backlight_cont_inverted = 1; - gpio_direction_output(lcd->gpio_backlight_cont, 1); - } else { - lcd->gpio_backlight_cont_inverted = 0; - gpio_direction_output(lcd->gpio_backlight_cont, 0); - } - } + lcd->backlight_on = devm_gpiod_get_optional(&spi->dev, + "BL_ON", GPIOD_OUT_LOW); + if (IS_ERR(lcd->backlight_on)) + return PTR_ERR(lcd->backlight_on); + + lcd->backlight_cont = devm_gpiod_get_optional(&spi->dev, "BL_CONT", + GPIOD_OUT_LOW); + if (IS_ERR(lcd->backlight_cont)) + return PTR_ERR(lcd->backlight_cont); + return 0; } diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h index edf4beccdadb..0b857616919c 100644 --- a/include/linux/spi/corgi_lcd.h +++ b/include/linux/spi/corgi_lcd.h @@ -11,9 +11,6 @@ struct corgi_lcd_platform_data { int default_intensity; int limit_mask; - int gpio_backlight_on; /* -1 if n/a */ - int gpio_backlight_cont; /* -1 if n/a */ - void (*notify)(int intensity); void (*kick_battery)(void); }; -- cgit v1.2.3-58-ga151 From 44d705b0370b1d581f46ff23e5d33e8b5ff8ec58 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 19 Mar 2020 17:10:22 +0200 Subject: fanotify: report name info for FAN_DIR_MODIFY event Report event FAN_DIR_MODIFY with name in a variable length record similar to how fid's are reported. With name info reporting implemented, setting FAN_DIR_MODIFY in mark mask is now allowed. When events are reported with name, the reported fid identifies the directory and the name follows the fid. The info record type for this event info is FAN_EVENT_INFO_TYPE_DFID_NAME. 
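To make the record layout concrete, a userspace consumer might walk the info records roughly as follows; this is a minimal sketch against the UAPI structures touched here, with handle_event() a hypothetical name, the dirfd resolution elided and no error checking:

#define _GNU_SOURCE
#include <fcntl.h>		/* struct file_handle */
#include <sys/fanotify.h>

/* Sketch: find the name in a FAN_EVENT_INFO_TYPE_DFID_NAME record. */
static void handle_event(const struct fanotify_event_metadata *md)
{
	const struct fanotify_event_info_fid *info = (const void *)(md + 1);
	size_t rest = md->event_len - md->metadata_len;

	while (rest >= sizeof(info->hdr)) {
		if (info->hdr.info_type == FAN_EVENT_INFO_TYPE_DFID_NAME) {
			/* The file_handle directly follows the fsid ... */
			const struct file_handle *fh =
				(const void *)(info + 1);
			/* ... and the null-terminated name follows it */
			const char *name =
				(const char *)fh->f_handle + fh->handle_bytes;

			/* fstatat(dirfd, name, &st, 0) as described above */
		}
		rest -= info->hdr.len;
		info = (const void *)info + info->hdr.len;
	}
}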
For now, all reported events have at most one info record which is either FAN_EVENT_INFO_TYPE_FID or FAN_EVENT_INFO_TYPE_DFID_NAME (for FAN_DIR_MODIFY). Later on, events "on child" will report both records. There are several ways that an application can use this information: 1. When watching a single directory, the name is always relative to the watched directory, so application need to fstatat(2) the name relative to the watched directory. 2. When watching a set of directories, the application could keep a map of dirfd for all watched directories and hash the map by fid obtained with name_to_handle_at(2). When getting a name event, the fid in the event info could be used to lookup the base dirfd in the map and then call fstatat(2) with that dirfd. 3. When watching a filesystem (FAN_MARK_FILESYSTEM) or a large set of directories, the application could use open_by_handle_at(2) with the fid in event info to obtain dirfd for the directory where event happened and call fstatat(2) with this dirfd. The last option scales better for a large number of watched directories. The first two options may be available in the future also for non privileged fanotify watchers, because open_by_handle_at(2) requires the CAP_DAC_READ_SEARCH capability. Link: https://lore.kernel.org/r/20200319151022.31456-15-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 2 +- fs/notify/fanotify/fanotify_user.c | 117 ++++++++++++++++++++++++++++--------- include/linux/fanotify.h | 3 +- include/uapi/linux/fanotify.h | 8 ++- 4 files changed, 100 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 599654564b2a..4c1a4eb597d5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -520,7 +520,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC); BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM); - BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19); + BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20); mask = fanotify_group_event_mask(group, iter_info, mask, data, data_type); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index a9d287a56098..42cb794c62ac 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -51,22 +51,35 @@ struct kmem_cache *fanotify_path_event_cachep __read_mostly; struct kmem_cache *fanotify_perm_event_cachep __read_mostly; #define FANOTIFY_EVENT_ALIGN 4 +#define FANOTIFY_INFO_HDR_LEN \ + (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle)) -static int fanotify_fid_info_len(int fh_len) +static int fanotify_fid_info_len(int fh_len, int name_len) { - return roundup(sizeof(struct fanotify_event_info_fid) + - sizeof(struct file_handle) + fh_len, - FANOTIFY_EVENT_ALIGN); + int info_len = fh_len; + + if (name_len) + info_len += name_len + 1; + + return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN); } static int fanotify_event_info_len(struct fanotify_event *event) { + int info_len = 0; int fh_len = fanotify_event_object_fh_len(event); - if (!fh_len) - return 0; + if (fh_len) + info_len += fanotify_fid_info_len(fh_len, 0); - return fanotify_fid_info_len(fh_len); + if (fanotify_event_name_len(event)) { + struct fanotify_name_event *fne = FANOTIFY_NE(event); + + info_len += fanotify_fid_info_len(fne->dir_fh.len, + fne->name_len); + } + + return info_len; } /* @@ -204,23 
+217,32 @@ static int process_access_response(struct fsnotify_group *group, return -ENOENT; } -static int copy_fid_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, - char __user *buf) +static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, + const char *name, size_t name_len, + char __user *buf, size_t count) { struct fanotify_event_info_fid info = { }; struct file_handle handle = { }; unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh_buf; size_t fh_len = fh ? fh->len : 0; - size_t len = fanotify_fid_info_len(fh_len); + size_t info_len = fanotify_fid_info_len(fh_len, name_len); + size_t len = info_len; + + pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n", + __func__, fh_len, name_len, info_len, count); - if (!len) + if (!fh_len || (name && !name_len)) return 0; - if (WARN_ON_ONCE(len < sizeof(info) + sizeof(handle) + fh_len)) + if (WARN_ON_ONCE(len < sizeof(info) || len > count)) return -EFAULT; - /* Copy event info fid header followed by vaiable sized file handle */ - info.hdr.info_type = FAN_EVENT_INFO_TYPE_FID; + /* + * Copy event info fid header followed by variable sized file handle + * and optionally followed by variable sized filename. + */ + info.hdr.info_type = name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME : + FAN_EVENT_INFO_TYPE_FID; info.hdr.len = len; info.fsid = *fsid; if (copy_to_user(buf, &info, sizeof(info))) @@ -228,6 +250,9 @@ static int copy_fid_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, buf += sizeof(info); len -= sizeof(info); + if (WARN_ON_ONCE(len < sizeof(handle))) + return -EFAULT; + handle.handle_type = fh->type; handle.handle_bytes = fh_len; if (copy_to_user(buf, &handle, sizeof(handle))) @@ -235,9 +260,12 @@ static int copy_fid_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, buf += sizeof(handle); len -= sizeof(handle); + if (WARN_ON_ONCE(len < fh_len)) + return -EFAULT; + /* - * For an inline fh, copy through stack to exclude the copy from - * usercopy hardening protections. + * For an inline fh and inline file name, copy through stack to exclude + * the copy from usercopy hardening protections. 
*/ fh_buf = fanotify_fh_buf(fh); if (fh_len <= FANOTIFY_INLINE_FH_LEN) { @@ -247,14 +275,28 @@ static int copy_fid_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, if (copy_to_user(buf, fh_buf, fh_len)) return -EFAULT; - /* Pad with 0's */ buf += fh_len; len -= fh_len; + + if (name_len) { + /* Copy the filename with terminating null */ + name_len++; + if (WARN_ON_ONCE(len < name_len)) + return -EFAULT; + + if (copy_to_user(buf, name, name_len)) + return -EFAULT; + + buf += name_len; + len -= name_len; + } + + /* Pad with 0's */ WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN); if (len > 0 && clear_user(buf, len)) return -EFAULT; - return 0; + return info_len; } static ssize_t copy_event_to_user(struct fsnotify_group *group, @@ -268,16 +310,15 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, pr_debug("%s: group=%p event=%p\n", __func__, group, event); - metadata.event_len = FAN_EVENT_METADATA_LEN; + metadata.event_len = FAN_EVENT_METADATA_LEN + + fanotify_event_info_len(event); metadata.metadata_len = FAN_EVENT_METADATA_LEN; metadata.vers = FANOTIFY_METADATA_VERSION; metadata.reserved = 0; metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS; metadata.pid = pid_vnr(event->pid); - if (fanotify_event_object_fh(event)) { - metadata.event_len += fanotify_event_info_len(event); - } else if (path && path->mnt && path->dentry) { + if (path && path->mnt && path->dentry) { fd = create_fd(group, path, &f); if (fd < 0) return fd; @@ -295,17 +336,39 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN)) goto out_close_fd; + buf += FAN_EVENT_METADATA_LEN; + count -= FAN_EVENT_METADATA_LEN; + if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; - if (f) { + if (f) fd_install(fd, f); - } else if (fanotify_event_object_fh(event)) { - ret = copy_fid_to_user(fanotify_event_fsid(event), - fanotify_event_object_fh(event), - buf + FAN_EVENT_METADATA_LEN); + + /* Event info records order is: dir fid + name, child fid */ + if (fanotify_event_name_len(event)) { + struct fanotify_name_event *fne = FANOTIFY_NE(event); + + ret = copy_info_to_user(fanotify_event_fsid(event), + fanotify_event_dir_fh(event), + fne->name, fne->name_len, + buf, count); if (ret < 0) return ret; + + buf += ret; + count -= ret; + } + + if (fanotify_event_object_fh_len(event)) { + ret = copy_info_to_user(fanotify_event_fsid(event), + fanotify_event_object_fh(event), + NULL, 0, buf, count); + if (ret < 0) + return ret; + + buf += ret; + count -= ret; } return metadata.event_len; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index b79fa9bb7359..3049a6c06d9e 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -47,7 +47,8 @@ * Directory entry modification events - reported only to directory * where entry is modified and not to a watching parent. 
*/ -#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) +#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \ + FAN_DIR_MODIFY) /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index 615fa2c87179..a88c7c6d0692 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -117,6 +117,7 @@ struct fanotify_event_metadata { }; #define FAN_EVENT_INFO_TYPE_FID 1 +#define FAN_EVENT_INFO_TYPE_DFID_NAME 2 /* Variable length info record following event metadata */ struct fanotify_event_info_header { @@ -125,7 +126,12 @@ struct fanotify_event_info_header { __u16 len; }; -/* Unique file identifier info record */ +/* + * Unique file identifier info record. This is used both for + * FAN_EVENT_INFO_TYPE_FID records and for FAN_EVENT_INFO_TYPE_DFID_NAME + * records. For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null + * terminated name immediately after the file handle. + */ struct fanotify_event_info_fid { struct fanotify_event_info_header hdr; __kernel_fsid_t fsid; -- cgit v1.2.3-58-ga151 From 9ce3bf225e5a908756b90b8f7bbc38834427296b Mon Sep 17 00:00:00 2001 From: Clement Leger Date: Mon, 2 Mar 2020 10:38:55 +0100 Subject: remoteproc: Use size_t type for len in da_to_va With upcoming changes in elf loader for elf64 support, section size will be a u64. When used with da_to_va, this will potentially lead to overflow if using the current "int" type for len argument. Change da_to_va prototype to use a size_t for len and fix all users of this function. Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Signed-off-by: Clement Leger Link: https://lore.kernel.org/r/20200302093902.27849-2-cleger@kalray.eu Signed-off-by: Bjorn Andersson --- drivers/remoteproc/imx_rproc.c | 11 ++++++----- drivers/remoteproc/keystone_remoteproc.c | 4 ++-- drivers/remoteproc/qcom_q6v5_adsp.c | 2 +- drivers/remoteproc/qcom_q6v5_mss.c | 2 +- drivers/remoteproc/qcom_q6v5_pas.c | 2 +- drivers/remoteproc/qcom_q6v5_wcss.c | 2 +- drivers/remoteproc/qcom_wcnss.c | 2 +- drivers/remoteproc/remoteproc_core.c | 2 +- drivers/remoteproc/remoteproc_internal.h | 2 +- drivers/remoteproc/st_slim_rproc.c | 4 ++-- drivers/remoteproc/wkup_m3_rproc.c | 4 ++-- include/linux/remoteproc.h | 2 +- 12 files changed, 20 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 3e72b6f38d4b..8957ed271d20 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -186,7 +186,7 @@ static int imx_rproc_stop(struct rproc *rproc) } static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, - int len, u64 *sys) + size_t len, u64 *sys) { const struct imx_rproc_dcfg *dcfg = priv->dcfg; int i; @@ -203,19 +203,19 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, } } - dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%x\n", + dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n", da, len); return -ENOENT; } -static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct imx_rproc *priv = rproc->priv; void *va = NULL; u64 sys; int i; - if (len <= 0) + if (len == 0) return NULL; /* @@ -235,7 +235,8 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len) } } - dev_dbg(&rproc->dev, "da = 
0x%llx len = 0x%x va = 0x%p\n", da, len, va); + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n", + da, len, va); return va; } diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c index 5c4658f00b3d..cd266163a65f 100644 --- a/drivers/remoteproc/keystone_remoteproc.c +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid) * can be used either by the remoteproc core for loading (when using kernel * remoteproc loader), or by any rpmsg bus drivers. */ -static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct keystone_rproc *ksproc = rproc->priv; void __iomem *va = NULL; @@ -255,7 +255,7 @@ static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len) size_t size; int i; - if (len <= 0) + if (len == 0) return NULL; for (i = 0; i < ksproc->num_mems; i++) { diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index e953886b2eb7..2b01f2282062 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -270,7 +270,7 @@ static int adsp_stop(struct rproc *rproc) return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len) +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index d7667418a62f..03ffc6db4c68 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1325,7 +1325,7 @@ static int q6v5_stop(struct rproc *rproc) return 0; } -static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len) +static void *q6v5_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct q6v5 *qproc = rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index edf9d0e1a235..a41860d2243a 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -222,7 +222,7 @@ static int adsp_stop(struct rproc *rproc) return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len) +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index f93e1e4a1cc0..f1924b740a10 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -406,7 +406,7 @@ static int q6v5_wcss_stop(struct rproc *rproc) return 0; } -static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, int len) +static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct q6v5_wcss *wcss = rproc->priv; int offset; diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index dc135754bb9c..0c7afd038f0d 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -287,7 +287,7 @@ static int wcnss_stop(struct rproc *rproc) return ret; } -static void *wcnss_da_to_va(struct rproc *rproc, u64 da, int len) +static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; int offset; diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 3f0026cf67b7..1f20db16a708 
100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -185,7 +185,7 @@ EXPORT_SYMBOL(rproc_va_to_pa); * here the output of the DMA API for the carveouts, which should be more * correct. */ -void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct rproc_mem_entry *carveout; void *ptr = NULL; diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 493ef9262411..58580210575c 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -50,7 +50,7 @@ void rproc_exit_sysfs(void); void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); -void *rproc_da_to_va(struct rproc *rproc, u64 da, int len); +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len); phys_addr_t rproc_va_to_pa(void *cpu_addr); int rproc_trigger_recovery(struct rproc *rproc); diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c index 04492fead3c8..09bcb4d8b9e0 100644 --- a/drivers/remoteproc/st_slim_rproc.c +++ b/drivers/remoteproc/st_slim_rproc.c @@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc) return 0; } -static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct st_slim_rproc *slim_rproc = rproc->priv; void *va = NULL; @@ -191,7 +191,7 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len) } } - dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%pK\n", + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n", da, len, va); return va; diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c index 3984e585c847..b9349d684258 100644 --- a/drivers/remoteproc/wkup_m3_rproc.c +++ b/drivers/remoteproc/wkup_m3_rproc.c @@ -80,14 +80,14 @@ static int wkup_m3_rproc_stop(struct rproc *rproc) return 0; } -static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) { struct wkup_m3_rproc *wkupm3 = rproc->priv; void *va = NULL; int i; u32 offset; - if (len <= 0) + if (len == 0) return NULL; for (i = 0; i < WKUPM3_MEM_MAX; i++) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 16ad66683ad0..89215798eaea 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -374,7 +374,7 @@ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, int len); + void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, int avail); -- cgit v1.2.3-58-ga151 From 096ee78669d2bc8fccc40117de8d4e838a0c80db Mon Sep 17 00:00:00 2001 From: Clement Leger Date: Mon, 2 Mar 2020 10:38:56 +0100 Subject: remoteproc: Use size_t instead of int for rproc_mem_entry len Now that rproc_da_to_va uses a size_t for length, use a size_t for len field of rproc_mem_entry. Function used to create such structures now takes a size_t instead of int to allow full size range to be handled. 
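For reference, a caller-side sketch of the updated helper; only the size_t len parameter comes from this patch, the function and carveout names are illustrative:

#include <linux/remoteproc.h>

/* Sketch: registering a carveout whose length is now a size_t. */
static int foo_add_carveout(struct rproc *rproc, void *va, dma_addr_t dma,
			    size_t len, u32 da)
{
	struct rproc_mem_entry *mem;

	mem = rproc_mem_entry_init(rproc->dev.parent, va, dma, len, da,
				   NULL, NULL, "foo-carveout");
	if (!mem)
		return -ENOMEM;

	rproc_add_carveout(rproc, mem);
	return 0;
}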
Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Signed-off-by: Clement Leger Link: https://lore.kernel.org/r/20200302093902.27849-3-cleger@kalray.eu Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 14 ++++++++------ drivers/remoteproc/remoteproc_debugfs.c | 2 +- include/linux/remoteproc.h | 6 +++--- 3 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 1f20db16a708..ebb7213c33b1 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -319,8 +319,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) struct device *dev = &rproc->dev; struct rproc_vring *rvring = &rvdev->vring[i]; struct fw_rsc_vdev *rsc; - int ret, size, notifyid; + int ret, notifyid; struct rproc_mem_entry *mem; + size_t size; /* actual size of vring (in bytes) */ size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); @@ -750,11 +751,12 @@ static int rproc_alloc_carveout(struct rproc *rproc, va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL); if (!va) { dev_err(dev->parent, - "failed to allocate dma memory: len 0x%x\n", mem->len); + "failed to allocate dma memory: len 0x%zx\n", + mem->len); return -ENOMEM; } - dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n", + dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n", va, &dma, mem->len); if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) { @@ -962,7 +964,7 @@ EXPORT_SYMBOL(rproc_add_carveout); */ struct rproc_mem_entry * rproc_mem_entry_init(struct device *dev, - void *va, dma_addr_t dma, int len, u32 da, + void *va, dma_addr_t dma, size_t len, u32 da, int (*alloc)(struct rproc *, struct rproc_mem_entry *), int (*release)(struct rproc *, struct rproc_mem_entry *), const char *name, ...) @@ -1004,7 +1006,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init); * provided by client. */ struct rproc_mem_entry * -rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len, +rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, u32 da, const char *name, ...) 
{ struct rproc_mem_entry *mem; @@ -1275,7 +1277,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); if (unmapped != entry->len) { /* nothing much to do besides complaining */ - dev_err(dev, "failed to unmap %u/%zu\n", entry->len, + dev_err(dev, "failed to unmap %zx/%zu\n", entry->len, unmapped); } diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index dd93cf04e17f..82dc34b819df 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -293,7 +293,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p) seq_printf(seq, "\tVirtual address: %pK\n", carveout->va); seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma); seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da); - seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len); + seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len); } return 0; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 89215798eaea..bee559330204 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -329,7 +329,7 @@ struct rproc; struct rproc_mem_entry { void *va; dma_addr_t dma; - int len; + size_t len; u32 da; void *priv; char name[32]; @@ -599,13 +599,13 @@ void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem); struct rproc_mem_entry * rproc_mem_entry_init(struct device *dev, - void *va, dma_addr_t dma, int len, u32 da, + void *va, dma_addr_t dma, size_t len, u32 da, int (*alloc)(struct rproc *, struct rproc_mem_entry *), int (*release)(struct rproc *, struct rproc_mem_entry *), const char *name, ...); struct rproc_mem_entry * -rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len, +rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, u32 da, const char *name, ...); int rproc_boot(struct rproc *rproc); -- cgit v1.2.3-58-ga151 From e4ae4b7d01699d0f3ea61bbef119f2d67e5455c0 Mon Sep 17 00:00:00 2001 From: Clement Leger Date: Mon, 2 Mar 2020 10:38:57 +0100 Subject: remoteproc: Use u64 type for boot_addr elf64 entry is defined as a u64. Since boot_addr is used to store the elf entry point, change the boot_addr type to u64 to support both elf32 and elf64. At the same time, fix the users of this variable. Reviewed-by: Bjorn Andersson Signed-off-by: Clement Leger Link: https://lore.kernel.org/r/20200302093902.27849-4-cleger@kalray.eu [bjorn: Fixes up return type of rproc_get_boot_addr()] Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_elf_loader.c | 2 +- drivers/remoteproc/remoteproc_internal.h | 4 ++-- drivers/remoteproc/st_remoteproc.c | 2 +- include/linux/remoteproc.h | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c index 606aae166eba..c2a9783cfb9a 100644 --- a/drivers/remoteproc/remoteproc_elf_loader.c +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -102,7 +102,7 @@ EXPORT_SYMBOL(rproc_elf_sanity_check); * Note that the boot address is not a configurable property of all remote * processors. Some will always boot at a specific hard-coded address.
*/ -u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw) +u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw) { struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data; diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 58580210575c..23f7a713995f 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -55,7 +55,7 @@ phys_addr_t rproc_va_to_pa(void *cpu_addr); int rproc_trigger_recovery(struct rproc *rproc); int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw); -u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw); +u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw); int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw); int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw); struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc, @@ -73,7 +73,7 @@ int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) } static inline -u32 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw) +u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw) { if (rproc->ops->get_boot_addr) return rproc->ops->get_boot_addr(rproc, fw); diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index ee13d23b43a9..a3268d95a50e 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c @@ -190,7 +190,7 @@ static int st_rproc_start(struct rproc *rproc) } } - dev_info(&rproc->dev, "Started from 0x%x\n", rproc->bootaddr); + dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr); return 0; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index bee559330204..1683d6c386a6 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -382,7 +382,7 @@ struct rproc_ops { struct rproc *rproc, const struct firmware *fw); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); - u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); + u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); }; /** @@ -498,7 +498,7 @@ struct rproc { int num_traces; struct list_head carveouts; struct list_head mappings; - u32 bootaddr; + u64 bootaddr; struct list_head rvdevs; struct list_head subdevs; struct idr notifyids; -- cgit v1.2.3-58-ga151 From 8f4033507d856be9a7983921ab3d2a1d03b9a093 Mon Sep 17 00:00:00 2001 From: Clement Leger Date: Mon, 2 Mar 2020 10:39:02 +0100 Subject: remoteproc: Adapt coredump to generate correct elf type Now that remoteproc can load an elf64, the coredump elf class should be the same as the loaded elf class. In order to do that, add an elf_class field to rproc with a default value. If an elf is loaded successfully, this field will be updated with the loaded elf class. Then, the coredump core code has been modified to use the generic elf macros in order to create an elf file with the correct class.
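The class-generic accessors live in remoteproc_elf_helpers.h, which is not quoted here; conceptually they dispatch on the ELF class along the following lines (a simplified sketch with hypothetical names, not the actual macro-generated helpers):

#include <linux/types.h>
#include <linux/elf.h>

/* Simplified sketch of a class-dispatching ELF header accessor. */
static inline u64 sketch_elf_hdr_get_e_entry(u8 class, const void *hdr)
{
	if (class == ELFCLASS64)
		return ((const struct elf64_hdr *)hdr)->e_entry;

	return ((const struct elf32_hdr *)hdr)->e_entry;
}

static inline void sketch_elf_hdr_set_e_entry(u8 class, void *hdr, u64 entry)
{
	if (class == ELFCLASS64)
		((struct elf64_hdr *)hdr)->e_entry = entry;
	else
		((struct elf32_hdr *)hdr)->e_entry = entry;
}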
Reviewed-by: Mathieu Poirier Reviewed-by: Bjorn Andersson Signed-off-by: Clement Leger Link: https://lore.kernel.org/r/20200302093902.27849-9-cleger@kalray.eu Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 67 ++++++++++++++++-------------- drivers/remoteproc/remoteproc_elf_loader.c | 3 ++ include/linux/remoteproc.h | 1 + 3 files changed, 39 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index aa598f99791a..0a9bb745bd0d 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -38,6 +38,7 @@ #include #include "remoteproc_internal.h" +#include "remoteproc_elf_helpers.h" #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL @@ -1571,20 +1572,21 @@ EXPORT_SYMBOL(rproc_coredump_add_custom_segment); static void rproc_coredump(struct rproc *rproc) { struct rproc_dump_segment *segment; - struct elf32_phdr *phdr; - struct elf32_hdr *ehdr; + void *phdr; + void *ehdr; size_t data_size; size_t offset; void *data; void *ptr; + u8 class = rproc->elf_class; int phnum = 0; if (list_empty(&rproc->dump_segments)) return; - data_size = sizeof(*ehdr); + data_size = elf_size_of_hdr(class); list_for_each_entry(segment, &rproc->dump_segments, node) { - data_size += sizeof(*phdr) + segment->size; + data_size += elf_size_of_phdr(class) + segment->size; phnum++; } @@ -1595,33 +1597,33 @@ static void rproc_coredump(struct rproc *rproc) ehdr = data; - memset(ehdr, 0, sizeof(*ehdr)); - memcpy(ehdr->e_ident, ELFMAG, SELFMAG); - ehdr->e_ident[EI_CLASS] = ELFCLASS32; - ehdr->e_ident[EI_DATA] = ELFDATA2LSB; - ehdr->e_ident[EI_VERSION] = EV_CURRENT; - ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE; - ehdr->e_type = ET_CORE; - ehdr->e_machine = EM_NONE; - ehdr->e_version = EV_CURRENT; - ehdr->e_entry = rproc->bootaddr; - ehdr->e_phoff = sizeof(*ehdr); - ehdr->e_ehsize = sizeof(*ehdr); - ehdr->e_phentsize = sizeof(*phdr); - ehdr->e_phnum = phnum; - - phdr = data + ehdr->e_phoff; - offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum; + memset(ehdr, 0, elf_size_of_hdr(class)); + /* e_ident field is common for both elf32 and elf64 */ + elf_hdr_init_ident(ehdr, class); + + elf_hdr_set_e_type(class, ehdr, ET_CORE); + elf_hdr_set_e_machine(class, ehdr, EM_NONE); + elf_hdr_set_e_version(class, ehdr, EV_CURRENT); + elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr); + elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class)); + elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class)); + elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class)); + elf_hdr_set_e_phnum(class, ehdr, phnum); + + phdr = data + elf_hdr_get_e_phoff(class, ehdr); + offset = elf_hdr_get_e_phoff(class, ehdr); + offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr); + list_for_each_entry(segment, &rproc->dump_segments, node) { - memset(phdr, 0, sizeof(*phdr)); - phdr->p_type = PT_LOAD; - phdr->p_offset = offset; - phdr->p_vaddr = segment->da; - phdr->p_paddr = segment->da; - phdr->p_filesz = segment->size; - phdr->p_memsz = segment->size; - phdr->p_flags = PF_R | PF_W | PF_X; - phdr->p_align = 0; + memset(phdr, 0, elf_size_of_phdr(class)); + elf_phdr_set_p_type(class, phdr, PT_LOAD); + elf_phdr_set_p_offset(class, phdr, offset); + elf_phdr_set_p_vaddr(class, phdr, segment->da); + elf_phdr_set_p_paddr(class, phdr, segment->da); + elf_phdr_set_p_filesz(class, phdr, segment->size); + elf_phdr_set_p_memsz(class, phdr, segment->size); + elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); + 
elf_phdr_set_p_align(class, phdr, 0); if (segment->dump) { segment->dump(rproc, segment, data + offset); @@ -1637,8 +1639,8 @@ static void rproc_coredump(struct rproc *rproc) } } - offset += phdr->p_filesz; - phdr++; + offset += elf_phdr_get_p_filesz(class, phdr); + phdr += elf_size_of_phdr(class); } dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); @@ -2037,6 +2039,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->name = name; rproc->priv = &rproc[1]; rproc->auto_boot = true; + rproc->elf_class = ELFCLASS32; device_initialize(&rproc->dev); rproc->dev.parent = dev; diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c index 4869fb7d8fe4..16e2c496fd45 100644 --- a/drivers/remoteproc/remoteproc_elf_loader.c +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -248,6 +248,9 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) memset(ptr + filesz, 0, memsz - filesz); } + if (ret == 0) + rproc->elf_class = class; + return ret; } EXPORT_SYMBOL(rproc_elf_load_segments); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 1683d6c386a6..ed127b2d35ca 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -514,6 +514,7 @@ struct rproc { bool auto_boot; struct list_head dump_segments; int nb_vdev; + u8 elf_class; }; /** -- cgit v1.2.3-58-ga151 From dc5192c449368eed3385f4405670aa3ed21d6270 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 23 Mar 2020 22:29:02 -0700 Subject: remoteproc: Introduce "panic" callback in ops Introduce generic support for handling kernel panics in remoteproc drivers, in order to allow operations needed for aiding in post mortem system debugging, such as flushing caches etc. The function can return a number of milliseconds needed by the remote to "settle" and the core will wait the longest returned duration before returning from the panic handler. Reviewed-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200324052904.738594-3-bjorn.andersson@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 43 ++++++++++++++++++++++++++++++++++++ include/linux/remoteproc.h | 3 +++ 2 files changed, 46 insertions(+) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 7ee976ee2044..e12a54e67588 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ +#include #include #include #include @@ -45,6 +46,7 @@ static DEFINE_MUTEX(rproc_list_mutex); static LIST_HEAD(rproc_list); +static struct notifier_block rproc_panic_nb; typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int offset, int avail); @@ -2236,10 +2238,50 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) } EXPORT_SYMBOL(rproc_report_crash); +static int rproc_panic_handler(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + unsigned int longest = 0; + struct rproc *rproc; + unsigned int d; + + rcu_read_lock(); + list_for_each_entry_rcu(rproc, &rproc_list, node) { + if (!rproc->ops->panic || rproc->state != RPROC_RUNNING) + continue; + + d = rproc->ops->panic(rproc); + longest = max(longest, d); + } + rcu_read_unlock(); + + /* + * Delay for the longest requested duration before returning. 
This can + * be used by the remoteproc drivers to give the remote processor time + * to perform any requested operations (such as flush caches), when + * it's not possible to signal the Linux side due to the panic. + */ + mdelay(longest); + + return NOTIFY_DONE; +} + +static void __init rproc_init_panic(void) +{ + rproc_panic_nb.notifier_call = rproc_panic_handler; + atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb); +} + +static void __exit rproc_exit_panic(void) +{ + atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb); +} + static int __init remoteproc_init(void) { rproc_init_sysfs(); rproc_init_debugfs(); + rproc_init_panic(); return 0; } @@ -2249,6 +2291,7 @@ static void __exit remoteproc_exit(void) { ida_destroy(&rproc_dev_index); + rproc_exit_panic(); rproc_exit_debugfs(); rproc_exit_sysfs(); } diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index ed127b2d35ca..9c07d7958c53 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -369,6 +369,8 @@ enum rsc_handling_status { * expects to find it * @sanity_check: sanity check the fw image * @get_boot_addr: get boot address to entry point specified in firmware + * @panic: optional callback to react to system panic, core will delay + * panic at least the returned number of milliseconds */ struct rproc_ops { int (*start)(struct rproc *rproc); @@ -383,6 +385,7 @@ struct rproc_ops { int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); + unsigned long (*panic)(struct rproc *rproc); }; /** -- cgit v1.2.3-58-ga151 From 1070f24d4ae90420db342fe54c1ed90ef1129bb5 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 24 Mar 2020 13:00:28 +0200 Subject: remoteproc/omap: Remove the platform_data header The platform data header for OMAP remoteproc is no longer used for anything post ti-sysc and DT conversion, so just remove it completely. Signed-off-by: Tero Kristo Acked-by: Suman Anna Reviewed-by: Andrew F. Davis Acked-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200324110035.29907-9-t-kristo@ti.com Signed-off-by: Bjorn Andersson --- include/linux/platform_data/remoteproc-omap.h | 51 --------------------------- 1 file changed, 51 deletions(-) delete mode 100644 include/linux/platform_data/remoteproc-omap.h (limited to 'include/linux') diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h deleted file mode 100644 index 7e3a16097672..000000000000 --- a/include/linux/platform_data/remoteproc-omap.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Remote Processor - omap-specific bits - * - * Copyright (C) 2011 Texas Instruments, Inc. - * Copyright (C) 2011 Google, Inc. 
- */ - -#ifndef _PLAT_REMOTEPROC_H -#define _PLAT_REMOTEPROC_H - -struct rproc_ops; -struct platform_device; - -/* - * struct omap_rproc_pdata - omap remoteproc's platform data - * @name: the remoteproc's name - * @oh_name: omap hwmod device - * @oh_name_opt: optional, secondary omap hwmod device - * @firmware: name of firmware file to load - * @mbox_name: name of omap mailbox device to use with this rproc - * @ops: start/stop rproc handlers - * @device_enable: omap-specific handler for enabling a device - * @device_shutdown: omap-specific handler for shutting down a device - * @set_bootaddr: omap-specific handler for setting the rproc boot address - */ -struct omap_rproc_pdata { - const char *name; - const char *oh_name; - const char *oh_name_opt; - const char *firmware; - const char *mbox_name; - const struct rproc_ops *ops; - int (*device_enable)(struct platform_device *pdev); - int (*device_shutdown)(struct platform_device *pdev); - void (*set_bootaddr)(u32); -}; - -#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE) - -void __init omap_rproc_reserve_cma(void); - -#else - -static inline void __init omap_rproc_reserve_cma(void) -{ -} - -#endif - -#endif /* _PLAT_REMOTEPROC_H */ -- cgit v1.2.3-58-ga151 From 3316ab2b45f6bf4797d8d65b22fda3cc13318890 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 24 Mar 2020 11:40:45 +0530 Subject: bus: mhi: core: Add support for reading MHI info from device The MHI register base has several registers used for getting MHI-specific information such as the version, family, major, and minor numbers from the device. This information can be used by the controller drivers for use cases such as applying quirks for a specific revision. While at it, let's also rearrange the local variables in mhi_register_controller().
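As a hedged illustration of how a controller driver might consume the new fields, consider the following sketch; the function name and the quirk condition are hypothetical, not part of this patch:

/* Hypothetical consumer: apply a workaround on early silicon only. */
static void my_mhi_apply_quirks(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->major_version == 1 && mhi_cntrl->minor_version < 2)
		dev_info(mhi_cntrl->cntrl_dev,
			 "applying hypothetical rev 1.x quirk\n");
}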
Suggested-by: Hemant Kumar Reviewed-by: Jeffrey Hugo Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20200324061050.14845-3-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/core/init.c | 19 +++++++++++++++++-- drivers/bus/mhi/core/internal.h | 10 ++++++++++ include/linux/mhi.h | 17 +++++++++++++++++ 3 files changed, 44 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index eb7f556a8531..d136f6c6ca78 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -802,12 +802,12 @@ error_ev_cfg: int mhi_register_controller(struct mhi_controller *mhi_cntrl, struct mhi_controller_config *config) { - int ret; - int i; struct mhi_event *mhi_event; struct mhi_chan *mhi_chan; struct mhi_cmd *mhi_cmd; struct mhi_device *mhi_dev; + u32 soc_info; + int ret, i; if (!mhi_cntrl) return -EINVAL; @@ -874,6 +874,21 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; } + /* Read the MHI device info */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + SOC_HW_VERSION_OFFS, &soc_info); + if (ret) + goto error_alloc_dev; + + mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> + SOC_HW_VERSION_FAM_NUM_SHFT; + mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> + SOC_HW_VERSION_DEV_NUM_SHFT; + mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> + SOC_HW_VERSION_MAJOR_VER_SHFT; + mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> + SOC_HW_VERSION_MINOR_VER_SHFT; + /* Register controller with MHI bus */ mhi_dev = mhi_alloc_device(mhi_cntrl); if (IS_ERR(mhi_dev)) { diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 18066302e6e2..5deadfaa053a 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -196,6 +196,16 @@ extern struct bus_type mhi_bus_type; #define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) #define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) +#define SOC_HW_VERSION_OFFS (0x224) +#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) +#define SOC_HW_VERSION_FAM_NUM_SHFT (28) +#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) +#define SOC_HW_VERSION_DEV_NUM_SHFT (16) +#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) +#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) +#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) +#define SOC_HW_VERSION_MINOR_VER_SHFT (0) + #define EV_CTX_RESERVED_MASK GENMASK(7, 0) #define EV_CTX_INTMODC_MASK GENMASK(15, 8) #define EV_CTX_INTMODC_SHIFT 8 diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d83e7772681b..ad1996001965 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -310,6 +310,10 @@ struct mhi_controller_config { * @sw_ev_rings: Number of software event rings * @nr_irqs_req: Number of IRQs required to operate (optional) * @nr_irqs: Number of IRQ allocated by bus master (required) + * @family_number: MHI controller family number + * @device_number: MHI controller device number + * @major_version: MHI controller major revision number + * @minor_version: MHI controller minor revision number * @mhi_event: MHI event ring configurations table * @mhi_cmd: MHI command ring configurations table * @mhi_ctxt: MHI device context, shared memory between host and device @@ -348,6 +352,15 @@ struct mhi_controller_config { * Fields marked as (required) need to be populated by the controller driver * before calling 
mhi_register_controller(). For the fields marked as (optional) * they can be populated depending on the usecase. + * + * The following fields are present for the purpose of implementing any device + * specific quirks or customizations for specific MHI revisions used in device + * by the controller drivers. The MHI stack will just populate these fields + * during mhi_register_controller(): + * family_number + * device_number + * major_version + * minor_version */ struct mhi_controller { struct device *cntrl_dev; @@ -375,6 +388,10 @@ struct mhi_controller { u32 sw_ev_rings; u32 nr_irqs_req; u32 nr_irqs; + u32 family_number; + u32 device_number; + u32 major_version; + u32 minor_version; struct mhi_event *mhi_event; struct mhi_cmd *mhi_cmd; -- cgit v1.2.3-58-ga151 From d7242c4641fba521a1ea9dbccb11a40cf38cd912 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Mar 2020 17:22:47 -0400 Subject: pNFS: Add a helper to allocate the array of buckets Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.h | 3 +++ fs/nfs/pnfs_nfs.c | 31 +++++++++++++++++++++++++++++++ include/linux/nfs_xdr.h | 15 ++++++++++++--- 3 files changed, 46 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 7bfb6970134a..f6b1099aa151 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -366,6 +366,9 @@ bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node); void nfs4_deviceid_purge_client(const struct nfs_client *); /* pnfs_nfs.c */ +struct pnfs_commit_array *pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags); +void pnfs_free_commit_array(struct pnfs_commit_array *p); + void pnfs_generic_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo); void pnfs_generic_commit_release(void *calldata); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 3d0942541618..c8518ce3a4ef 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -87,6 +87,37 @@ out: } EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit); +struct pnfs_commit_array * +pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags) +{ + struct pnfs_commit_array *p; + struct pnfs_commit_bucket *b; + + p = kmalloc(struct_size(p, buckets, n), gfp_flags); + if (!p) + return NULL; + p->nbuckets = n; + INIT_LIST_HEAD(&p->cinfo_list); + INIT_LIST_HEAD(&p->lseg_list); + p->lseg = NULL; + for (b = &p->buckets[0]; n != 0; b++, n--) { + INIT_LIST_HEAD(&b->written); + INIT_LIST_HEAD(&b->committing); + b->wlseg = NULL; + b->clseg = NULL; + b->direct_verf.committed = NFS_INVALID_STABLE_HOW; + } + return p; +} +EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array); + +void +pnfs_free_commit_array(struct pnfs_commit_array *p) +{ + kfree_rcu(p, rcu); +} +EXPORT_SYMBOL_GPL(pnfs_free_commit_array); + static int pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo, diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 94c77ed55ce1..e91c917c9c1c 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1270,10 +1270,19 @@ struct pnfs_commit_bucket { struct nfs_writeverf direct_verf; }; +struct pnfs_commit_array { + struct list_head cinfo_list; + struct list_head lseg_list; + struct pnfs_layout_segment *lseg; + struct rcu_head rcu; + unsigned int nbuckets; + struct pnfs_commit_bucket buckets[]; +}; + struct pnfs_ds_commit_info { - int nwritten; - int ncommitting; - int nbuckets; + unsigned int nwritten; + unsigned int ncommitting; + unsigned int nbuckets; struct pnfs_commit_bucket *buckets; }; -- cgit v1.2.3-58-ga151 From 
7c8978c0837d40c302f5e90d24c298d9ca9fc097 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 25 Mar 2020 12:34:06 +0100 Subject: driver core: platform: Initialize dma_parms for platform devices It's currently the platform driver's responsibility to initialize the pointer dma_parms for its corresponding struct device. The benefit of this approach is that we can avoid the initialization and not waste memory for the struct device_dma_parameters, as this can be decided on a case-by-case basis. However, it has turned out that this approach is not very practical. Not only does it lead to open coding, but also to real errors. In practice, callers of dma_set_max_seg_size() don't check the error code, but just assume it succeeds. For these reasons, let's do the initialization from the common platform bus at the device registration point. This also follows the way the PCI devices are being managed, see pci_device_add(). Cc: Suggested-by: Christoph Hellwig Tested-by: Ludovic Barre Reviewed-by: Linus Walleij Acked-by: Arnd Bergmann Signed-off-by: Ulf Hansson Link: https://lore.kernel.org/r/20200325113407.26996-2-ulf.hansson@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/base/platform.c | 1 + include/linux/platform_device.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index b5ce7b085795..46abbfb52655 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -512,6 +512,7 @@ int platform_device_add(struct platform_device *pdev) pdev->dev.parent = &platform_bus; pdev->dev.bus = &platform_bus_type; + pdev->dev.dma_parms = &pdev->dma_parms; switch (pdev->id) { default: diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 041bfa412aa0..81900b3cbe37 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -25,6 +25,7 @@ struct platform_device { bool id_auto; struct device dev; u64 platform_dma_mask; + struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; -- cgit v1.2.3-58-ga151 From 5caf6102e32ead7ed5d21b5309c1a4a7d70e6a9f Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 25 Mar 2020 12:34:07 +0100 Subject: amba: Initialize dma_parms for amba devices It's currently the amba driver's responsibility to initialize the pointer dma_parms for its corresponding struct device. The benefit of this approach is that we can avoid the initialization and not waste memory for the struct device_dma_parameters, as this can be decided on a case-by-case basis. However, it has turned out that this approach is not very practical. Not only does it lead to open coding, but also to real errors. In practice, callers of dma_set_max_seg_size() don't check the error code, but just assume it succeeds. For these reasons, let's do the initialization from the common amba bus at the device registration point. This also follows the way the PCI devices are being managed, see pci_device_add().
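To make the practical effect concrete, here is a sketch of the driver-side pattern these two patches unbreak; the probe function is hypothetical, and the error case reflects dma_set_max_seg_size() failing when dev->dma_parms is still NULL:

/*
 * Hypothetical platform driver probe: with dma_parms now wired up by
 * platform_device_add(), dma_set_max_seg_size() can no longer fail
 * just because dev->dma_parms was never assigned.
 */
static int my_probe(struct platform_device *pdev)
{
	/* Previously each driver had to open code:
	 *	pdev->dev.dma_parms = &my_dma_parms;
	 */
	return dma_set_max_seg_size(&pdev->dev, 65536);
}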
Cc: Cc: Russell King Suggested-by: Christoph Hellwig Tested-by: Ludovic Barre Reviewed-by: Linus Walleij Acked-by: Arnd Bergmann Signed-off-by: Ulf Hansson Link: https://lore.kernel.org/r/20200325113407.26996-3-ulf.hansson@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/amba/bus.c | 2 ++ include/linux/amba/bus.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index fe1523664816..5e61783ce92d 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -374,6 +374,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) WARN_ON(dev->irq[0] == (unsigned int)-1); WARN_ON(dev->irq[1] == (unsigned int)-1); + dev->dev.dma_parms = &dev->dma_parms; + ret = request_resource(parent, &dev->res); if (ret) goto err_out; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 26f0ecf401ea..0bbfd647f5c6 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -65,6 +65,7 @@ struct amba_device { struct device dev; struct resource res; struct clk *pclk; + struct device_dma_parameters dma_parms; unsigned int periphid; unsigned int cid; struct amba_cs_uci_id uci; -- cgit v1.2.3-58-ga151 From 4d9cf7df8d355e519adb8b2f8759c84e1e633070 Mon Sep 17 00:00:00 2001 From: Jeff LaBundy Date: Sun, 16 Feb 2020 17:32:06 -0600 Subject: mfd: Add support for Azoteq IQS620A/621/622/624/625 This patch adds core support for the Azoteq IQS620A, IQS621, IQS622, IQS624 and IQS625 multi-function sensors. Signed-off-by: Jeff LaBundy Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 13 + drivers/mfd/Makefile | 1 + drivers/mfd/iqs62x.c | 1063 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/iqs62x.h | 139 ++++++ 4 files changed, 1216 insertions(+) create mode 100644 drivers/mfd/iqs62x.c create mode 100644 include/linux/mfd/iqs62x.h (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2b203290e7b9..daefcb6310f9 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -642,6 +642,19 @@ config MFD_IPAQ_MICRO AT90LS8535 microcontroller flashed with a special iPAQ firmware using the custom protocol implemented in this driver. +config MFD_IQS62X + tristate "Azoteq IQS620A/621/622/624/625 core support" + depends on I2C + select MFD_CORE + select REGMAP_I2C + help + Say Y here if you want to build core support for the Azoteq IQS620A, + IQS621, IQS622, IQS624 and IQS625 multi-function sensors. Additional + options must be selected to enable device-specific functions. + + To compile this driver as a module, choose M here: the module will + be called iqs62x. 
+ config MFD_JANZ_CMODIO tristate "Janz CMOD-IO PCI MODULbus Carrier Board" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index b83f172545e1..f935d10cbf0f 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -226,6 +226,7 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o obj-$(CONFIG_MFD_STW481X) += stw481x.o obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o +obj-$(CONFIG_MFD_IQS62X) += iqs62x.o obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c new file mode 100644 index 000000000000..af764bc87d7c --- /dev/null +++ b/drivers/mfd/iqs62x.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors + * + * Copyright (C) 2019 Jeff LaBundy + * + * These devices rely on application-specific register settings and calibration + * data developed in and exported from a suite of GUIs offered by the vendor. A + * separate tool converts the GUIs' ASCII-based output into a standard firmware + * file parsed by the driver. + * + * Link to datasheets and GUIs: https://www.azoteq.com/ + * + * Link to conversion tool: https://github.com/jlabundy/iqs62x-h2bin.git + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IQS62X_PROD_NUM 0x00 + +#define IQS62X_SYS_FLAGS 0x10 +#define IQS62X_SYS_FLAGS_IN_ATI BIT(2) + +#define IQS620_HALL_FLAGS 0x16 +#define IQS621_HALL_FLAGS 0x19 +#define IQS622_HALL_FLAGS IQS621_HALL_FLAGS + +#define IQS624_INTERVAL_NUM 0x18 +#define IQS625_INTERVAL_NUM 0x12 + +#define IQS622_PROX_SETTINGS_4 0x48 +#define IQS620_PROX_SETTINGS_4 0x50 +#define IQS620_PROX_SETTINGS_4_SAR_EN BIT(7) + +#define IQS621_ALS_CAL_DIV_LUX 0x82 +#define IQS621_ALS_CAL_DIV_IR 0x83 + +#define IQS620_TEMP_CAL_MULT 0xC2 +#define IQS620_TEMP_CAL_DIV 0xC3 +#define IQS620_TEMP_CAL_OFFS 0xC4 + +#define IQS62X_SYS_SETTINGS 0xD0 +#define IQS62X_SYS_SETTINGS_SOFT_RESET BIT(7) +#define IQS62X_SYS_SETTINGS_ACK_RESET BIT(6) +#define IQS62X_SYS_SETTINGS_EVENT_MODE BIT(5) +#define IQS62X_SYS_SETTINGS_CLK_DIV BIT(4) +#define IQS62X_SYS_SETTINGS_REDO_ATI BIT(1) + +#define IQS62X_PWR_SETTINGS 0xD2 +#define IQS62X_PWR_SETTINGS_DIS_AUTO BIT(5) +#define IQS62X_PWR_SETTINGS_PWR_MODE_MASK (BIT(4) | BIT(3)) +#define IQS62X_PWR_SETTINGS_PWR_MODE_HALT (BIT(4) | BIT(3)) +#define IQS62X_PWR_SETTINGS_PWR_MODE_NORM 0 + +#define IQS62X_OTP_CMD 0xF0 +#define IQS62X_OTP_CMD_FG3 0x13 +#define IQS62X_OTP_DATA 0xF1 +#define IQS62X_MAX_REG 0xFF + +#define IQS62X_HALL_CAL_MASK GENMASK(3, 0) + +#define IQS62X_FW_REC_TYPE_INFO 0 +#define IQS62X_FW_REC_TYPE_PROD 1 +#define IQS62X_FW_REC_TYPE_HALL 2 +#define IQS62X_FW_REC_TYPE_MASK 3 +#define IQS62X_FW_REC_TYPE_DATA 4 + +#define IQS62X_ATI_POLL_SLEEP_US 10000 +#define IQS62X_ATI_POLL_TIMEOUT_US 500000 +#define IQS62X_ATI_STABLE_DELAY_MS 150 + +struct iqs62x_fw_rec { + u8 type; + u8 addr; + u8 len; + u8 data; +} __packed; + +struct iqs62x_fw_blk { + struct list_head list; + u8 addr; + u8 mask; + u8 len; + u8 data[]; +}; + +struct iqs62x_info { + u8 prod_num; + u8 sw_num; + u8 hw_num; +} __packed; + +static int iqs62x_dev_init(struct iqs62x_core *iqs62x) +{ + struct iqs62x_fw_blk *fw_blk; + unsigned int val; + int ret; + u8 clk_div = 1; + + list_for_each_entry(fw_blk, &iqs62x->fw_blk_head, list) { + 
if (fw_blk->mask) + ret = regmap_update_bits(iqs62x->regmap, fw_blk->addr, + fw_blk->mask, *fw_blk->data); + else + ret = regmap_raw_write(iqs62x->regmap, fw_blk->addr, + fw_blk->data, fw_blk->len); + if (ret) + return ret; + } + + switch (iqs62x->dev_desc->prod_num) { + case IQS620_PROD_NUM: + case IQS622_PROD_NUM: + ret = regmap_read(iqs62x->regmap, + iqs62x->dev_desc->prox_settings, &val); + if (ret) + return ret; + + if (val & IQS620_PROX_SETTINGS_4_SAR_EN) + iqs62x->ui_sel = IQS62X_UI_SAR1; + + /* fall through */ + + case IQS621_PROD_NUM: + ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, + IQS620_GLBL_EVENT_MASK_PMU | + iqs62x->dev_desc->prox_mask | + iqs62x->dev_desc->sar_mask | + iqs62x->dev_desc->hall_mask | + iqs62x->dev_desc->hyst_mask | + iqs62x->dev_desc->temp_mask | + iqs62x->dev_desc->als_mask | + iqs62x->dev_desc->ir_mask); + if (ret) + return ret; + break; + + default: + ret = regmap_write(iqs62x->regmap, IQS624_HALL_UI, + IQS624_HALL_UI_WHL_EVENT | + IQS624_HALL_UI_INT_EVENT | + IQS624_HALL_UI_AUTO_CAL); + if (ret) + return ret; + + /* + * The IQS625 default interval divider is below the minimum + * permissible value, and the datasheet mandates that it is + * corrected during initialization (unless an updated value + * has already been provided by firmware). + * + * To protect against an unacceptably low user-entered value + * stored in the firmware, the same check is extended to the + * IQS624 as well. + */ + ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV, &val); + if (ret) + return ret; + + if (val >= iqs62x->dev_desc->interval_div) + break; + + ret = regmap_write(iqs62x->regmap, IQS624_INTERVAL_DIV, + iqs62x->dev_desc->interval_div); + if (ret) + return ret; + } + + ret = regmap_read(iqs62x->regmap, IQS62X_SYS_SETTINGS, &val); + if (ret) + return ret; + + if (val & IQS62X_SYS_SETTINGS_CLK_DIV) + clk_div = iqs62x->dev_desc->clk_div; + + ret = regmap_write(iqs62x->regmap, IQS62X_SYS_SETTINGS, val | + IQS62X_SYS_SETTINGS_ACK_RESET | + IQS62X_SYS_SETTINGS_EVENT_MODE | + IQS62X_SYS_SETTINGS_REDO_ATI); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(iqs62x->regmap, IQS62X_SYS_FLAGS, val, + !(val & IQS62X_SYS_FLAGS_IN_ATI), + IQS62X_ATI_POLL_SLEEP_US, + IQS62X_ATI_POLL_TIMEOUT_US * clk_div); + if (ret) + return ret; + + msleep(IQS62X_ATI_STABLE_DELAY_MS * clk_div); + + return 0; +} + +static int iqs62x_firmware_parse(struct iqs62x_core *iqs62x, + const struct firmware *fw) +{ + struct i2c_client *client = iqs62x->client; + struct iqs62x_fw_rec *fw_rec; + struct iqs62x_fw_blk *fw_blk; + unsigned int val; + size_t pos = 0; + int ret = 0; + u8 mask, len, *data; + u8 hall_cal_index = 0; + + while (pos < fw->size) { + if (pos + sizeof(*fw_rec) > fw->size) { + ret = -EINVAL; + break; + } + fw_rec = (struct iqs62x_fw_rec *)(fw->data + pos); + pos += sizeof(*fw_rec); + + if (pos + fw_rec->len - 1 > fw->size) { + ret = -EINVAL; + break; + } + pos += fw_rec->len - 1; + + switch (fw_rec->type) { + case IQS62X_FW_REC_TYPE_INFO: + continue; + + case IQS62X_FW_REC_TYPE_PROD: + if (fw_rec->data == iqs62x->dev_desc->prod_num) + continue; + + dev_err(&client->dev, + "Incompatible product number: 0x%02X\n", + fw_rec->data); + ret = -EINVAL; + break; + + case IQS62X_FW_REC_TYPE_HALL: + if (!hall_cal_index) { + ret = regmap_write(iqs62x->regmap, + IQS62X_OTP_CMD, + IQS62X_OTP_CMD_FG3); + if (ret) + break; + + ret = regmap_read(iqs62x->regmap, + IQS62X_OTP_DATA, &val); + if (ret) + break; + + hall_cal_index = val & IQS62X_HALL_CAL_MASK; + if (!hall_cal_index) { + 
dev_err(&client->dev, + "Uncalibrated device\n"); + ret = -ENODATA; + break; + } + } + + if (hall_cal_index > fw_rec->len) { + ret = -EINVAL; + break; + } + + mask = 0; + data = &fw_rec->data + hall_cal_index - 1; + len = sizeof(*data); + break; + + case IQS62X_FW_REC_TYPE_MASK: + if (fw_rec->len < (sizeof(mask) + sizeof(*data))) { + ret = -EINVAL; + break; + } + + mask = fw_rec->data; + data = &fw_rec->data + sizeof(mask); + len = sizeof(*data); + break; + + case IQS62X_FW_REC_TYPE_DATA: + mask = 0; + data = &fw_rec->data; + len = fw_rec->len; + break; + + default: + dev_err(&client->dev, + "Unrecognized record type: 0x%02X\n", + fw_rec->type); + ret = -EINVAL; + } + + if (ret) + break; + + fw_blk = devm_kzalloc(&client->dev, + struct_size(fw_blk, data, len), + GFP_KERNEL); + if (!fw_blk) { + ret = -ENOMEM; + break; + } + + fw_blk->addr = fw_rec->addr; + fw_blk->mask = mask; + fw_blk->len = len; + memcpy(fw_blk->data, data, len); + + list_add(&fw_blk->list, &iqs62x->fw_blk_head); + } + + release_firmware(fw); + + return ret; +} + +const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS] = { + [IQS62X_EVENT_PROX_CH0_T] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(4), + .val = BIT(4), + }, + [IQS62X_EVENT_PROX_CH0_P] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(0), + .val = BIT(0), + }, + [IQS62X_EVENT_PROX_CH1_T] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(5), + .val = BIT(5), + }, + [IQS62X_EVENT_PROX_CH1_P] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(1), + .val = BIT(1), + }, + [IQS62X_EVENT_PROX_CH2_T] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(6), + .val = BIT(6), + }, + [IQS62X_EVENT_PROX_CH2_P] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(2), + .val = BIT(2), + }, + [IQS62X_EVENT_HYST_POS_T] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(6) | BIT(7), + .val = BIT(6), + }, + [IQS62X_EVENT_HYST_POS_P] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(5) | BIT(7), + .val = BIT(5), + }, + [IQS62X_EVENT_HYST_NEG_T] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(6) | BIT(7), + .val = BIT(6) | BIT(7), + }, + [IQS62X_EVENT_HYST_NEG_P] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(5) | BIT(7), + .val = BIT(5) | BIT(7), + }, + [IQS62X_EVENT_SAR1_ACT] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(4), + .val = BIT(4), + }, + [IQS62X_EVENT_SAR1_QRD] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(2), + .val = BIT(2), + }, + [IQS62X_EVENT_SAR1_MOVE] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(1), + .val = BIT(1), + }, + [IQS62X_EVENT_SAR1_HALT] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(0), + .val = BIT(0), + }, + [IQS62X_EVENT_WHEEL_UP] = { + .reg = IQS62X_EVENT_WHEEL, + .mask = BIT(7) | BIT(6), + .val = BIT(7), + }, + [IQS62X_EVENT_WHEEL_DN] = { + .reg = IQS62X_EVENT_WHEEL, + .mask = BIT(7) | BIT(6), + .val = BIT(7) | BIT(6), + }, + [IQS62X_EVENT_HALL_N_T] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(2) | BIT(0), + .val = BIT(2), + }, + [IQS62X_EVENT_HALL_N_P] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(1) | BIT(0), + .val = BIT(1), + }, + [IQS62X_EVENT_HALL_S_T] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(2) | BIT(0), + .val = BIT(2) | BIT(0), + }, + [IQS62X_EVENT_HALL_S_P] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(1) | BIT(0), + .val = BIT(1) | BIT(0), + }, + [IQS62X_EVENT_SYS_RESET] = { + .reg = IQS62X_EVENT_SYS, + .mask = BIT(7), + .val = BIT(7), + }, +}; +EXPORT_SYMBOL_GPL(iqs62x_events); + +static irqreturn_t iqs62x_irq(int irq, void *context) +{ + struct iqs62x_core *iqs62x = context; + struct i2c_client *client = iqs62x->client; + struct iqs62x_event_data 
event_data; + struct iqs62x_event_desc event_desc; + enum iqs62x_event_reg event_reg; + unsigned long event_flags = 0; + int ret, i, j; + u8 event_map[IQS62X_EVENT_SIZE]; + + /* + * The device asserts the RDY output to signal the beginning of a + * communication window, which is closed by an I2C stop condition. + * As such, all interrupt status is captured in a single read and + * broadcast to any interested sub-device drivers. + */ + ret = regmap_raw_read(iqs62x->regmap, IQS62X_SYS_FLAGS, event_map, + sizeof(event_map)); + if (ret) { + dev_err(&client->dev, "Failed to read device status: %d\n", + ret); + return IRQ_NONE; + } + + for (i = 0; i < sizeof(event_map); i++) { + event_reg = iqs62x->dev_desc->event_regs[iqs62x->ui_sel][i]; + + switch (event_reg) { + case IQS62X_EVENT_UI_LO: + event_data.ui_data = get_unaligned_le16(&event_map[i]); + + /* fall through */ + + case IQS62X_EVENT_UI_HI: + case IQS62X_EVENT_NONE: + continue; + + case IQS62X_EVENT_ALS: + event_data.als_flags = event_map[i]; + continue; + + case IQS62X_EVENT_IR: + event_data.ir_flags = event_map[i]; + continue; + + case IQS62X_EVENT_INTER: + event_data.interval = event_map[i]; + continue; + + case IQS62X_EVENT_HYST: + event_map[i] <<= iqs62x->dev_desc->hyst_shift; + + /* fall through */ + + case IQS62X_EVENT_WHEEL: + case IQS62X_EVENT_HALL: + case IQS62X_EVENT_PROX: + case IQS62X_EVENT_SYS: + break; + } + + for (j = 0; j < IQS62X_NUM_EVENTS; j++) { + event_desc = iqs62x_events[j]; + + if (event_desc.reg != event_reg) + continue; + + if ((event_map[i] & event_desc.mask) == event_desc.val) + event_flags |= BIT(j); + } + } + + /* + * The device resets itself in response to the I2C master stalling + * communication past a fixed timeout. In this case, all registers + * are restored and any interested sub-device drivers are notified. + */ + if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) { + dev_err(&client->dev, "Unexpected device reset\n"); + + ret = iqs62x_dev_init(iqs62x); + if (ret) { + dev_err(&client->dev, + "Failed to re-initialize device: %d\n", ret); + return IRQ_NONE; + } + } + + ret = blocking_notifier_call_chain(&iqs62x->nh, event_flags, + &event_data); + if (ret & NOTIFY_STOP_MASK) + return IRQ_NONE; + + /* + * Once the communication window is closed, a small delay is added to + * ensure the device's RDY output has been deasserted by the time the + * interrupt handler returns. 
+ */ + usleep_range(50, 100); + + return IRQ_HANDLED; +} + +static void iqs62x_firmware_load(const struct firmware *fw, void *context) +{ + struct iqs62x_core *iqs62x = context; + struct i2c_client *client = iqs62x->client; + int ret; + + if (fw) { + ret = iqs62x_firmware_parse(iqs62x, fw); + if (ret) { + dev_err(&client->dev, "Failed to parse firmware: %d\n", + ret); + goto err_out; + } + } + + ret = iqs62x_dev_init(iqs62x); + if (ret) { + dev_err(&client->dev, "Failed to initialize device: %d\n", ret); + goto err_out; + } + + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, iqs62x_irq, IRQF_ONESHOT, + client->name, iqs62x); + if (ret) { + dev_err(&client->dev, "Failed to request IRQ: %d\n", ret); + goto err_out; + } + + ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE, + iqs62x->dev_desc->sub_devs, + iqs62x->dev_desc->num_sub_devs, + NULL, 0, NULL); + if (ret) + dev_err(&client->dev, "Failed to add sub-devices: %d\n", ret); + +err_out: + complete_all(&iqs62x->fw_done); +} + +static const struct mfd_cell iqs620at_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs620a-keys", + }, + { + .name = "iqs620a-pwm", + .of_compatible = "azoteq,iqs620a-pwm", + }, + { .name = "iqs620at-temp", }, +}; + +static const struct mfd_cell iqs620a_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs620a-keys", + }, + { + .name = "iqs620a-pwm", + .of_compatible = "azoteq,iqs620a-pwm", + }, +}; + +static const struct mfd_cell iqs621_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs621-keys", + }, + { .name = "iqs621-als", }, +}; + +static const struct mfd_cell iqs622_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs622-keys", + }, + { .name = "iqs621-als", }, +}; + +static const struct mfd_cell iqs624_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs624-keys", + }, + { .name = "iqs624-pos", }, +}; + +static const struct mfd_cell iqs625_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs625-keys", + }, + { .name = "iqs624-pos", }, +}; + +static const u8 iqs620at_cal_regs[] = { + IQS620_TEMP_CAL_MULT, + IQS620_TEMP_CAL_DIV, + IQS620_TEMP_CAL_OFFS, +}; + +static const u8 iqs621_cal_regs[] = { + IQS621_ALS_CAL_DIV_LUX, + IQS621_ALS_CAL_DIV_IR, +}; + +static const enum iqs62x_event_reg iqs620a_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 0x12 */ + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HALL, /* 0x16 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + }, + [IQS62X_UI_SAR1] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HALL, /* 0x16 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + }, +}; + +static const enum iqs62x_event_reg iqs621_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 0x12 */ + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_ALS, /* 0x16 */ + IQS62X_EVENT_UI_LO, /* 0x17 */ + IQS62X_EVENT_UI_HI, /* 0x18 */ + IQS62X_EVENT_HALL, /* 0x19 */ + }, +}; + +static const enum iqs62x_event_reg iqs622_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 
0x12 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_ALS, /* 0x14 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_IR, /* 0x16 */ + IQS62X_EVENT_UI_LO, /* 0x17 */ + IQS62X_EVENT_UI_HI, /* 0x18 */ + IQS62X_EVENT_HALL, /* 0x19 */ + }, + [IQS62X_UI_SAR1] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_ALS, /* 0x14 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_IR, /* 0x16 */ + IQS62X_EVENT_UI_LO, /* 0x17 */ + IQS62X_EVENT_UI_HI, /* 0x18 */ + IQS62X_EVENT_HALL, /* 0x19 */ + }, +}; + +static const enum iqs62x_event_reg iqs624_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 0x12 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_WHEEL, /* 0x14 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_UI_LO, /* 0x16 */ + IQS62X_EVENT_UI_HI, /* 0x17 */ + IQS62X_EVENT_INTER, /* 0x18 */ + IQS62X_EVENT_NONE, + }, +}; + +static const enum iqs62x_event_reg iqs625_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_PROX, /* 0x11 */ + IQS62X_EVENT_INTER, /* 0x12 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + }, +}; + +static const struct iqs62x_dev_desc iqs62x_devs[] = { + { + .dev_name = "iqs620at", + .sub_devs = iqs620at_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs620at_sub_devs), + + .prod_num = IQS620_PROD_NUM, + .sw_num = 0x08, + .cal_regs = iqs620at_cal_regs, + .num_cal_regs = ARRAY_SIZE(iqs620at_cal_regs), + + .prox_mask = BIT(0), + .sar_mask = BIT(1) | BIT(7), + .hall_mask = BIT(2), + .hyst_mask = BIT(3), + .temp_mask = BIT(4), + + .prox_settings = IQS620_PROX_SETTINGS_4, + .hall_flags = IQS620_HALL_FLAGS, + + .clk_div = 4, + .fw_name = "iqs620a.bin", + .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs620a", + .sub_devs = iqs620a_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs620a_sub_devs), + + .prod_num = IQS620_PROD_NUM, + .sw_num = 0x08, + + .prox_mask = BIT(0), + .sar_mask = BIT(1) | BIT(7), + .hall_mask = BIT(2), + .hyst_mask = BIT(3), + .temp_mask = BIT(4), + + .prox_settings = IQS620_PROX_SETTINGS_4, + .hall_flags = IQS620_HALL_FLAGS, + + .clk_div = 4, + .fw_name = "iqs620a.bin", + .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs621", + .sub_devs = iqs621_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs621_sub_devs), + + .prod_num = IQS621_PROD_NUM, + .sw_num = 0x09, + .cal_regs = iqs621_cal_regs, + .num_cal_regs = ARRAY_SIZE(iqs621_cal_regs), + + .prox_mask = BIT(0), + .hall_mask = BIT(1), + .als_mask = BIT(2), + .hyst_mask = BIT(3), + .temp_mask = BIT(4), + + .als_flags = IQS621_ALS_FLAGS, + .hall_flags = IQS621_HALL_FLAGS, + .hyst_shift = 5, + + .clk_div = 2, + .fw_name = "iqs621.bin", + .event_regs = &iqs621_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs622", + .sub_devs = iqs622_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs622_sub_devs), + + .prod_num = IQS622_PROD_NUM, + .sw_num = 0x06, + + .prox_mask = BIT(0), + .sar_mask = BIT(1), + .hall_mask = BIT(2), + .als_mask = BIT(3), + .ir_mask = BIT(4), + + .prox_settings = IQS622_PROX_SETTINGS_4, + .als_flags = IQS622_ALS_FLAGS, + .hall_flags = IQS622_HALL_FLAGS, + + .clk_div = 2, + .fw_name = "iqs622.bin", + .event_regs = &iqs622_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs624", + .sub_devs = iqs624_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs624_sub_devs), + + .prod_num = IQS624_PROD_NUM, + .sw_num = 0x0B, + + 
.interval = IQS624_INTERVAL_NUM, + .interval_div = 3, + + .clk_div = 2, + .fw_name = "iqs624.bin", + .event_regs = &iqs624_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs625", + .sub_devs = iqs625_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs625_sub_devs), + + .prod_num = IQS625_PROD_NUM, + .sw_num = 0x0B, + + .interval = IQS625_INTERVAL_NUM, + .interval_div = 10, + + .clk_div = 2, + .fw_name = "iqs625.bin", + .event_regs = &iqs625_event_regs[IQS62X_UI_PROX], + }, +}; + +static const struct regmap_config iqs62x_map_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = IQS62X_MAX_REG, +}; + +static int iqs62x_probe(struct i2c_client *client) +{ + struct iqs62x_core *iqs62x; + struct iqs62x_info info; + unsigned int val; + int ret, i, j; + u8 sw_num = 0; + const char *fw_name = NULL; + + iqs62x = devm_kzalloc(&client->dev, sizeof(*iqs62x), GFP_KERNEL); + if (!iqs62x) + return -ENOMEM; + + i2c_set_clientdata(client, iqs62x); + iqs62x->client = client; + + BLOCKING_INIT_NOTIFIER_HEAD(&iqs62x->nh); + INIT_LIST_HEAD(&iqs62x->fw_blk_head); + init_completion(&iqs62x->fw_done); + + iqs62x->regmap = devm_regmap_init_i2c(client, &iqs62x_map_config); + if (IS_ERR(iqs62x->regmap)) { + ret = PTR_ERR(iqs62x->regmap); + dev_err(&client->dev, "Failed to initialize register map: %d\n", + ret); + return ret; + } + + ret = regmap_raw_read(iqs62x->regmap, IQS62X_PROD_NUM, &info, + sizeof(info)); + if (ret) + return ret; + + /* + * The following sequence validates the device's product and software + * numbers. It then determines if the device is factory-calibrated by + * checking for nonzero values in the device's designated calibration + * registers (if applicable). Depending on the device, the absence of + * calibration data indicates a reduced feature set or invalid device. + * + * For devices given in both calibrated and uncalibrated versions, the + * calibrated version (e.g. IQS620AT) appears first in the iqs62x_devs + * array. The uncalibrated version (e.g. IQS620A) appears next and has + * the same product and software numbers, but no calibration registers + * are specified. + */ + for (i = 0; i < ARRAY_SIZE(iqs62x_devs); i++) { + if (info.prod_num != iqs62x_devs[i].prod_num) + continue; + + iqs62x->dev_desc = &iqs62x_devs[i]; + + if (info.sw_num < iqs62x->dev_desc->sw_num) + continue; + + sw_num = info.sw_num; + + /* + * Read each of the device's designated calibration registers, + * if any, and exit from the inner loop early if any are equal + * to zero (indicating the device is uncalibrated). This could + * be acceptable depending on the device (e.g. IQS620A instead + * of IQS620AT). + */ + for (j = 0; j < iqs62x->dev_desc->num_cal_regs; j++) { + ret = regmap_read(iqs62x->regmap, + iqs62x->dev_desc->cal_regs[j], &val); + if (ret) + return ret; + + if (!val) + break; + } + + /* + * If the number of nonzero values read from the device equals + * the number of designated calibration registers (which could + * be zero), exit from the outer loop early to signal that the + * device's product and software numbers match a known device, + * and the device is calibrated (if applicable). 
+ */ + if (j == iqs62x->dev_desc->num_cal_regs) + break; + } + + if (!iqs62x->dev_desc) { + dev_err(&client->dev, "Unrecognized product number: 0x%02X\n", + info.prod_num); + return -EINVAL; + } + + if (!sw_num) { + dev_err(&client->dev, "Unrecognized software number: 0x%02X\n", + info.sw_num); + return -EINVAL; + } + + if (i == ARRAY_SIZE(iqs62x_devs)) { + dev_err(&client->dev, "Uncalibrated device\n"); + return -ENODATA; + } + + device_property_read_string(&client->dev, "firmware-name", &fw_name); + + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + fw_name ? : iqs62x->dev_desc->fw_name, + &client->dev, GFP_KERNEL, iqs62x, + iqs62x_firmware_load); + if (ret) + dev_err(&client->dev, "Failed to request firmware: %d\n", ret); + + return ret; +} + +static int iqs62x_remove(struct i2c_client *client) +{ + struct iqs62x_core *iqs62x = i2c_get_clientdata(client); + + wait_for_completion(&iqs62x->fw_done); + + return 0; +} + +static int __maybe_unused iqs62x_suspend(struct device *dev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(dev); + int ret; + + wait_for_completion(&iqs62x->fw_done); + + /* + * As per the datasheet, automatic mode switching must be disabled + * before the device is placed in or taken out of halt mode. + */ + ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_DIS_AUTO, 0xFF); + if (ret) + return ret; + + return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_PWR_MODE_MASK, + IQS62X_PWR_SETTINGS_PWR_MODE_HALT); +} + +static int __maybe_unused iqs62x_resume(struct device *dev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(dev); + int ret; + + ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_PWR_MODE_MASK, + IQS62X_PWR_SETTINGS_PWR_MODE_NORM); + if (ret) + return ret; + + return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_DIS_AUTO, 0); +} + +static SIMPLE_DEV_PM_OPS(iqs62x_pm, iqs62x_suspend, iqs62x_resume); + +static const struct of_device_id iqs62x_of_match[] = { + { .compatible = "azoteq,iqs620a" }, + { .compatible = "azoteq,iqs621" }, + { .compatible = "azoteq,iqs622" }, + { .compatible = "azoteq,iqs624" }, + { .compatible = "azoteq,iqs625" }, + { } +}; +MODULE_DEVICE_TABLE(of, iqs62x_of_match); + +static struct i2c_driver iqs62x_i2c_driver = { + .driver = { + .name = "iqs62x", + .of_match_table = iqs62x_of_match, + .pm = &iqs62x_pm, + }, + .probe_new = iqs62x_probe, + .remove = iqs62x_remove, +}; +module_i2c_driver(iqs62x_i2c_driver); + +MODULE_AUTHOR("Jeff LaBundy "); +MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Multi-Function Sensors"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h new file mode 100644 index 000000000000..043d3b6de9ec --- /dev/null +++ b/include/linux/mfd/iqs62x.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors + * + * Copyright (C) 2019 Jeff LaBundy + */ + +#ifndef __LINUX_MFD_IQS62X_H +#define __LINUX_MFD_IQS62X_H + +#define IQS620_PROD_NUM 0x41 +#define IQS621_PROD_NUM 0x46 +#define IQS622_PROD_NUM 0x42 +#define IQS624_PROD_NUM 0x43 +#define IQS625_PROD_NUM 0x4E + +#define IQS621_ALS_FLAGS 0x16 +#define IQS622_ALS_FLAGS 0x14 + +#define IQS624_HALL_UI 0x70 +#define IQS624_HALL_UI_WHL_EVENT BIT(4) +#define IQS624_HALL_UI_INT_EVENT BIT(3) +#define IQS624_HALL_UI_AUTO_CAL BIT(2) + +#define IQS624_INTERVAL_DIV 0x7D + +#define IQS620_GLBL_EVENT_MASK 0xD7 +#define 
IQS620_GLBL_EVENT_MASK_PMU BIT(6) + +#define IQS62X_NUM_KEYS 16 +#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 5) + +#define IQS62X_EVENT_SIZE 10 + +enum iqs62x_ui_sel { + IQS62X_UI_PROX, + IQS62X_UI_SAR1, +}; + +enum iqs62x_event_reg { + IQS62X_EVENT_NONE, + IQS62X_EVENT_SYS, + IQS62X_EVENT_PROX, + IQS62X_EVENT_HYST, + IQS62X_EVENT_HALL, + IQS62X_EVENT_ALS, + IQS62X_EVENT_IR, + IQS62X_EVENT_WHEEL, + IQS62X_EVENT_INTER, + IQS62X_EVENT_UI_LO, + IQS62X_EVENT_UI_HI, +}; + +enum iqs62x_event_flag { + /* keys */ + IQS62X_EVENT_PROX_CH0_T, + IQS62X_EVENT_PROX_CH0_P, + IQS62X_EVENT_PROX_CH1_T, + IQS62X_EVENT_PROX_CH1_P, + IQS62X_EVENT_PROX_CH2_T, + IQS62X_EVENT_PROX_CH2_P, + IQS62X_EVENT_HYST_POS_T, + IQS62X_EVENT_HYST_POS_P, + IQS62X_EVENT_HYST_NEG_T, + IQS62X_EVENT_HYST_NEG_P, + IQS62X_EVENT_SAR1_ACT, + IQS62X_EVENT_SAR1_QRD, + IQS62X_EVENT_SAR1_MOVE, + IQS62X_EVENT_SAR1_HALT, + IQS62X_EVENT_WHEEL_UP, + IQS62X_EVENT_WHEEL_DN, + + /* switches */ + IQS62X_EVENT_HALL_N_T, + IQS62X_EVENT_HALL_N_P, + IQS62X_EVENT_HALL_S_T, + IQS62X_EVENT_HALL_S_P, + + /* everything else */ + IQS62X_EVENT_SYS_RESET, +}; + +struct iqs62x_event_data { + u16 ui_data; + u8 als_flags; + u8 ir_flags; + u8 interval; +}; + +struct iqs62x_event_desc { + enum iqs62x_event_reg reg; + u8 mask; + u8 val; +}; + +struct iqs62x_dev_desc { + const char *dev_name; + const struct mfd_cell *sub_devs; + int num_sub_devs; + + u8 prod_num; + u8 sw_num; + const u8 *cal_regs; + int num_cal_regs; + + u8 prox_mask; + u8 sar_mask; + u8 hall_mask; + u8 hyst_mask; + u8 temp_mask; + u8 als_mask; + u8 ir_mask; + + u8 prox_settings; + u8 als_flags; + u8 hall_flags; + u8 hyst_shift; + + u8 interval; + u8 interval_div; + + u8 clk_div; + const char *fw_name; + const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE]; +}; + +struct iqs62x_core { + const struct iqs62x_dev_desc *dev_desc; + struct i2c_client *client; + struct regmap *regmap; + struct blocking_notifier_head nh; + struct list_head fw_blk_head; + struct completion fw_done; + enum iqs62x_ui_sel ui_sel; +}; + +extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS]; + +#endif /* __LINUX_MFD_IQS62X_H */ -- cgit v1.2.3-58-ga151 From 0c81604516afc0f3aedbb4dcf8215df7e5c7ef32 Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Fri, 20 Mar 2020 09:11:00 +0100 Subject: mfd: rn5t618: Add IRQ support This adds support for IRQ handling in the RC5T619 which is required for properly implementing subdevices like RTC. For now only definitions for the variant RC5T619 are included. Signed-off-by: Andreas Kemnade Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 1 + drivers/mfd/rn5t618.c | 78 ++++++++++++++++++++++++++++++++++++++++++++- include/linux/mfd/rn5t618.h | 15 +++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2b203290e7b9..a7067888a41e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1058,6 +1058,7 @@ config MFD_RN5T618 depends on OF select MFD_CORE select REGMAP_I2C + select REGMAP_IRQ help Say yes here to add support for the Ricoh RN5T567, RN5T618, RC5T619 PMIC. 
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c index ead2e79036a9..b66dc4605d56 100644 --- a/drivers/mfd/rn5t618.c +++ b/drivers/mfd/rn5t618.c @@ -8,6 +8,8 @@ #include #include +#include +#include #include #include #include @@ -46,9 +48,56 @@ static const struct regmap_config rn5t618_regmap_config = { .cache_type = REGCACHE_RBTREE, }; +static const struct regmap_irq rc5t619_irqs[] = { + REGMAP_IRQ_REG(RN5T618_IRQ_SYS, 0, BIT(0)), + REGMAP_IRQ_REG(RN5T618_IRQ_DCDC, 0, BIT(1)), + REGMAP_IRQ_REG(RN5T618_IRQ_RTC, 0, BIT(2)), + REGMAP_IRQ_REG(RN5T618_IRQ_ADC, 0, BIT(3)), + REGMAP_IRQ_REG(RN5T618_IRQ_GPIO, 0, BIT(4)), + REGMAP_IRQ_REG(RN5T618_IRQ_CHG, 0, BIT(6)), +}; + +static const struct regmap_irq_chip rc5t619_irq_chip = { + .name = "rc5t619", + .irqs = rc5t619_irqs, + .num_irqs = ARRAY_SIZE(rc5t619_irqs), + .num_regs = 1, + .status_base = RN5T618_INTMON, + .mask_base = RN5T618_INTEN, + .mask_invert = true, +}; + static struct rn5t618 *rn5t618_pm_power_off; static struct notifier_block rn5t618_restart_handler; +static int rn5t618_irq_init(struct rn5t618 *rn5t618) +{ + const struct regmap_irq_chip *irq_chip = NULL; + int ret; + + if (!rn5t618->irq) + return 0; + + switch (rn5t618->variant) { + case RC5T619: + irq_chip = &rc5t619_irq_chip; + break; + default: + dev_err(rn5t618->dev, "Currently no IRQ support for variant %d\n", + (int)rn5t618->variant); + return -ENOENT; + } + + ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap, + rn5t618->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + 0, irq_chip, &rn5t618->irq_data); + if (ret) + dev_err(rn5t618->dev, "Failed to register IRQ chip\n"); + + return ret; +} + static void rn5t618_trigger_poweroff_sequence(bool repower) { /* disable automatic repower-on */ @@ -106,6 +155,8 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c, i2c_set_clientdata(i2c, priv); priv->variant = (long)of_id->data; + priv->irq = i2c->irq; + priv->dev = &i2c->dev; priv->regmap = devm_regmap_init_i2c(i2c, &rn5t618_regmap_config); if (IS_ERR(priv->regmap)) { @@ -138,7 +189,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c, return ret; } - return 0; + return rn5t618_irq_init(priv); } static int rn5t618_i2c_remove(struct i2c_client *i2c) @@ -155,15 +206,40 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c) return 0; } +static int __maybe_unused rn5t618_i2c_suspend(struct device *dev) +{ + struct rn5t618 *priv = dev_get_drvdata(dev); + + if (priv->irq) + disable_irq(priv->irq); + + return 0; +} + +static int __maybe_unused rn5t618_i2c_resume(struct device *dev) +{ + struct rn5t618 *priv = dev_get_drvdata(dev); + + if (priv->irq) + enable_irq(priv->irq); + + return 0; +} + static const struct i2c_device_id rn5t618_i2c_id[] = { { } }; MODULE_DEVICE_TABLE(i2c, rn5t618_i2c_id); +static SIMPLE_DEV_PM_OPS(rn5t618_i2c_dev_pm_ops, + rn5t618_i2c_suspend, + rn5t618_i2c_resume); + static struct i2c_driver rn5t618_i2c_driver = { .driver = { .name = "rn5t618", .of_match_table = of_match_ptr(rn5t618_of_match), + .pm = &rn5t618_i2c_dev_pm_ops, }, .probe = rn5t618_i2c_probe, .remove = rn5t618_i2c_remove, diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index d62ef48060b5..739571656f2b 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -242,9 +242,24 @@ enum { RC5T619, }; +/* RN5T618 IRQ definitions */ +enum { + RN5T618_IRQ_SYS = 0, + RN5T618_IRQ_DCDC, + RN5T618_IRQ_RTC, + RN5T618_IRQ_ADC, + RN5T618_IRQ_GPIO, + RN5T618_IRQ_CHG, + RN5T618_NR_IRQS, +}; + struct rn5t618 { struct regmap *regmap; + struct device 
*dev; long variant; + + int irq; + struct regmap_irq_chip_data *irq_data; }; #endif /* __LINUX_MFD_RN5T618_H */ -- cgit v1.2.3-58-ga151 From 11027ce6f1d2a20af16b0eae3d21d3ab78089262 Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Fri, 20 Mar 2020 09:11:01 +0100 Subject: mfd: rn5t618: Add RTC related registers Defines for some RTC-related registers were missing; they were also not included in the volatile register list. Signed-off-by: Andreas Kemnade Signed-off-by: Lee Jones --- drivers/mfd/rn5t618.c | 2 ++ include/linux/mfd/rn5t618.h | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c index b66dc4605d56..7686cc36e8c0 100644 --- a/drivers/mfd/rn5t618.c +++ b/drivers/mfd/rn5t618.c @@ -34,6 +34,8 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg) case RN5T618_IR_GPF: case RN5T618_MON_IOIN: case RN5T618_INTMON: + case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2: + case RN5T618_RTC_SECONDS ... RN5T618_RTC_YEAR: return true; default: return false; diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index 739571656f2b..fba0df13d9a8 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -139,6 +139,17 @@ #define RN5T618_INTPOL 0x9c #define RN5T618_INTEN 0x9d #define RN5T618_INTMON 0x9e + +#define RN5T618_RTC_SECONDS 0xA0 +#define RN5T618_RTC_MDAY 0xA4 +#define RN5T618_RTC_MONTH 0xA5 +#define RN5T618_RTC_YEAR 0xA6 +#define RN5T618_RTC_ADJUST 0xA7 +#define RN5T618_RTC_ALARM_Y_SEC 0xA8 +#define RN5T618_RTC_DAL_MONTH 0xAC +#define RN5T618_RTC_CTRL1 0xAE +#define RN5T618_RTC_CTRL2 0xAF + #define RN5T618_PREVINDAC 0xb0 #define RN5T618_BATDAC 0xb1 #define RN5T618_CHGCTL1 0xb3 -- cgit v1.2.3-58-ga151 From 0008d0c3b1ab03b046b04b7bd9d70df1e2fffbfc Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 26 Mar 2020 16:08:26 +0100 Subject: iommu: Define dev_iommu_fwspec_get() for !CONFIG_IOMMU_API There are users outside of the IOMMU code that need to call that function. Define it for !CONFIG_IOMMU_API too so that compilation does not break. Reported-by: kbuild test robot Signed-off-by: Joerg Roedel Reviewed-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20200326150841.10083-2-joro@8bytes.org --- include/linux/iommu.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 4d1ba76c9a64..505163e9702a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1073,6 +1073,10 @@ static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, return -ENODEV; } +static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) +{ + return NULL; +} #endif /* CONFIG_IOMMU_API */ #ifdef CONFIG_IOMMU_DEBUGFS -- cgit v1.2.3-58-ga151 From 045a70426067d6a22e3e5745b55efc18fa75aabf Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 26 Mar 2020 16:08:30 +0100 Subject: iommu: Rename struct iommu_param to dev_iommu The term dev_iommu aligns better with other existing structures and their accessor functions.
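The change is purely mechanical for code holding a struct device pointer; a minimal before/after sketch, using only fields that exist in the structure:

	/* before this patch */
	struct iommu_param *param = dev->iommu_param;

	/* after this patch */
	struct dev_iommu *param = dev->iommu;

	mutex_lock(&param->lock);
	/* ... inspect or update param->fault_param ... */
	mutex_unlock(&param->lock);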
Signed-off-by: Joerg Roedel Tested-by: Will Deacon # arm-smmu Reviewed-by: Jean-Philippe Brucker Reviewed-by: Greg Kroah-Hartman Cc: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20200326150841.10083-6-joro@8bytes.org --- drivers/iommu/iommu.c | 28 ++++++++++++++-------------- include/linux/device.h | 6 +++--- include/linux/iommu.h | 4 ++-- 3 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 660eea8d1d2f..15d64a175d92 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -152,9 +152,9 @@ void iommu_device_unregister(struct iommu_device *iommu) } EXPORT_SYMBOL_GPL(iommu_device_unregister); -static struct iommu_param *iommu_get_dev_param(struct device *dev) +static struct dev_iommu *dev_iommu_get(struct device *dev) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; if (param) return param; @@ -164,14 +164,14 @@ static struct iommu_param *iommu_get_dev_param(struct device *dev) return NULL; mutex_init(¶m->lock); - dev->iommu_param = param; + dev->iommu = param; return param; } -static void iommu_free_dev_param(struct device *dev) +static void dev_iommu_free(struct device *dev) { - kfree(dev->iommu_param); - dev->iommu_param = NULL; + kfree(dev->iommu); + dev->iommu = NULL; } int iommu_probe_device(struct device *dev) @@ -183,7 +183,7 @@ int iommu_probe_device(struct device *dev) if (!ops) return -EINVAL; - if (!iommu_get_dev_param(dev)) + if (!dev_iommu_get(dev)) return -ENOMEM; if (!try_module_get(ops->owner)) { @@ -200,7 +200,7 @@ int iommu_probe_device(struct device *dev) err_module_put: module_put(ops->owner); err_free_dev_param: - iommu_free_dev_param(dev); + dev_iommu_free(dev); return ret; } @@ -211,9 +211,9 @@ void iommu_release_device(struct device *dev) if (dev->iommu_group) ops->remove_device(dev); - if (dev->iommu_param) { + if (dev->iommu) { module_put(ops->owner); - iommu_free_dev_param(dev); + dev_iommu_free(dev); } } @@ -972,7 +972,7 @@ int iommu_register_device_fault_handler(struct device *dev, iommu_dev_fault_handler_t handler, void *data) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; int ret = 0; if (!param) @@ -1015,7 +1015,7 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); */ int iommu_unregister_device_fault_handler(struct device *dev) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; int ret = 0; if (!param) @@ -1055,7 +1055,7 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); */ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; struct iommu_fault_event *evt_pending = NULL; struct iommu_fault_param *fparam; int ret = 0; @@ -1104,7 +1104,7 @@ int iommu_page_response(struct device *dev, int ret = -EINVAL; struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); if (!domain || !domain->ops->page_response) diff --git a/include/linux/device.h b/include/linux/device.h index 0cd7c647c16c..af621f9fe85b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -44,7 +44,7 @@ struct iommu_ops; struct iommu_group; struct iommu_fwspec; struct dev_pin_info; -struct iommu_param; +struct dev_iommu; /** * struct subsys_interface - interfaces to 
device functions @@ -514,7 +514,7 @@ struct dev_links_info { * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu_fwspec: IOMMU-specific properties supplied by firmware. - * @iommu_param: Per device generic IOMMU runtime data + * @iommu: Per device generic IOMMU runtime data * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). @@ -614,7 +614,7 @@ struct device { void (*release)(struct device *dev); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; - struct iommu_param *iommu_param; + struct dev_iommu *iommu; bool offline_disabled:1; bool offline:1; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 505163e9702a..843baaa65f10 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -365,7 +365,7 @@ struct iommu_fault_param { }; /** - * struct iommu_param - collection of per-device IOMMU data + * struct dev_iommu - Collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data * @@ -373,7 +373,7 @@ struct iommu_fault_param { * struct iommu_group *iommu_group; * struct iommu_fwspec *iommu_fwspec; */ -struct iommu_param { +struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; }; -- cgit v1.2.3-58-ga151 From 72acd9df18f12420001f901493c54b7364f34d60 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 26 Mar 2020 16:08:31 +0100 Subject: iommu: Move iommu_fwspec to struct dev_iommu Move the iommu_fwspec pointer in struct device into struct dev_iommu. This is a step in the effort to reduce the iommu related pointers in struct device to one. Signed-off-by: Joerg Roedel Tested-by: Will Deacon # arm-smmu Reviewed-by: Jean-Philippe Brucker Reviewed-by: Greg Kroah-Hartman Cc: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20200326150841.10083-7-joro@8bytes.org --- drivers/iommu/iommu.c | 3 +++ include/linux/device.h | 3 --- include/linux/iommu.h | 12 ++++++++---- 3 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 15d64a175d92..2b471419e26c 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2405,6 +2405,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, if (fwspec) return ops == fwspec->ops ? 0 : -EINVAL; + if (!dev_iommu_get(dev)) + return -ENOMEM; + /* Preallocate for the overwhelmingly common case of 1 ID */ fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); if (!fwspec) diff --git a/include/linux/device.h b/include/linux/device.h index af621f9fe85b..9610d0accd88 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -42,7 +42,6 @@ struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; -struct iommu_fwspec; struct dev_pin_info; struct dev_iommu; @@ -513,7 +512,6 @@ struct dev_links_info { * gone away. This should be set by the allocator of the * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. - * @iommu_fwspec: IOMMU-specific properties supplied by firmware. * @iommu: Per device generic IOMMU runtime data * * @offline_disabled: If set, the device is permanently online. 
@@ -613,7 +611,6 @@ struct device { void (*release)(struct device *dev); struct iommu_group *iommu_group; - struct iommu_fwspec *iommu_fwspec; struct dev_iommu *iommu; bool offline_disabled:1; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 843baaa65f10..d031ddc0596b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -368,14 +368,15 @@ struct iommu_fault_param { * struct dev_iommu - Collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data + * @fwspec: IOMMU fwspec data * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. * struct iommu_group *iommu_group; - * struct iommu_fwspec *iommu_fwspec; */ struct dev_iommu { struct mutex lock; - struct iommu_fault_param *fault_param; + struct iommu_fault_param *fault_param; + struct iommu_fwspec *fwspec; }; int iommu_device_register(struct iommu_device *iommu); @@ -614,13 +615,16 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { - return dev->iommu_fwspec; + if (dev->iommu) + return dev->iommu->fwspec; + else + return NULL; } static inline void dev_iommu_fwspec_set(struct device *dev, struct iommu_fwspec *fwspec) { - dev->iommu_fwspec = fwspec; + dev->iommu->fwspec = fwspec; } int iommu_probe_device(struct device *dev); -- cgit v1.2.3-58-ga151 From f9867f416ee721141e1664810516b8ebc2563cdd Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 26 Mar 2020 16:08:33 +0100 Subject: iommu: Introduce accessors for iommu private data Add dev_iommu_priv_get/set() functions to access per-device iommu private data. This makes it easier to move the pointer to a different location. Signed-off-by: Joerg Roedel Tested-by: Will Deacon # arm-smmu Reviewed-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20200326150841.10083-9-joro@8bytes.org --- include/linux/iommu.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index d031ddc0596b..49e3173260b3 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -627,6 +627,16 @@ static inline void dev_iommu_fwspec_set(struct device *dev, dev->iommu->fwspec = fwspec; } +static inline void *dev_iommu_priv_get(struct device *dev) +{ + return dev->iommu->fwspec->iommu_priv; +} + +static inline void dev_iommu_priv_set(struct device *dev, void *priv) +{ + dev->iommu->fwspec->iommu_priv = priv; +} + int iommu_probe_device(struct device *dev); void iommu_release_device(struct device *dev); -- cgit v1.2.3-58-ga151 From 986d5ecc56999800a5d112a70e88522d9212aefd Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 26 Mar 2020 16:08:41 +0100 Subject: iommu: Move fwspec->iommu_priv to struct dev_iommu Move the pointer for iommu private data from struct iommu_fwspec to struct dev_iommu. 
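Thanks to the accessors introduced two patches earlier, IOMMU drivers are insulated from this move. A minimal sketch of the intended usage pattern, as two separate call sites; the per-device structure is invented here for illustration:

	/* attach/add_device path: stash driver-private data */
	struct my_dev_data *data = kzalloc(sizeof(*data), GFP_KERNEL); /* hypothetical type */
	if (!data)
		return -ENOMEM;
	dev_iommu_priv_set(dev, data);

	/* release path: fetch it back and free it */
	struct my_dev_data *data = dev_iommu_priv_get(dev);
	dev_iommu_priv_set(dev, NULL);
	kfree(data);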
Signed-off-by: Joerg Roedel Tested-by: Will Deacon # arm-smmu Reviewed-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20200326150841.10083-17-joro@8bytes.org --- include/linux/iommu.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 49e3173260b3..7ef8b0bda695 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -369,6 +369,7 @@ struct iommu_fault_param { * * @fault_param: IOMMU detected device fault reporting data * @fwspec: IOMMU fwspec data + * @priv: IOMMU Driver private data * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. * struct iommu_group *iommu_group; @@ -377,6 +378,7 @@ struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; struct iommu_fwspec *fwspec; + void *priv; }; int iommu_device_register(struct iommu_device *iommu); @@ -589,7 +591,6 @@ struct iommu_group *fsl_mc_device_group(struct device *dev); struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; - void *iommu_priv; u32 flags; u32 num_pasid_bits; unsigned int num_ids; @@ -629,12 +630,12 @@ static inline void dev_iommu_fwspec_set(struct device *dev, static inline void *dev_iommu_priv_get(struct device *dev) { - return dev->iommu->fwspec->iommu_priv; + return dev->iommu->priv; } static inline void dev_iommu_priv_set(struct device *dev, void *priv) { - dev->iommu->fwspec->iommu_priv = priv; + dev->iommu->priv = priv; } int iommu_probe_device(struct device *dev); -- cgit v1.2.3-58-ga151 From 6546b19f95acc986807de981402bbac6b3a94b0f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 25 Mar 2020 21:45:29 +0900 Subject: perf/core: Add PERF_SAMPLE_CGROUP feature The PERF_SAMPLE_CGROUP bit saves (perf_event) cgroup information in the sample. It adds a 64-bit id that identifies the current cgroup; the id is the file handle in the cgroup file system. Userspace should use this information together with PERF_RECORD_CGROUP events to determine which cgroup a sample belongs to. I put it before PERF_SAMPLE_AUX for simplicity, since it only needs a 64-bit word. But if we want bigger samples, I can work in that direction too.
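As a rough userspace illustration (not part of this patch, error handling omitted), a profiler requests the new field like any other sample_type bit; each sample record then carries an extra u64 cgroup id that can be matched against the ids announced by PERF_RECORD_CGROUP events:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct perf_event_attr attr = {
		.size = sizeof(attr),
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_period = 100000,
		.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			       PERF_SAMPLE_CGROUP,
	};

	/* pid == -1, cpu == 0: system-wide sampling on CPU 0 (privileged) */
	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);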
Committer testing: $ pahole perf_sample_data | grep -w cgroup -B5 -A5 /* --- cacheline 4 boundary (256 bytes) was 56 bytes ago --- */ struct perf_regs regs_intr; /* 312 16 */ /* --- cacheline 5 boundary (320 bytes) was 8 bytes ago --- */ u64 stack_user_size; /* 328 8 */ u64 phys_addr; /* 336 8 */ u64 cgroup; /* 344 8 */ /* size: 384, cachelines: 6, members: 22 */ /* padding: 32 */ }; $ Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Acked-by: Peter Zijlstra (Intel) Acked-by: Tejun Heo Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Johannes Weiner Cc: Mark Rutland Cc: Zefan Li Link: http://lore.kernel.org/lkml/20200325124536.2800725-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/perf_event.h | 1 + include/uapi/linux/perf_event.h | 3 ++- init/Kconfig | 3 ++- kernel/events/core.c | 22 ++++++++++++++++++++++ 4 files changed, 27 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8768a39b5258..9c3e7619c929 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1020,6 +1020,7 @@ struct perf_sample_data { u64 stack_user_size; u64 phys_addr; + u64 cgroup; } ____cacheline_aligned; /* default value for data source */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index de95f6c7b273..7b2d6fc9e6ed 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -142,8 +142,9 @@ enum perf_event_sample_format { PERF_SAMPLE_REGS_INTR = 1U << 18, PERF_SAMPLE_PHYS_ADDR = 1U << 19, PERF_SAMPLE_AUX = 1U << 20, + PERF_SAMPLE_CGROUP = 1U << 21, - PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; diff --git a/init/Kconfig b/init/Kconfig index 20a6ac33761c..7766b06a0038 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1027,7 +1027,8 @@ config CGROUP_PERF help This option extends the perf per-cpu mode to restrict monitoring to threads which belong to the cgroup specified and run on the - designated cpu. + designated cpu. Or this can be used to have cgroup ID in samples + so that it can monitor performance events among cgroups. Say N if unsure. 
diff --git a/kernel/events/core.c b/kernel/events/core.c index 994932d5e474..1569979c8912 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1862,6 +1862,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type) if (sample_type & PERF_SAMPLE_PHYS_ADDR) size += sizeof(data->phys_addr); + if (sample_type & PERF_SAMPLE_CGROUP) + size += sizeof(data->cgroup); + event->header_size = size; } @@ -6867,6 +6870,9 @@ void perf_output_sample(struct perf_output_handle *handle, if (sample_type & PERF_SAMPLE_PHYS_ADDR) perf_output_put(handle, data->phys_addr); + if (sample_type & PERF_SAMPLE_CGROUP) + perf_output_put(handle, data->cgroup); + if (sample_type & PERF_SAMPLE_AUX) { perf_output_put(handle, data->aux_size); @@ -7066,6 +7072,16 @@ void perf_prepare_sample(struct perf_event_header *header, if (sample_type & PERF_SAMPLE_PHYS_ADDR) data->phys_addr = perf_virt_to_phys(data->addr); +#ifdef CONFIG_CGROUP_PERF + if (sample_type & PERF_SAMPLE_CGROUP) { + struct cgroup *cgrp; + + /* protected by RCU */ + cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup; + data->cgroup = cgroup_id(cgrp); + } +#endif + if (sample_type & PERF_SAMPLE_AUX) { u64 size; @@ -11264,6 +11280,12 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, if (attr->sample_type & PERF_SAMPLE_REGS_INTR) ret = perf_reg_validate(attr->sample_regs_intr); + +#ifndef CONFIG_CGROUP_PERF + if (attr->sample_type & PERF_SAMPLE_CGROUP) + return -EINVAL; +#endif + out: return ret; -- cgit v1.2.3-58-ga151 From 9a81ef42b238b28829a46ecf13c7aacb79b9b3ac Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 27 Mar 2020 11:53:09 -0400 Subject: SUNRPC/cache: don't allow invalid entries to be flushed Trond points out in commit 277f27e2f277 ("SUNRPC/cache: Allow garbage collection of invalid cache entries") that we allow invalid cache entries to persist indefinitely. That fix, however, reintroduces the problem fixed by Kinglong Mee's commit d6fc8821c2d2 ("SUNRPC/Cache: Always treat the invalid cache as unexpired"), where an invalid cache entry is immediately removed by a flush before mountd responds to it. The result is that the server thread that should be waiting for mountd to fill in that entry instead gets an -ETIMEDOUT return from cache_check(). Symptoms are the server becoming unresponsive after a restart, reproducible by running pynfs 4.1 test REBT5. Instead, take a compromise approach: allow invalid cache entries to be removed after they expire, but not to be removed by a cache flush. Fixes: 277f27e2f277 ("SUNRPC/cache: Allow garbage collection ... ") Signed-off-by: J.
Bruce Fields Signed-off-by: Chuck Lever --- include/linux/sunrpc/cache.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 532cdbda43da..10891b70fc7b 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -209,8 +209,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { - return (h->expiry_time < seconds_since_boot()) || - (detail->flush_time >= h->last_refresh); + if (h->expiry_time < seconds_since_boot()) + return true; + if (!test_bit(CACHE_VALID, &h->flags)) + return false; + return detail->flush_time >= h->last_refresh; } extern int cache_check(struct cache_detail *detail, -- cgit v1.2.3-58-ga151 From c21e7168848d4ff4158120dbd4464f0d5cfb1456 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 19 Mar 2020 13:36:36 -0400 Subject: NFSv4/pnfs: Support a list of commit arrays in struct pnfs_ds_commit_info When we have multiple layout segments with different lists of mirrored data, we need to track the commits on a per layout segment basis. This patch adds a list to support this tracking in struct pnfs_ds_commit_info. Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 1 + fs/nfs/filelayout/filelayout.c | 5 ++++- fs/nfs/flexfilelayout/flexfilelayout.c | 1 + fs/nfs/pnfs.h | 11 +++++++++++ include/linux/nfs_xdr.h | 1 + 5 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ade2435551c8..f9a73febce02 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -305,6 +305,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void) kref_get(&dreq->kref); init_completion(&dreq->completion); INIT_LIST_HEAD(&dreq->mds_cinfo.list); + pnfs_init_ds_commit_info(&dreq->ds_cinfo); dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */ INIT_WORK(&dreq->work, nfs_direct_write_schedule_work); spin_lock_init(&dreq->lock); diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index bd234394a87c..b051d5d320ba 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -1140,7 +1140,10 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) struct nfs4_filelayout *flo; flo = kzalloc(sizeof(*flo), gfp_flags); - return flo != NULL ? 
&flo->generic_hdr : NULL; + if (flo == NULL) + return NULL; + pnfs_init_ds_commit_info(&flo->commit_info); + return &flo->generic_hdr; } static void diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 19728206e9c6..c9e79c8e62cd 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -48,6 +48,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) ffl = kzalloc(sizeof(*ffl), gfp_flags); if (ffl) { + pnfs_init_ds_commit_info(&ffl->commit_info); INIT_LIST_HEAD(&ffl->error_list); INIT_LIST_HEAD(&ffl->mirrors); ffl->last_report_time = ktime_get(); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index f6b1099aa151..b293afb48d04 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -462,6 +462,12 @@ pnfs_get_ds_info(struct inode *inode) return ld->get_ds_info(inode); } +static inline void +pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo) +{ + INIT_LIST_HEAD(&fl_cinfo->commits); +} + static inline void pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node) { @@ -759,6 +765,11 @@ pnfs_get_ds_info(struct inode *inode) return NULL; } +static inline void +pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo) +{ +} + static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, u32 ds_commit_idx) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e91c917c9c1c..9946787eda72 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1280,6 +1280,7 @@ struct pnfs_commit_array { }; struct pnfs_ds_commit_info { + struct list_head commits; unsigned int nwritten; unsigned int ncommitting; unsigned int nbuckets; -- cgit v1.2.3-58-ga151 From a9901899b649dc80ef75c14d6d78059cae14def7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 20 Mar 2020 16:04:06 -0400 Subject: pNFS: Add infrastructure for cleaning up per-layout commit structures Ensure that both the file and flexfiles layout types clean up when freeing the layout segments. 
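Distilled, the cleanup pattern both layout drivers adopt in their free_lseg paths below is to drop the per-lseg commit arrays under the inode lock; a sketch, where "layout" stands in for the driver-specific header (flo or ffl in the diffs that follow):

	/* sketch of the ->free_lseg cleanup added to both drivers */
	struct inode *inode = lseg->pls_layout->plh_inode;

	spin_lock(&inode->i_lock);
	pnfs_generic_ds_cinfo_release_lseg(&layout->commit_info, lseg);
	spin_unlock(&inode->i_lock);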
Signed-off-by: Trond Myklebust --- fs/nfs/filelayout/filelayout.c | 16 +++++++ fs/nfs/flexfilelayout/flexfilelayout.c | 11 +++++ fs/nfs/internal.h | 4 +- fs/nfs/pnfs.c | 1 + fs/nfs/pnfs.h | 4 ++ fs/nfs/pnfs_nfs.c | 88 ++++++++++++++++++++++++++++++++-- include/linux/nfs_xdr.h | 1 + 7 files changed, 121 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index b051d5d320ba..ffc5e2af1776 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -750,11 +750,16 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) /* This assumes a single RW lseg */ if (lseg->pls_range.iomode == IOMODE_RW) { struct nfs4_filelayout *flo; + struct inode *inode; flo = FILELAYOUT_FROM_HDR(lseg->pls_layout); + inode = flo->generic_hdr.plh_inode; + spin_lock(&inode->i_lock); flo->commit_info.nbuckets = 0; kfree(flo->commit_info.buckets); flo->commit_info.buckets = NULL; + pnfs_generic_ds_cinfo_release_lseg(&flo->commit_info, lseg); + spin_unlock(&inode->i_lock); } _filelayout_free_lseg(fl); } @@ -1163,6 +1168,16 @@ filelayout_get_ds_info(struct inode *inode) return &FILELAYOUT_FROM_HDR(layout)->commit_info; } +static void +filelayout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, + struct inode *inode) +{ + spin_lock(&inode->i_lock); + pnfs_generic_ds_cinfo_destroy(fl_cinfo); + spin_unlock(&inode->i_lock); +} + + static struct pnfs_layoutdriver_type filelayout_type = { .id = LAYOUT_NFSV4_1_FILES, .name = "LAYOUT_NFSV4_1_FILES", @@ -1176,6 +1191,7 @@ static struct pnfs_layoutdriver_type filelayout_type = { .pg_read_ops = &filelayout_pg_read_ops, .pg_write_ops = &filelayout_pg_write_ops, .get_ds_info = &filelayout_get_ds_info, + .release_ds_info = filelayout_release_ds_info, .mark_request_commit = filelayout_mark_request_commit, .clear_request_commit = pnfs_generic_clear_request_commit, .scan_commit_lists = pnfs_generic_scan_commit_lists, diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index c9e79c8e62cd..8e1393d75cbc 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -580,6 +580,7 @@ ff_layout_free_lseg(struct pnfs_layout_segment *lseg) kfree(ffl->commit_info.buckets); ffl->commit_info.buckets = NULL; } + pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg); spin_unlock(&inode->i_lock); } _ff_layout_free_lseg(fls); @@ -2003,6 +2004,15 @@ ff_layout_get_ds_info(struct inode *inode) return &FF_LAYOUT_FROM_HDR(layout)->commit_info; } +static void +ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, + struct inode *inode) +{ + spin_lock(&inode->i_lock); + pnfs_generic_ds_cinfo_destroy(fl_cinfo); + spin_unlock(&inode->i_lock); +} + static void ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d) { @@ -2503,6 +2513,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .pg_read_ops = &ff_layout_pg_read_ops, .pg_write_ops = &ff_layout_pg_write_ops, .get_ds_info = ff_layout_get_ds_info, + .release_ds_info = ff_layout_release_ds_info, .free_deviceid_node = ff_layout_free_deviceid_node, .mark_request_commit = pnfs_layout_mark_request_commit, .clear_request_commit = pnfs_generic_clear_request_commit, diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 4a1adad3740f..683146a51599 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -534,9 +534,11 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo) 
pnfs_bucket_clear_pnfs_ds_commit_verifiers(cinfo->buckets, cinfo->nbuckets); - list_for_each_entry(array, &cinfo->commits, cinfo_list) + rcu_read_lock(); + list_for_each_entry_rcu(array, &cinfo->commits, cinfo_list) pnfs_bucket_clear_pnfs_ds_commit_verifiers(array->buckets, array->nbuckets); + rcu_read_unlock(); } #else static inline diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 6b25117fca5f..eba18f137fb0 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -506,6 +506,7 @@ pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg, { INIT_LIST_HEAD(&lseg->pls_list); INIT_LIST_HEAD(&lseg->pls_lc_list); + INIT_LIST_HEAD(&lseg->pls_commits); refcount_set(&lseg->pls_refcount, 1); set_bit(NFS_LSEG_VALID, &lseg->pls_flags); lseg->pls_layout = lo; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 2ec97b419b56..6c48bd7b4640 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -66,6 +66,7 @@ struct nfs4_pnfs_ds { struct pnfs_layout_segment { struct list_head pls_list; struct list_head pls_lc_list; + struct list_head pls_commits; struct pnfs_layout_range pls_range; refcount_t pls_refcount; u32 pls_seq; @@ -370,6 +371,9 @@ void nfs4_deviceid_purge_client(const struct nfs_client *); /* pnfs_nfs.c */ struct pnfs_commit_array *pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags); void pnfs_free_commit_array(struct pnfs_commit_array *p); +void pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo, + struct pnfs_layout_segment *lseg); +void pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo); void pnfs_generic_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index f895a28b1e26..edad251a6a48 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -118,6 +118,67 @@ pnfs_free_commit_array(struct pnfs_commit_array *p) } EXPORT_SYMBOL_GPL(pnfs_free_commit_array); +static void +pnfs_release_commit_array_locked(struct pnfs_commit_array *array) +{ + list_del_rcu(&array->cinfo_list); + list_del(&array->lseg_list); + pnfs_free_commit_array(array); +} + +static void +pnfs_put_commit_array_locked(struct pnfs_commit_array *array) +{ + if (refcount_dec_and_test(&array->refcount)) + pnfs_release_commit_array_locked(array); +} + +static void +pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode) +{ + if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) { + pnfs_release_commit_array_locked(array); + spin_unlock(&inode->i_lock); + } +} + +static struct pnfs_commit_array * +pnfs_get_commit_array(struct pnfs_commit_array *array) +{ + if (refcount_inc_not_zero(&array->refcount)) + return array; + return NULL; +} + +static void +pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array) +{ + array->lseg = NULL; + list_del_init(&array->lseg_list); + pnfs_put_commit_array_locked(array); +} + +void +pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo, + struct pnfs_layout_segment *lseg) +{ + struct pnfs_commit_array *array, *tmp; + + list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list) + pnfs_remove_and_free_commit_array(array); +} +EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg); + +void +pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo) +{ + struct pnfs_commit_array *array, *tmp; + + list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list) + pnfs_remove_and_free_commit_array(array); +} +EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy); + /* * Locks the nfs_page requests for commit and moves them to 
* @bucket->committing. @@ -177,14 +238,21 @@ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max) max -= cnt; if (!max) return rv; - list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) { + rcu_read_lock(); + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { + if (!array->lseg || !pnfs_get_commit_array(array)) + continue; + rcu_read_unlock(); cnt = pnfs_bucket_scan_array(cinfo, array->buckets, array->nbuckets, max); + rcu_read_lock(); + pnfs_put_commit_array(array, cinfo->inode); rv += cnt; max -= cnt; if (!max) break; } + rcu_read_unlock(); return rv; } EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists); @@ -230,13 +298,20 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst, fl_cinfo->nbuckets, cinfo); fl_cinfo->nwritten -= nwritten; - list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) { + rcu_read_lock(); + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { + if (!array->lseg || !pnfs_get_commit_array(array)) + continue; + rcu_read_unlock(); nwritten = pnfs_bucket_recover_commit_reqs(dst, array->buckets, array->nbuckets, cinfo); + rcu_read_lock(); + pnfs_put_commit_array(array, cinfo->inode); fl_cinfo->nwritten -= nwritten; } + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs); @@ -330,9 +405,16 @@ pnfs_alloc_ds_commits_list(struct list_head *list, struct pnfs_commit_array *array; unsigned int ret = 0; - list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) + rcu_read_lock(); + list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { + if (!array->lseg || !pnfs_get_commit_array(array)) + continue; + rcu_read_unlock(); ret += pnfs_bucket_alloc_ds_commits(list, array->buckets, array->nbuckets, cinfo); + rcu_read_lock(); + pnfs_put_commit_array(array, cinfo->inode); + } return ret; } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9946787eda72..33be2ee2a248 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1275,6 +1275,7 @@ struct pnfs_commit_array { struct list_head lseg_list; struct pnfs_layout_segment *lseg; struct rcu_head rcu; + refcount_t refcount; unsigned int nbuckets; struct pnfs_commit_bucket buckets[]; }; -- cgit v1.2.3-58-ga151 From 0aa647b7369dd29de0789c321111b2e4668c46b2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Mar 2020 09:50:05 -0400 Subject: NFS: Remove bucket array from struct pnfs_ds_commit_info Remove the unused bucket array in struct pnfs_ds_commit_info. 
Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 1 - fs/nfs/filelayout/filelayout.c | 75 +-------------------------------- fs/nfs/flexfilelayout/flexfilelayout.c | 76 ---------------------------------- fs/nfs/internal.h | 3 -- fs/nfs/pnfs_nfs.c | 18 -------- include/linux/nfs_xdr.h | 13 ------ 6 files changed, 1 insertion(+), 185 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 4ee26465b510..61f93a0fb0e0 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -217,7 +217,6 @@ static void nfs_direct_req_free(struct kref *kref) struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode); - nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo); if (dreq->l_ctx != NULL) nfs_put_lock_context(dreq->l_ctx); if (dreq->ctx != NULL) diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 795508054a4d..854f350e2599 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -755,72 +755,12 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) flo = FILELAYOUT_FROM_HDR(lseg->pls_layout); inode = flo->generic_hdr.plh_inode; spin_lock(&inode->i_lock); - flo->commit_info.nbuckets = 0; - kfree(flo->commit_info.buckets); - flo->commit_info.buckets = NULL; pnfs_generic_ds_cinfo_release_lseg(&flo->commit_info, lseg); spin_unlock(&inode->i_lock); } _filelayout_free_lseg(fl); } -static int -filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo, - gfp_t gfp_flags) -{ - struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); - struct pnfs_commit_bucket *buckets; - int size, i; - - if (fl->commit_through_mds) - return 0; - - size = (fl->stripe_type == STRIPE_SPARSE) ? - fl->dsaddr->ds_num : fl->dsaddr->stripe_count; - - if (cinfo->ds->nbuckets >= size) { - /* This assumes there is only one IOMODE_RW lseg. What - * we really want to do is have a layout_hdr level - * dictionary of keys, each - * associated with a struct list_head, populated by calls - * to filelayout_write_pagelist(). 
- * */ - return 0; - } - - buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), - gfp_flags); - if (!buckets) - return -ENOMEM; - for (i = 0; i < size; i++) { - INIT_LIST_HEAD(&buckets[i].written); - INIT_LIST_HEAD(&buckets[i].committing); - /* mark direct verifier as unset */ - buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW; - } - - spin_lock(&cinfo->inode->i_lock); - if (cinfo->ds->nbuckets >= size) - goto out; - for (i = 0; i < cinfo->ds->nbuckets; i++) { - list_splice(&cinfo->ds->buckets[i].written, - &buckets[i].written); - list_splice(&cinfo->ds->buckets[i].committing, - &buckets[i].committing); - buckets[i].direct_verf.committed = - cinfo->ds->buckets[i].direct_verf.committed; - buckets[i].wlseg = cinfo->ds->buckets[i].wlseg; - buckets[i].clseg = cinfo->ds->buckets[i].clseg; - } - swap(cinfo->ds->buckets, buckets); - cinfo->ds->nbuckets = size; -out: - spin_unlock(&cinfo->inode->i_lock); - kfree(buckets); - return 0; -} - static struct pnfs_layout_segment * filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, @@ -943,9 +883,6 @@ static void filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { - struct nfs_commit_info cinfo; - int status; - pnfs_generic_pg_check_layout(pgio); if (!pgio->pg_lseg) { pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode, @@ -964,17 +901,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, /* If no lseg, fall back to write through mds */ if (pgio->pg_lseg == NULL) - goto out_mds; - nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq); - status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS); - if (status < 0) { - pnfs_put_lseg(pgio->pg_lseg); - pgio->pg_lseg = NULL; - goto out_mds; - } - return; -out_mds: - nfs_pageio_reset_write_mds(pgio); + nfs_pageio_reset_write_mds(pgio); } static const struct nfs_pageio_ops filelayout_pg_read_ops = { diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index f343a241906a..1a4e36d07eab 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -550,17 +550,6 @@ out_err_free: goto out_free_page; } -static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout) -{ - struct pnfs_layout_segment *lseg; - - list_for_each_entry(lseg, &layout->plh_segs, pls_list) - if (lseg->pls_range.iomode == IOMODE_RW) - return true; - - return false; -} - static void ff_layout_free_lseg(struct pnfs_layout_segment *lseg) { @@ -575,24 +564,12 @@ ff_layout_free_lseg(struct pnfs_layout_segment *lseg) ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout); inode = ffl->generic_hdr.plh_inode; spin_lock(&inode->i_lock); - if (!ff_layout_has_rw_segments(lseg->pls_layout)) { - ffl->commit_info.nbuckets = 0; - kfree(ffl->commit_info.buckets); - ffl->commit_info.buckets = NULL; - } pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg); spin_unlock(&inode->i_lock); } _ff_layout_free_lseg(fls); } -/* Return 1 until we have multiple lsegs support */ -static int -ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls) -{ - return 1; -} - static void nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) { @@ -737,52 +714,6 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task, spin_unlock(&mirror->lock); } -static int -ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo, - gfp_t gfp_flags) -{ - struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); - struct pnfs_commit_bucket *buckets; - 
int size; - - if (cinfo->ds->nbuckets != 0) { - /* This assumes there is only one RW lseg per file. - * To support multiple lseg per file, we need to - * change struct pnfs_commit_bucket to allow dynamic - * increasing nbuckets. - */ - return 0; - } - - size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg); - - buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), - gfp_flags); - if (!buckets) - return -ENOMEM; - else { - int i; - - spin_lock(&cinfo->inode->i_lock); - if (cinfo->ds->nbuckets != 0) - kfree(buckets); - else { - cinfo->ds->buckets = buckets; - cinfo->ds->nbuckets = size; - for (i = 0; i < size; i++) { - INIT_LIST_HEAD(&buckets[i].written); - INIT_LIST_HEAD(&buckets[i].committing); - /* mark direct verifier as unset */ - buckets[i].direct_verf.committed = - NFS_INVALID_STABLE_HOW; - } - } - spin_unlock(&cinfo->inode->i_lock); - return 0; - } -} - static void ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx) { @@ -944,10 +875,8 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, { struct nfs4_ff_layout_mirror *mirror; struct nfs_pgio_mirror *pgm; - struct nfs_commit_info cinfo; struct nfs4_pnfs_ds *ds; int i; - int status; retry: pnfs_generic_pg_check_layout(pgio); @@ -969,11 +898,6 @@ retry: if (pgio->pg_lseg == NULL) goto out_mds; - nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq); - status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS); - if (status < 0) - goto out_mds; - /* Use a direct mapping of ds_idx to pgio mirror_idx */ if (WARN_ON_ONCE(pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 683146a51599..78f317fac940 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -531,9 +531,6 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo) { struct pnfs_commit_array *array; - pnfs_bucket_clear_pnfs_ds_commit_verifiers(cinfo->buckets, - cinfo->nbuckets); - rcu_read_lock(); list_for_each_entry_rcu(array, &cinfo->commits, cinfo_list) pnfs_bucket_clear_pnfs_ds_commit_verifiers(array->buckets, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 9b55919e64ac..20f12f3cbe38 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -292,12 +292,6 @@ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max) struct pnfs_commit_array *array; int rv = 0, cnt; - cnt = pnfs_bucket_scan_array(cinfo, fl_cinfo->buckets, - fl_cinfo->nbuckets, max); - rv += cnt; - max -= cnt; - if (!max) - return rv; rcu_read_lock(); list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { if (!array->lseg || !pnfs_get_commit_array(array)) @@ -353,11 +347,6 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst, unsigned int nwritten; lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex); - nwritten = pnfs_bucket_recover_commit_reqs(dst, - fl_cinfo->buckets, - fl_cinfo->nbuckets, - cinfo); - fl_cinfo->nwritten -= nwritten; rcu_read_lock(); list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) { if (!array->lseg || !pnfs_get_commit_array(array)) @@ -412,10 +401,6 @@ pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page struct pnfs_commit_array *array; struct nfs_page *req; - req = pnfs_bucket_search_commit_reqs(fl_cinfo->buckets, - fl_cinfo->nbuckets, page); - if (req) - return req; list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) { req = pnfs_bucket_search_commit_reqs(array->buckets, array->nbuckets, page); @@ -550,9 +535,6 @@ 
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, nreq++; } - nreq += pnfs_bucket_alloc_ds_commits(&list, fl_cinfo->buckets, - fl_cinfo->nbuckets, cinfo); - nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo); if (nreq == 0) goto out; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 33be2ee2a248..2903597ec88c 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1284,8 +1284,6 @@ struct pnfs_ds_commit_info { struct list_head commits; unsigned int nwritten; unsigned int ncommitting; - unsigned int nbuckets; - struct pnfs_commit_bucket *buckets; }; struct nfs41_state_protection { @@ -1396,22 +1394,11 @@ struct nfs41_free_stateid_res { unsigned int status; }; -static inline void -nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) -{ - kfree(cinfo->buckets); -} - #else struct pnfs_ds_commit_info { }; -static inline void -nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) -{ -} - #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 -- cgit v1.2.3-58-ga151 From 9c455a8c1e146dac3a6d1405fe6a7096177b9546 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 21 Mar 2020 11:13:05 -0400 Subject: NFS/pNFS: Clean up pNFS commit operations Move the pNFS commit related operations into a separate structure that can be carried by the pnfs_ds_commit_info. Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 6 +- fs/nfs/filelayout/filelayout.c | 20 +++--- fs/nfs/flexfilelayout/flexfilelayout.c | 19 +++--- fs/nfs/pnfs.h | 110 +++++++++++++++++++++------------ fs/nfs/pnfs_nfs.c | 13 +--- include/linux/nfs_xdr.h | 1 + 6 files changed, 98 insertions(+), 71 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 61f93a0fb0e0..51ab4627c4d6 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -511,10 +511,7 @@ nfs_direct_write_scan_commit_list(struct inode *inode, struct nfs_commit_info *cinfo) { mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); -#ifdef CONFIG_NFS_V4_1 - if (cinfo->ds != NULL && cinfo->ds->nwritten != 0) - NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); -#endif + pnfs_recover_commit_reqs(list, cinfo); nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0); mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); } @@ -917,6 +914,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dreq->l_ctx = l_ctx; if (!is_sync_kiocb(iocb)) dreq->iocb = iocb; + pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); nfs_start_io_direct(inode); diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 854f350e2599..a13e69009f19 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -49,6 +49,7 @@ MODULE_AUTHOR("Dean Hildebrand "); MODULE_DESCRIPTION("The NFSv4 file layout driver"); #define FILELAYOUT_POLL_RETRY_MAX (15*HZ) +static const struct pnfs_commit_ops filelayout_commit_ops; static loff_t filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg, @@ -1045,6 +1046,7 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) if (flo == NULL) return NULL; pnfs_init_ds_commit_info(&flo->commit_info); + flo->commit_info.ops = &filelayout_commit_ops; return &flo->generic_hdr; } @@ -1094,6 +1096,16 @@ filelayout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, spin_unlock(&inode->i_lock); } +static const struct pnfs_commit_ops filelayout_commit_ops = { + .setup_ds_info = filelayout_setup_ds_info, + .release_ds_info = filelayout_release_ds_info, + .mark_request_commit = 
filelayout_mark_request_commit, + .clear_request_commit = pnfs_generic_clear_request_commit, + .scan_commit_lists = pnfs_generic_scan_commit_lists, + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, + .search_commit_reqs = pnfs_generic_search_commit_reqs, + .commit_pagelist = filelayout_commit_pagelist, +}; static struct pnfs_layoutdriver_type filelayout_type = { .id = LAYOUT_NFSV4_1_FILES, @@ -1108,14 +1120,6 @@ static struct pnfs_layoutdriver_type filelayout_type = { .pg_read_ops = &filelayout_pg_read_ops, .pg_write_ops = &filelayout_pg_write_ops, .get_ds_info = &filelayout_get_ds_info, - .setup_ds_info = filelayout_setup_ds_info, - .release_ds_info = filelayout_release_ds_info, - .mark_request_commit = filelayout_mark_request_commit, - .clear_request_commit = pnfs_generic_clear_request_commit, - .scan_commit_lists = pnfs_generic_scan_commit_lists, - .recover_commit_reqs = pnfs_generic_recover_commit_reqs, - .search_commit_reqs = pnfs_generic_search_commit_reqs, - .commit_pagelist = filelayout_commit_pagelist, .read_pagelist = filelayout_read_pagelist, .write_pagelist = filelayout_write_pagelist, .alloc_deviceid_node = filelayout_alloc_deviceid_node, diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 1a4e36d07eab..d37883a2b51f 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -32,6 +32,7 @@ static unsigned short io_maxretrans; +static const struct pnfs_commit_ops ff_layout_commit_ops; static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, struct nfs_pgio_header *hdr); static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, @@ -52,6 +53,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) INIT_LIST_HEAD(&ffl->error_list); INIT_LIST_HEAD(&ffl->mirrors); ffl->last_report_time = ktime_get(); + ffl->commit_info.ops = &ff_layout_commit_ops; return &ffl->generic_hdr; } else return NULL; @@ -2440,6 +2442,16 @@ ff_layout_set_layoutdriver(struct nfs_server *server, return 0; } +static const struct pnfs_commit_ops ff_layout_commit_ops = { + .setup_ds_info = ff_layout_setup_ds_info, + .release_ds_info = ff_layout_release_ds_info, + .mark_request_commit = pnfs_layout_mark_request_commit, + .clear_request_commit = pnfs_generic_clear_request_commit, + .scan_commit_lists = pnfs_generic_scan_commit_lists, + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, + .commit_pagelist = ff_layout_commit_pagelist, +}; + static struct pnfs_layoutdriver_type flexfilelayout_type = { .id = LAYOUT_FLEX_FILES, .name = "LAYOUT_FLEX_FILES", @@ -2455,14 +2467,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .pg_read_ops = &ff_layout_pg_read_ops, .pg_write_ops = &ff_layout_pg_write_ops, .get_ds_info = ff_layout_get_ds_info, - .setup_ds_info = ff_layout_setup_ds_info, - .release_ds_info = ff_layout_release_ds_info, .free_deviceid_node = ff_layout_free_deviceid_node, - .mark_request_commit = pnfs_layout_mark_request_commit, - .clear_request_commit = pnfs_generic_clear_request_commit, - .scan_commit_lists = pnfs_generic_scan_commit_lists, - .recover_commit_reqs = pnfs_generic_recover_commit_reqs, - .commit_pagelist = ff_layout_commit_pagelist, .read_pagelist = ff_layout_read_pagelist, .write_pagelist = ff_layout_write_pagelist, .alloc_deviceid_node = ff_layout_alloc_deviceid_node, diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index faed9be6e479..b32025553f26 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -150,26 +150,6 @@ struct pnfs_layoutdriver_type 
{ const struct nfs_pageio_ops *pg_write_ops; struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode); - void (*setup_ds_info)(struct pnfs_ds_commit_info *, - struct pnfs_layout_segment *); - void (*release_ds_info)(struct pnfs_ds_commit_info *, - struct inode *inode); - void (*mark_request_commit) (struct nfs_page *req, - struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo, - u32 ds_commit_idx); - void (*clear_request_commit) (struct nfs_page *req, - struct nfs_commit_info *cinfo); - int (*scan_commit_lists) (struct nfs_commit_info *cinfo, - int max); - void (*recover_commit_reqs) (struct list_head *list, - struct nfs_commit_info *cinfo); - struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo, - struct page *page); - int (*commit_pagelist)(struct inode *inode, - struct list_head *mds_pages, - int how, - struct nfs_commit_info *cinfo); int (*sync)(struct inode *inode, bool datasync); @@ -192,6 +172,29 @@ struct pnfs_layoutdriver_type { int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args); }; +struct pnfs_commit_ops { + void (*setup_ds_info)(struct pnfs_ds_commit_info *, + struct pnfs_layout_segment *); + void (*release_ds_info)(struct pnfs_ds_commit_info *, + struct inode *inode); + int (*commit_pagelist)(struct inode *inode, + struct list_head *mds_pages, + int how, + struct nfs_commit_info *cinfo); + void (*mark_request_commit) (struct nfs_page *req, + struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + u32 ds_commit_idx); + void (*clear_request_commit) (struct nfs_page *req, + struct nfs_commit_info *cinfo); + int (*scan_commit_lists) (struct nfs_commit_info *cinfo, + int max); + void (*recover_commit_reqs) (struct list_head *list, + struct nfs_commit_info *cinfo); + struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo, + struct page *page); +}; + struct pnfs_layout_hdr { refcount_t plh_refcount; atomic_t plh_outstanding; /* number of RPCs out */ @@ -461,9 +464,11 @@ static inline int pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how, struct nfs_commit_info *cinfo) { - if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0) + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; + + if (fl_cinfo == NULL || fl_cinfo->ncommitting == 0) return PNFS_NOT_ATTEMPTED; - return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo); + return fl_cinfo->ops->commit_pagelist(inode, mds_pages, how, cinfo); } static inline struct pnfs_ds_commit_info * @@ -476,19 +481,26 @@ pnfs_get_ds_info(struct inode *inode) return ld->get_ds_info(inode); } +static inline void +pnfs_init_ds_commit_info_ops(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) +{ + struct pnfs_ds_commit_info *inode_cinfo = pnfs_get_ds_info(inode); + if (inode_cinfo != NULL) + fl_cinfo->ops = inode_cinfo->ops; +} + static inline void pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo) { INIT_LIST_HEAD(&fl_cinfo->commits); + fl_cinfo->ops = NULL; } static inline void pnfs_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) { - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; - - if (ld != NULL && ld->release_ds_info != NULL) - ld->release_ds_info(fl_cinfo, inode); + if (fl_cinfo->ops != NULL && fl_cinfo->ops->release_ds_info != NULL) + fl_cinfo->ops->release_ds_info(fl_cinfo, inode); } static inline void @@ -501,24 +513,22 @@ static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info 
*cinfo, u32 ds_commit_idx) { - struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; - if (lseg == NULL || ld->mark_request_commit == NULL) + if (!lseg || !fl_cinfo->ops->mark_request_commit) return false; - ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx); + fl_cinfo->ops->mark_request_commit(req, lseg, cinfo, ds_commit_idx); return true; } static inline bool pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo) { - struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; - if (ld == NULL || ld->clear_request_commit == NULL) + if (!fl_cinfo || !fl_cinfo->ops || !fl_cinfo->ops->clear_request_commit) return false; - ld->clear_request_commit(req, cinfo); + fl_cinfo->ops->clear_request_commit(req, cinfo); return true; } @@ -526,21 +536,31 @@ static inline int pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, int max) { - if (cinfo->ds == NULL || cinfo->ds->nwritten == 0) + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; + + if (!fl_cinfo || fl_cinfo->nwritten == 0) return 0; - else - return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max); + return fl_cinfo->ops->scan_commit_lists(cinfo, max); +} + +static inline void +pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo) +{ + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; + + if (fl_cinfo && fl_cinfo->nwritten != 0) + fl_cinfo->ops->recover_commit_reqs(head, cinfo); } static inline struct nfs_page * pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, struct page *page) { - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; - if (ld == NULL || ld->search_commit_reqs == NULL) + if (!fl_cinfo->ops || !fl_cinfo->ops->search_commit_reqs) return NULL; - return ld->search_commit_reqs(cinfo, page); + return fl_cinfo->ops->search_commit_reqs(cinfo, page); } /* Should the pNFS client commit and return the layout upon a setattr */ @@ -788,6 +808,11 @@ pnfs_get_ds_info(struct inode *inode) return NULL; } +static inline void +pnfs_init_ds_commit_info_ops(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode) +{ +} + static inline void pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo) { @@ -818,6 +843,11 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, return 0; } +static inline void +pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo) +{ +} + static inline struct nfs_page * pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, struct page *page) diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 20f12f3cbe38..06df2e6663dc 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -149,17 +149,6 @@ pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo, } EXPORT_SYMBOL_GPL(pnfs_add_commit_array); -static void -pnfs_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, - struct pnfs_layout_segment *lseg) -{ - struct inode *inode = lseg->pls_layout->plh_inode; - struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; - - if (ld->setup_ds_info != NULL) - ld->setup_ds_info(fl_cinfo, lseg); -} - static struct pnfs_commit_array * pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo, struct 
pnfs_layout_segment *lseg) @@ -170,7 +159,7 @@ pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo, array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg); if (!array) { rcu_read_unlock(); - pnfs_setup_ds_info(fl_cinfo, lseg); + fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg); rcu_read_lock(); array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg); } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 2903597ec88c..adbbeae9ce5b 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1284,6 +1284,7 @@ struct pnfs_ds_commit_info { struct list_head commits; unsigned int nwritten; unsigned int ncommitting; + const struct pnfs_commit_ops *ops; }; struct nfs41_state_protection { -- cgit v1.2.3-58-ga151 From c84bea59449aaa699a0600a50f59d441cc1d4501 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 22 Mar 2020 14:47:38 -0400 Subject: NFS/pNFS: Simplify bucket layout segment reference counting Signed-off-by: Trond Myklebust --- fs/nfs/pnfs_nfs.c | 39 ++++++++++++++++++++------------------- include/linux/nfs_xdr.h | 3 +-- 2 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 06df2e6663dc..abf16fc98346 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -59,6 +59,17 @@ void pnfs_generic_commit_release(void *calldata) } EXPORT_SYMBOL_GPL(pnfs_generic_commit_release); +static struct pnfs_layout_segment * +pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket) +{ + if (list_empty(&bucket->committing) && list_empty(&bucket->written)) { + struct pnfs_layout_segment *freeme = bucket->lseg; + bucket->lseg = NULL; + return freeme; + } + return NULL; +} + /* The generic layer is about to remove the req from the commit list. * If this will make the bucket empty, it will need to put the lseg reference. 
* Note this must be called holding nfsi->commit_mutex @@ -78,8 +89,7 @@ pnfs_generic_clear_request_commit(struct nfs_page *req, bucket = list_first_entry(&req->wb_list, struct pnfs_commit_bucket, written); - freeme = bucket->wlseg; - bucket->wlseg = NULL; + freeme = pnfs_free_bucket_lseg(bucket); } out: nfs_request_remove_commit_list(req, cinfo); @@ -103,8 +113,7 @@ pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags) for (b = &p->buckets[0]; n != 0; b++, n--) { INIT_LIST_HEAD(&b->written); INIT_LIST_HEAD(&b->committing); - b->wlseg = NULL; - b->clseg = NULL; + b->lseg = NULL; b->direct_verf.committed = NFS_INVALID_STABLE_HOW; } return p; @@ -246,12 +255,6 @@ pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, if (ret) { cinfo->ds->nwritten -= ret; cinfo->ds->ncommitting += ret; - if (bucket->clseg == NULL) - bucket->clseg = pnfs_get_lseg(bucket->wlseg); - if (list_empty(src)) { - pnfs_put_lseg(bucket->wlseg); - bucket->wlseg = NULL; - } } return ret; } @@ -317,9 +320,8 @@ restart: if (!nwritten) continue; ret += nwritten; - if (list_empty(&b->written)) { - freeme = b->wlseg; - b->wlseg = NULL; + freeme = pnfs_free_bucket_lseg(b); + if (freeme) { pnfs_put_lseg(freeme); goto restart; } @@ -405,15 +407,12 @@ pnfs_bucket_get_committing(struct list_head *head, struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo) { - struct pnfs_layout_segment *freeme; struct list_head *pos; list_for_each(pos, &bucket->committing) cinfo->ds->ncommitting--; list_splice_init(&bucket->committing, head); - freeme = bucket->clseg; - bucket->clseg = NULL; - return freeme; + return pnfs_free_bucket_lseg(bucket); } static struct nfs_commit_data * @@ -425,6 +424,8 @@ pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket, if (!data) return NULL; data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo); + if (!data->lseg) + data->lseg = pnfs_get_lseg(bucket->lseg); return data; } @@ -1182,8 +1183,8 @@ pnfs_layout_mark_request_commit(struct nfs_page *req, * off due to a rewrite, in which case it will be done in * pnfs_common_clear_request_commit */ - WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); - buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); + if (!buckets[ds_commit_idx].lseg) + buckets[ds_commit_idx].lseg = pnfs_get_lseg(lseg); } set_bit(PG_COMMIT_TO_DS, &req->wb_flags); cinfo->ds->nwritten++; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index adbbeae9ce5b..7bbb1f6fc1b1 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1265,8 +1265,7 @@ struct pnfs_commit_bucket { struct list_head written; struct list_head committing; - struct pnfs_layout_segment *wlseg; - struct pnfs_layout_segment *clseg; + struct pnfs_layout_segment *lseg; struct nfs_writeverf direct_verf; }; -- cgit v1.2.3-58-ga151 From c9b7a4a72ff64e67b7e877a99fd652230dc26058 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 17 Mar 2020 17:32:32 -0400 Subject: ring-buffer/tracing: Have iterator acknowledge dropped events Have the ring_buffer_iterator set a flag if events were dropped as it was about to peek at the next event. Have the trace file display a "LOST EVENTS" message when this happens.
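For illustration, a minimal reader loop using the new helper might look like the sketch below. This is not part of the patch: the iterator setup and teardown calls are the existing ring-buffer API, and the loop itself is a hypothetical consumer.

#include <linux/ring_buffer.h>

/*
 * Illustrative only: drain one CPU's iterator and report drops via
 * the new ring_buffer_iter_dropped() helper added by this patch.
 */
static void dump_cpu_events(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts)) != NULL) {
		/* The dropped flag is latched until read, then cleared. */
		if (ring_buffer_iter_dropped(iter))
			pr_info("CPU:%d [LOST EVENTS]\n", cpu);
		/* consume the event payload here */
		ring_buffer_iter_advance(iter);
	}

	ring_buffer_read_finish(iter);
}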
Link: http://lkml.kernel.org/r/20200317213417.045858900@goodmis.org Signed-off-by: Steven Rostedt (VMware) --- include/linux/ring_buffer.h | 1 + kernel/trace/ring_buffer.c | 16 ++++++++++++++++ kernel/trace/trace.c | 16 ++++++++++++---- 3 files changed, 29 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 0ae603b79b0e..c76b2f3b3ac4 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -138,6 +138,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); void ring_buffer_iter_advance(struct ring_buffer_iter *iter); void ring_buffer_iter_reset(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter); +bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index af2f10d9f3f1..6f0b42ceeb00 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -510,6 +510,7 @@ struct ring_buffer_iter { u64 read_stamp; u64 page_stamp; struct ring_buffer_event *event; + int missed_events; }; /** @@ -1988,6 +1989,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter) iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; iter->head = 0; iter->next_event = 0; + iter->missed_events = 1; return NULL; } @@ -4191,6 +4193,20 @@ ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, return event; } +/** ring_buffer_iter_dropped - report if there are dropped events + * @iter: The ring buffer iterator + * + * Returns true if there was dropped events since the last peek. + */ +bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter) +{ + bool ret = iter->missed_events != 0; + + iter->missed_events = 0; + return ret; +} +EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped); + /** * ring_buffer_iter_peek - peek at the next event to be read * @iter: The ring buffer iterator diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 650fa81fffe8..5e634b9c1e0a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3388,11 +3388,15 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); - if (buf_iter) + if (buf_iter) { event = ring_buffer_iter_peek(buf_iter, ts); - else + if (lost_events) + *lost_events = ring_buffer_iter_dropped(buf_iter) ? + (unsigned long)-1 : 0; + } else { event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, lost_events); + } if (event) { iter->ent_size = ring_buffer_event_length(event); @@ -4005,8 +4009,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) enum print_line_t ret; if (iter->lost_events) { - trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", - iter->cpu, iter->lost_events); + if (iter->lost_events == (unsigned long)-1) + trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", + iter->cpu); + else + trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", + iter->cpu, iter->lost_events); if (trace_seq_has_overflowed(&iter->seq)) return TRACE_TYPE_PARTIAL_LINE; } -- cgit v1.2.3-58-ga151 From 8ced32ffadc857eaa45d62c0c5a34cf6f37168ea Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 24 Mar 2020 14:56:50 +0100 Subject: gpiolib: Introduce gpiod_set_config() The GPIO Aggregator will need a method to forward a .set_config() call to its parent gpiochip. 
This requires obtaining the gpio_chip and offset for a given gpio_desc. While gpiod_to_chip() is public, gpio_chip_hwgpio() is not, so there is currently no method to obtain the needed GPIO offset parameter. Hence introduce a public gpiod_set_config() helper, which invokes the .set_config() callback through a gpio_desc pointer, like is done for most other gpio_chip callbacks. Rewrite the existing gpiod_set_debounce() helper as a wrapper around gpiod_set_config(), to avoid duplication. Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20200324135653.6676-5-geert+renesas@glider.be Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 28 ++++++++++++++++++++++------ include/linux/gpio/consumer.h | 8 ++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index e3616cc85d66..6c3e4eb53771 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3431,6 +3431,26 @@ set_output_flag: } EXPORT_SYMBOL_GPL(gpiod_direction_output); +/** + * gpiod_set_config - sets @config for a GPIO + * @desc: descriptor of the GPIO for which to set the configuration + * @config: Same packed config format as generic pinconf + * + * Returns: + * 0 on success, %-ENOTSUPP if the controller doesn't support setting the + * configuration. + */ +int gpiod_set_config(struct gpio_desc *desc, unsigned long config) +{ + struct gpio_chip *chip; + + VALIDATE_DESC(desc); + chip = desc->gdev->chip; + + return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config); +} +EXPORT_SYMBOL_GPL(gpiod_set_config); + /** * gpiod_set_debounce - sets @debounce time for a GPIO * @desc: descriptor of the GPIO for which to set debounce time @@ -3442,14 +3462,10 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output); */ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { - struct gpio_chip *chip; - unsigned long config; - - VALIDATE_DESC(desc); - chip = desc->gdev->chip; + unsigned long config; config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); - return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config); + return gpiod_set_config(desc, config); } EXPORT_SYMBOL_GPL(gpiod_set_debounce); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 0a72fccf60ff..901aab89d025 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -157,6 +157,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_array *array_info, unsigned long *value_bitmap); +int gpiod_set_config(struct gpio_desc *desc, unsigned long config); int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); void gpiod_toggle_active_low(struct gpio_desc *desc); @@ -473,6 +474,13 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, return 0; } +static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} + static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ -- cgit v1.2.3-58-ga151 From be06c1b42eea749547d2f0248dc0a7c1153f67b9 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 23 Mar 2020 17:26:01 -0700 Subject: PCI/DPC: Move DPC data into struct pci_dev We only need 25 bits of data for DPC, so I don't think it's worth the complexity of allocating and keeping track of the struct dpc_dev 
separately from the pci_dev. Move that data into the struct pci_dev. Link: https://lore.kernel.org/r/98323eaa18080adbe5bb30846862f09f8722d4b3.1585000084.git.sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Bjorn Helgaas --- drivers/pci/pcie/dpc.c | 103 +++++++++++++++---------------------------------- include/linux/pci.h | 5 +++ 2 files changed, 36 insertions(+), 72 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index e06f42f58d3d..6b116d7fdb89 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -17,13 +17,6 @@ #include "portdrv.h" #include "../pci.h" -struct dpc_dev { - struct pcie_device *dev; - u16 cap_pos; - bool rp_extensions; - u8 rp_log_size; -}; - static const char * const rp_pio_error_string[] = { "Configuration Request received UR Completion", /* Bit Position 0 */ "Configuration Request received CA Completion", /* Bit Position 1 */ @@ -46,63 +39,42 @@ static const char * const rp_pio_error_string[] = { "Memory Request Completion Timeout", /* Bit Position 18 */ }; -static struct dpc_dev *to_dpc_dev(struct pci_dev *dev) -{ - struct device *device; - - device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC); - if (!device) - return NULL; - return get_service_data(to_pcie_device(device)); -} - void pci_save_dpc_state(struct pci_dev *dev) { - struct dpc_dev *dpc; struct pci_cap_saved_state *save_state; u16 *cap; if (!pci_is_pcie(dev)) return; - dpc = to_dpc_dev(dev); - if (!dpc) - return; - save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC); if (!save_state) return; cap = (u16 *)&save_state->cap.data[0]; - pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap); + pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap); } void pci_restore_dpc_state(struct pci_dev *dev) { - struct dpc_dev *dpc; struct pci_cap_saved_state *save_state; u16 *cap; if (!pci_is_pcie(dev)) return; - dpc = to_dpc_dev(dev); - if (!dpc) - return; - save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC); if (!save_state) return; cap = (u16 *)&save_state->cap.data[0]; - pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap); + pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap); } -static int dpc_wait_rp_inactive(struct dpc_dev *dpc) +static int dpc_wait_rp_inactive(struct pci_dev *pdev) { unsigned long timeout = jiffies + HZ; - struct pci_dev *pdev = dpc->dev->port; - u16 cap = dpc->cap_pos, status; + u16 cap = pdev->dpc_cap, status; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); while (status & PCI_EXP_DPC_RP_BUSY && @@ -119,15 +91,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) { - struct dpc_dev *dpc; u16 cap; /* * DPC disables the Link automatically in hardware, so it has * already been reset by the time we get here. 
*/ - dpc = to_dpc_dev(pdev); - cap = dpc->cap_pos; + cap = pdev->dpc_cap; /* * Wait until the Link is inactive, then clear DPC Trigger Status @@ -135,7 +105,7 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) */ pcie_wait_for_link(pdev, false); - if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc)) + if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) return PCI_ERS_RESULT_DISCONNECT; pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS, @@ -147,10 +117,9 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -static void dpc_process_rp_pio_error(struct dpc_dev *dpc) +static void dpc_process_rp_pio_error(struct pci_dev *pdev) { - struct pci_dev *pdev = dpc->dev->port; - u16 cap = dpc->cap_pos, dpc_status, first_error; + u16 cap = pdev->dpc_cap, dpc_status, first_error; u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix; int i; @@ -175,7 +144,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) first_error == i ? " (First)" : ""); } - if (dpc->rp_log_size < 4) + if (pdev->dpc_rp_log_size < 4) goto clear_status; pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &dw0); @@ -188,12 +157,12 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n", dw0, dw1, dw2, dw3); - if (dpc->rp_log_size < 5) + if (pdev->dpc_rp_log_size < 5) goto clear_status; pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log); pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log); - for (i = 0; i < dpc->rp_log_size - 5; i++) { + for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) { pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); @@ -226,10 +195,9 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev, static irqreturn_t dpc_handler(int irq, void *context) { + struct pci_dev *pdev = context; + u16 cap = pdev->dpc_cap, status, source, reason, ext_reason; struct aer_err_info info; - struct dpc_dev *dpc = context; - struct pci_dev *pdev = dpc->dev->port; - u16 cap = dpc->cap_pos, status, source, reason, ext_reason; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); @@ -248,8 +216,8 @@ static irqreturn_t dpc_handler(int irq, void *context) "reserved error"); /* show RP PIO error detail information */ - if (dpc->rp_extensions && reason == 3 && ext_reason == 0) - dpc_process_rp_pio_error(dpc); + if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0) + dpc_process_rp_pio_error(pdev); else if (reason == 0 && dpc_get_aer_uncorrect_severity(pdev, &info) && aer_get_device_error_info(pdev, &info)) { @@ -266,9 +234,8 @@ static irqreturn_t dpc_handler(int irq, void *context) static irqreturn_t dpc_irq(int irq, void *context) { - struct dpc_dev *dpc = (struct dpc_dev *)context; - struct pci_dev *pdev = dpc->dev->port; - u16 cap = dpc->cap_pos, status; + struct pci_dev *pdev = context; + u16 cap = pdev->dpc_cap, status; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); @@ -285,7 +252,6 @@ static irqreturn_t dpc_irq(int irq, void *context) #define FLAG(x, y) (((x) & (y)) ? 
'+' : '-') static int dpc_probe(struct pcie_device *dev) { - struct dpc_dev *dpc; struct pci_dev *pdev = dev->port; struct device *device = &dev->device; int status; @@ -294,43 +260,37 @@ static int dpc_probe(struct pcie_device *dev) if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native) return -ENOTSUPP; - dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL); - if (!dpc) - return -ENOMEM; - - dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC); - dpc->dev = dev; - set_service_data(dev, dpc); + pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC); status = devm_request_threaded_irq(device, dev->irq, dpc_irq, dpc_handler, IRQF_SHARED, - "pcie-dpc", dpc); + "pcie-dpc", pdev); if (status) { pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq, status); return status; } - pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); - pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap); + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); - dpc->rp_extensions = (cap & PCI_EXP_DPC_CAP_RP_EXT); - if (dpc->rp_extensions) { - dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; - if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) { + pdev->dpc_rp_extensions = (cap & PCI_EXP_DPC_CAP_RP_EXT) ? 1 : 0; + if (pdev->dpc_rp_extensions) { + pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; + if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) { pci_err(pdev, "RP PIO log size %u is invalid\n", - dpc->rp_log_size); - dpc->rp_log_size = 0; + pdev->dpc_rp_log_size); + pdev->dpc_rp_log_size = 0; } } ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; - pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); + pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), - FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, + FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size, FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16)); @@ -339,13 +299,12 @@ static int dpc_probe(struct pcie_device *dev) static void dpc_remove(struct pcie_device *dev) { - struct dpc_dev *dpc = get_service_data(dev); struct pci_dev *pdev = dev->port; u16 ctl; - pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN); - pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); + pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); } static struct pcie_port_service_driver dpcdriver = { diff --git a/include/linux/pci.h b/include/linux/pci.h index 3840a541a9de..a0b7e7a53741 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -444,6 +444,11 @@ struct pci_dev { const struct attribute_group **msi_irq_groups; #endif struct pci_vpd *vpd; +#ifdef CONFIG_PCIE_DPC + u16 dpc_cap; + unsigned int dpc_rp_extensions:1; + u8 dpc_rp_log_size; +#endif #ifdef CONFIG_PCI_ATS union { struct pci_sriov *sriov; /* PF: SR-IOV info */ -- cgit v1.2.3-58-ga151 From ac1c8e35a3262d04cc81b07fac6480a3539e3b0f Mon Sep 17 00:00:00 2001 From: Kuppuswamy Sathyanarayanan Date: Mon, 23 Mar 2020 17:26:07 -0700 
Subject: PCI/DPC: Add Error Disconnect Recover (EDR) support Error Disconnect Recover (EDR) is a feature that allows ACPI firmware to notify OSPM that a device has been disconnected due to an error condition (ACPI v6.3, sec 5.6.6). OSPM advertises its support for EDR on PCI devices via _OSC (see [1], sec 4.5.1, table 4-4). The OSPM EDR notify handler should invalidate software state associated with disconnected devices and may attempt to recover them. OSPM communicates the status of recovery to the firmware via _OST (sec 6.3.5.2). For PCIe, firmware may use Downstream Port Containment (DPC) to support EDR. Per [1], sec 4.5.1, table 4-6, even if firmware has retained control of DPC, OSPM may read/write DPC control and status registers during the EDR notification processing window, i.e., from the time it receives an EDR notification until it clears the DPC Trigger Status. Note that per [1], sec 4.5.1 and 4.5.2.4, 1. If the OS supports EDR, it should advertise that to firmware by setting OSC_PCI_EDR_SUPPORT in _OSC Support. 2. If the OS sets OSC_PCI_EXPRESS_DPC_CONTROL in _OSC Control to request control of the DPC capability, it must also set OSC_PCI_EDR_SUPPORT in _OSC Support. Add an EDR notify handler to attempt recovery. [1] Downstream Port Containment Related Enhancements ECN, Jan 28, 2019, affecting PCI Firmware Specification, Rev. 3.2 https://members.pcisig.com/wg/PCI-SIG/document/12888 [bhelgaas: squash add/enable patches into one] Link: https://lore.kernel.org/r/90f91fe6d25c13f9d2255d2ce97ca15be307e1bb.1585000084.git.sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Kuppuswamy Sathyanarayanan Signed-off-by: Bjorn Helgaas Cc: "Rafael J. Wysocki" Cc: Len Brown --- drivers/acpi/pci_root.c | 15 +++ drivers/pci/pci-acpi.c | 2 + drivers/pci/pcie/Kconfig | 10 ++ drivers/pci/pcie/Makefile | 1 + drivers/pci/pcie/edr.c | 239 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/pci/probe.c | 1 + include/linux/acpi.h | 6 +- include/linux/pci-acpi.h | 8 ++ include/linux/pci.h | 1 + 9 files changed, 281 insertions(+), 2 deletions(-) create mode 100644 drivers/pci/pcie/edr.c (limited to 'include/linux') diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d1e666ef3fcc..0cb9df5462c3 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -131,6 +131,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = { { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" }, { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" }, { OSC_PCI_MSI_SUPPORT, "MSI" }, + { OSC_PCI_EDR_SUPPORT, "EDR" }, { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" }, }; @@ -141,6 +142,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = { { OSC_PCI_EXPRESS_AER_CONTROL, "AER" }, { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" }, { OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" }, + { OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" }, }; static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, @@ -440,6 +442,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT; if (pci_msi_enabled()) support |= OSC_PCI_MSI_SUPPORT; + if (IS_ENABLED(CONFIG_PCIE_EDR)) + support |= OSC_PCI_EDR_SUPPORT; decode_osc_support(root, "OS supports", support); status = acpi_pci_osc_support(root, support); @@ -487,6 +491,15 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, control |= OSC_PCI_EXPRESS_AER_CONTROL; } + /* + * Per the Downstream Port Containment Related Enhancements ECN to + * the PCI Firmware Spec, r3.2, sec 
4.5.1, table 4-5, + * OSC_PCI_EXPRESS_DPC_CONTROL indicates the OS supports both DPC + * and EDR. + */ + if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR)) + control |= OSC_PCI_EXPRESS_DPC_CONTROL; + requested = control; status = acpi_pci_osc_control_set(handle, &control, OSC_PCI_EXPRESS_CAPABILITY_CONTROL); @@ -916,6 +929,8 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, host_bridge->native_pme = 0; if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL)) host_bridge->native_ltr = 0; + if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL)) + host_bridge->native_dpc = 0; /* * Evaluate the "PCI Boot Configuration" _DSM Function. If it diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 0c02d500158f..1a6d2062cf8d 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1241,6 +1241,7 @@ static void pci_acpi_setup(struct device *dev) pci_acpi_optimize_delay(pci_dev, adev->handle); pci_acpi_set_untrusted(pci_dev); + pci_acpi_add_edr_notifier(pci_dev); pci_acpi_add_pm_notifier(adev, pci_dev); if (!adev->wakeup.flags.valid) @@ -1268,6 +1269,7 @@ static void pci_acpi_cleanup(struct device *dev) if (!adev) return; + pci_acpi_remove_edr_notifier(pci_dev); pci_acpi_remove_pm_notifier(adev); if (adev->wakeup.flags.valid) { acpi_device_power_remove_dependent(adev, dev); diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 6e3c04b46fb1..772b1f4cb19e 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -140,3 +140,13 @@ config PCIE_BW This enables PCI Express Bandwidth Change Notification. If you know link width or rate changes occur only to correct unreliable links, you may answer Y. + +config PCIE_EDR + bool "PCI Express Error Disconnect Recover support" + depends on PCIE_DPC && ACPI + help + This option adds Error Disconnect Recover support as specified + in the Downstream Port Containment Related Enhancements ECN to + the PCI Firmware Specification r3.2. Enable this if you want to + support hybrid DPC model which uses both firmware and OS to + implement DPC. diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index efb9d2e71e9e..68da9280ff11 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_PCIE_PME) += pme.o obj-$(CONFIG_PCIE_DPC) += dpc.o obj-$(CONFIG_PCIE_PTM) += ptm.o obj-$(CONFIG_PCIE_BW) += bw_notification.o +obj-$(CONFIG_PCIE_EDR) += edr.o diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c new file mode 100644 index 000000000000..594622a6cb16 --- /dev/null +++ b/drivers/pci/pcie/edr.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Error Disconnect Recover support + * Author: Kuppuswamy Sathyanarayanan + * + * Copyright (C) 2020 Intel Corp. + */ + +#define dev_fmt(fmt) "EDR: " fmt + +#include +#include + +#include "portdrv.h" +#include "../pci.h" + +#define EDR_PORT_DPC_ENABLE_DSM 0x0C +#define EDR_PORT_LOCATE_DSM 0x0D +#define EDR_OST_SUCCESS 0x80 +#define EDR_OST_FAILED 0x81 + +/* + * _DSM wrapper function to enable/disable DPC + * @pdev : PCI device structure + * + * returns 0 on success or errno on failure. + */ +static int acpi_enable_dpc(struct pci_dev *pdev) +{ + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + union acpi_object *obj, argv4, req; + int status = 0; + + /* + * Behavior when calling unsupported _DSM functions is undefined, + * so check whether EDR_PORT_DPC_ENABLE_DSM is supported. 
+ */ + if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5, + 1ULL << EDR_PORT_DPC_ENABLE_DSM)) + return 0; + + req.type = ACPI_TYPE_INTEGER; + req.integer.value = 1; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &req; + + /* + * Per Downstream Port Containment Related Enhancements ECN to PCI + * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is + * optional. Return success if it's not implemented. + */ + obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5, + EDR_PORT_DPC_ENABLE_DSM, &argv4); + if (!obj) + return 0; + + if (obj->type != ACPI_TYPE_INTEGER) { + pci_err(pdev, FW_BUG "Enable DPC _DSM returned non integer\n"); + status = -EIO; + } + + if (obj->integer.value != 1) { + pci_err(pdev, "Enable DPC _DSM failed to enable DPC\n"); + status = -EIO; + } + + ACPI_FREE(obj); + + return status; +} + +/* + * _DSM wrapper function to locate DPC port + * @pdev : Device which received EDR event + * + * Returns pci_dev or NULL. Caller is responsible for dropping a reference + * on the returned pci_dev with pci_dev_put(). + */ +static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev) +{ + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + union acpi_object *obj; + u16 port; + + /* + * Behavior when calling unsupported _DSM functions is undefined, + * so check whether EDR_PORT_DPC_ENABLE_DSM is supported. + */ + if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5, + 1ULL << EDR_PORT_LOCATE_DSM)) + return pci_dev_get(pdev); + + obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5, + EDR_PORT_LOCATE_DSM, NULL); + if (!obj) + return pci_dev_get(pdev); + + if (obj->type != ACPI_TYPE_INTEGER) { + ACPI_FREE(obj); + pci_err(pdev, FW_BUG "Locate Port _DSM returned non integer\n"); + return NULL; + } + + /* + * Firmware returns DPC port BDF details in following format: + * 15:8 = bus + * 7:3 = device + * 2:0 = function + */ + port = obj->integer.value; + + ACPI_FREE(obj); + + return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + PCI_BUS_NUM(port), port & 0xff); +} + +/* + * _OST wrapper function to let firmware know the status of EDR event + * @pdev : Device used to send _OST + * @edev : Device which experienced EDR event + * @status : Status of EDR event + */ +static int acpi_send_edr_status(struct pci_dev *pdev, struct pci_dev *edev, + u16 status) +{ + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + u32 ost_status; + + pci_dbg(pdev, "Status for %s: %#x\n", pci_name(edev), status); + + ost_status = PCI_DEVID(edev->bus->number, edev->devfn) << 16; + ost_status |= status; + + status = acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_DISCONNECT_RECOVER, + ost_status, NULL); + if (ACPI_FAILURE(status)) + return -EINVAL; + + return 0; +} + +static void edr_handle_event(acpi_handle handle, u32 event, void *data) +{ + struct pci_dev *pdev = data, *edev; + pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT; + u16 status; + + pci_info(pdev, "ACPI event %#x received\n", event); + + if (event != ACPI_NOTIFY_DISCONNECT_RECOVER) + return; + + /* Locate the port which issued EDR event */ + edev = acpi_dpc_port_get(pdev); + if (!edev) { + pci_err(pdev, "Firmware failed to locate DPC port\n"); + return; + } + + pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(edev)); + + /* If port does not support DPC, just send the OST */ + if (!edev->dpc_cap) { + pci_err(edev, FW_BUG "This device doesn't support DPC\n"); + goto send_ost; + } + + /* Check if there is a valid DPC trigger */ + pci_read_config_word(edev, 
edev->dpc_cap + PCI_EXP_DPC_STATUS, &status); + if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) { + pci_err(edev, "Invalid DPC trigger %#010x\n", status); + goto send_ost; + } + + dpc_process_error(edev); + pci_aer_raw_clear_status(edev); + + /* + * Irrespective of whether the DPC event is triggered by ERR_FATAL + * or ERR_NONFATAL, since the link is already down, use the FATAL + * error recovery path for both cases. + */ + estate = pcie_do_recovery(edev, pci_channel_io_frozen, dpc_reset_link); + +send_ost: + + /* + * If recovery is successful, send _OST(0xF, BDF << 16 | 0x80) + * to firmware. If not successful, send _OST(0xF, BDF << 16 | 0x81). + */ + if (estate == PCI_ERS_RESULT_RECOVERED) { + pci_dbg(edev, "DPC port successfully recovered\n"); + acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS); + } else { + pci_dbg(edev, "DPC port recovery failed\n"); + acpi_send_edr_status(pdev, edev, EDR_OST_FAILED); + } + + pci_dev_put(edev); +} + +void pci_acpi_add_edr_notifier(struct pci_dev *pdev) +{ + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + acpi_status status; + + if (!adev) { + pci_dbg(pdev, "No valid ACPI node, skipping EDR init\n"); + return; + } + + status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, + edr_handle_event, pdev); + if (ACPI_FAILURE(status)) { + pci_err(pdev, "Failed to install notify handler\n"); + return; + } + + if (acpi_enable_dpc(pdev)) + acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, + edr_handle_event); + else + pci_dbg(pdev, "Notify handler installed\n"); +} + +void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) +{ + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + + if (!adev) + return; + + acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, + edr_handle_event); + pci_dbg(pdev, "Notify handler removed\n"); +} diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c6f91f886818..f67c007edcae 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -598,6 +598,7 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge) bridge->native_shpc_hotplug = 1; bridge->native_pme = 1; bridge->native_ltr = 1; + bridge->native_dpc = 1; } struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0f24d701fbdc..b7d3caf6f205 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -530,8 +530,9 @@ extern bool osc_pc_lpi_support_confirmed; #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 #define OSC_PCI_MSI_SUPPORT 0x00000010 +#define OSC_PCI_EDR_SUPPORT 0x00000080 #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 -#define OSC_PCI_SUPPORT_MASKS 0x0000011f +#define OSC_PCI_SUPPORT_MASKS 0x0000019f /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 @@ -540,7 +541,8 @@ extern bool osc_pc_lpi_support_confirmed; #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 #define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 -#define OSC_PCI_CONTROL_MASKS 0x0000003f +#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 +#define OSC_PCI_CONTROL_MASKS 0x000000bf #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 62b7fdcc661c..2d155bfb8fbf 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -112,6 +112,14 @@ extern const guid_t pci_acpi_dsm_guid; #define RESET_DELAY_DSM 
0x08 #define FUNCTION_DELAY_DSM 0x09 +#ifdef CONFIG_PCIE_EDR +void pci_acpi_add_edr_notifier(struct pci_dev *pdev); +void pci_acpi_remove_edr_notifier(struct pci_dev *pdev); +#else +static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { } +static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { } +#endif /* CONFIG_PCIE_EDR */ + #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } diff --git a/include/linux/pci.h b/include/linux/pci.h index a0b7e7a53741..7ed7c088c952 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -515,6 +515,7 @@ struct pci_host_bridge { unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ unsigned int native_pme:1; /* OS may use PCIe PME */ unsigned int native_ltr:1; /* OS may use PCIe LTR */ + unsigned int native_dpc:1; /* OS may use PCIe DPC */ unsigned int preserve_config:1; /* Preserve FW resource setup */ /* Resource alignment requirements */ -- cgit v1.2.3-58-ga151 From 894020fdd88c1e9a74c60b67c0f19f1c7696ba2f Mon Sep 17 00:00:00 2001 From: Kuppuswamy Sathyanarayanan Date: Mon, 23 Mar 2020 17:26:08 -0700 Subject: PCI/AER: Rationalize error status register clearing The AER interfaces to clear error status registers were a confusing mess:

- pci_cleanup_aer_uncorrect_error_status() cleared non-fatal errors from the Uncorrectable Error Status register.
- pci_aer_clear_fatal_status() cleared fatal errors from the Uncorrectable Error Status register.
- pci_cleanup_aer_error_status_regs() cleared the Root Error Status register (for Root Ports), the Uncorrectable Error Status register, and the Correctable Error Status register.

Rename them to make them consistent:

From                                          To
----------------------------------------      -------------------------------
pci_cleanup_aer_uncorrect_error_status()      pci_aer_clear_nonfatal_status()
pci_aer_clear_fatal_status()                  pci_aer_clear_fatal_status()
pci_cleanup_aer_error_status_regs()           pci_aer_clear_status()

Since pci_cleanup_aer_error_status_regs() (renamed to pci_aer_clear_status()) is only used within drivers/pci/, move the declaration from <linux/aer.h> to drivers/pci/pci.h. [bhelgaas: commit log, add renames] Link: https://lore.kernel.org/r/d1310a75dc3d28f7e8da4e99c45fbd3e60fe238e.1585000084.git.sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Kuppuswamy Sathyanarayanan Signed-off-by: Bjorn Helgaas --- Documentation/PCI/pcieaer-howto.rst | 4 ++-- drivers/net/ethernet/intel/ice/ice_main.c | 4 ++-- drivers/ntb/hw/idt/ntb_hw_idt.c | 4 ++-- drivers/pci/pci.c | 2 +- drivers/pci/pci.h | 2 ++ drivers/pci/pcie/aer.c | 8 ++++---- drivers/pci/pcie/dpc.c | 2 +- drivers/pci/pcie/err.c | 2 +- drivers/scsi/lpfc/lpfc_attr.c | 4 ++-- include/linux/aer.h | 9 ++------- 10 files changed, 19 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/Documentation/PCI/pcieaer-howto.rst b/Documentation/PCI/pcieaer-howto.rst index afbd8c1c321d..0b36b9ebfa4b 100644 --- a/Documentation/PCI/pcieaer-howto.rst +++ b/Documentation/PCI/pcieaer-howto.rst @@ -232,9 +232,9 @@ messages to root port when an error is detected. :: - int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);` + int pci_aer_clear_nonfatal_status(struct pci_dev *dev);` -pci_cleanup_aer_uncorrect_error_status cleanups the uncorrectable +pci_aer_clear_nonfatal_status clears non-fatal errors in the uncorrectable error status register.
Frequent Asked Questions diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5ae671609f98..effca3fa92e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3495,10 +3495,10 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_DISCONNECT; } - err = pci_cleanup_aer_uncorrect_error_status(pdev); + err = pci_aer_clear_nonfatal_status(pdev); if (err) dev_dbg(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed, error %d\n", + "pci_aer_clear_nonfatal_status() failed, error %d\n", err); /* non-fatal, continue */ diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index dcf234680535..edae52384b8a 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -2674,8 +2674,8 @@ static int idt_init_pci(struct idt_ntb_dev *ndev) ret = pci_enable_pcie_error_reporting(pdev); if (ret != 0) dev_warn(&pdev->dev, "PCIe AER capability disabled\n"); - else /* Cleanup uncorrectable error status before getting to init */ - pci_cleanup_aer_uncorrect_error_status(pdev); + else /* Cleanup nonfatal error status before getting to init */ + pci_aer_clear_nonfatal_status(pdev); /* First enable the PCI device */ ret = pcim_enable_device(pdev); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d828ca835a98..6c6e8c73fd8f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1503,7 +1503,7 @@ void pci_restore_state(struct pci_dev *dev) pci_restore_rebar_state(dev); pci_restore_dpc_state(dev); - pci_cleanup_aer_error_status_regs(dev); + pci_aer_clear_status(dev); pci_restore_aer_state(dev); pci_restore_config_space(dev); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 25265bf80a83..bd46f23e3db1 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -656,6 +656,7 @@ void pci_aer_exit(struct pci_dev *dev); extern const struct attribute_group aer_stats_attr_group; void pci_aer_clear_fatal_status(struct pci_dev *dev); void pci_aer_clear_device_status(struct pci_dev *dev); +int pci_aer_clear_status(struct pci_dev *dev); int pci_aer_raw_clear_status(struct pci_dev *dev); #else static inline void pci_no_aer(void) { } @@ -663,6 +664,7 @@ static inline void pci_aer_init(struct pci_dev *d) { } static inline void pci_aer_exit(struct pci_dev *d) { } static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } +static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; } static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; } #endif diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index bd9f122165e0..f4274d301235 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -377,7 +377,7 @@ void pci_aer_clear_device_status(struct pci_dev *dev) pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); } -int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +int pci_aer_clear_nonfatal_status(struct pci_dev *dev) { int pos; u32 status, sev; @@ -398,7 +398,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) return 0; } -EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); +EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status); void pci_aer_clear_fatal_status(struct pci_dev *dev) { @@ -457,7 +457,7 @@ int pci_aer_raw_clear_status(struct pci_dev *dev) return 0; } -int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) +int 
pci_aer_clear_status(struct pci_dev *dev) { if (pcie_aer_get_firmware_first(dev)) return -EIO; @@ -530,7 +530,7 @@ void pci_aer_init(struct pci_dev *dev) n = pcie_cap_has_rtctl(dev) ? 5 : 4; pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n); - pci_cleanup_aer_error_status_regs(dev); + pci_aer_clear_status(dev); } void pci_aer_exit(struct pci_dev *dev) diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index 22998ee2f7ea..762170423fdd 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -221,7 +221,7 @@ void dpc_process_error(struct pci_dev *pdev) dpc_get_aer_uncorrect_severity(pdev, &info) && aer_get_device_error_info(pdev, &info)) { aer_print_error(pdev, &info); - pci_cleanup_aer_uncorrect_error_status(pdev); + pci_aer_clear_nonfatal_status(pdev); pci_aer_clear_fatal_status(pdev); } } diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 0c40488da651..14bb8f54723e 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -198,7 +198,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pci_walk_bus(bus, report_resume, &status); pci_aer_clear_device_status(dev); - pci_cleanup_aer_uncorrect_error_status(dev); + pci_aer_clear_nonfatal_status(dev); pci_info(dev, "device recovery successful\n"); return status; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 46f56f30f77e..847300de7ff1 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4783,7 +4783,7 @@ static DEVICE_ATTR_RW(lpfc_aer_support); * Description: * If the @buf contains 1 and the device currently has the AER support * enabled, then invokes the kernel AER helper routine - * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable + * pci_aer_clear_nonfatal_status() to clean up the uncorrectable * error status register. * * Notes: @@ -4809,7 +4809,7 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr, return -EINVAL; if (phba->hba_flag & HBA_AER_ENABLED) - rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev); + rc = pci_aer_clear_nonfatal_status(phba->pcidev); if (rc == 0) return strlen(buf); diff --git a/include/linux/aer.h b/include/linux/aer.h index fa19e01f418a..97f64ba1b34a 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -44,8 +44,7 @@ struct aer_capability_regs { /* PCIe port driver needs this function to enable AER */ int pci_enable_pcie_error_reporting(struct pci_dev *dev); int pci_disable_pcie_error_reporting(struct pci_dev *dev); -int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); -int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); +int pci_aer_clear_nonfatal_status(struct pci_dev *dev); void pci_save_aer_state(struct pci_dev *dev); void pci_restore_aer_state(struct pci_dev *dev); #else @@ -57,11 +56,7 @@ static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) { return -EINVAL; } -static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) -{ - return -EINVAL; -} -static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) +static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev) { return -EINVAL; } -- cgit v1.2.3-58-ga151 From cee416a347440628762db2257ff921ccf9f66923 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:32 -0700 Subject: platform/chrome: cros_ec_sensorhub: Add the number of sensors in sensorhub To better manage resources, store the number of sensors reported by the EC. 
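The cached count is what later consumers use to validate EC-reported sensor indices before touching per-sensor state. A minimal sketch of that pattern follows; the helper itself is hypothetical, and only the sensor_num field comes from this patch.

#include <linux/platform_data/cros_ec_sensorhub.h>

/*
 * Hypothetical helper, for illustration only: reject out-of-range
 * sensor indices reported by the EC before indexing per-sensor data.
 */
static bool cros_ec_sensor_id_valid(struct cros_ec_sensorhub *sensorhub,
				    int id)
{
	return id >= 0 && id < sensorhub->sensor_num;
}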
Signed-off-by: Gwendal Grignou Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/cros_ec_sensorhub.c | 4 +++- include/linux/platform_data/cros_ec_sensorhub.h | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c index 79fefd3bb0fa..134ee5e46047 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub.c +++ b/drivers/platform/chrome/cros_ec_sensorhub.c @@ -65,6 +65,7 @@ static int cros_ec_sensorhub_register(struct device *dev, return sensor_num; } + sensorhub->sensor_num = sensor_num; if (sensor_num == 0) { dev_err(dev, "Zero sensors reported.\n"); return -EINVAL; @@ -172,7 +173,8 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) * If the device has sensors but does not claim to * be a sensor hub, we are in legacy mode. */ - for (i = 0; i < 2; i++) { + data->sensor_num = 2; + for (i = 0; i < data->sensor_num; i++) { ret = cros_ec_sensorhub_allocate_sensor(dev, "cros-ec-accel-legacy", i); if (ret) diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h index bef7ffc7fce1..7e46a47fd642 100644 --- a/include/linux/platform_data/cros_ec_sensorhub.h +++ b/include/linux/platform_data/cros_ec_sensorhub.h @@ -22,9 +22,11 @@ struct cros_ec_sensor_platform { * struct cros_ec_sensorhub - Sensor Hub device data. * * @ec: Embedded Controller where the hub is located. + * @sensor_num: Number of MEMS sensors present in the EC. */ struct cros_ec_sensorhub { struct cros_ec_dev *ec; + int sensor_num; }; #endif /* __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H */ -- cgit v1.2.3-58-ga151 From 145d59baff5944b71551ac518d7fd7d377a9c820 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:33 -0700 Subject: platform/chrome: cros_ec_sensorhub: Add FIFO support cros_ec_sensorhub registers a listener and queries the motion sense FIFO, then spreads the samples to the registered IIO sensors, as sketched below.
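A consumer-side sketch of that flow (hypothetical driver code: the registration helper is introduced by this patch, and the callback signature is assumed from the cb(indio_dev, sample->vector, sample->timestamp) call in cros_sensorhub_send_sample() further down):

#include <linux/iio/iio.h>
#include <linux/platform_data/cros_ec_sensorhub.h>

/* Hypothetical IIO sensor driver hooking one sensor into the hub FIFO. */
static int my_push_data(struct iio_dev *indio_dev, s16 *data, s64 timestamp)
{
	/* Push the 3-axis sample and its timestamp into the IIO buffer. */
	return 0;
}

static int my_sensor_hook_fifo(struct cros_ec_sensorhub *sensorhub,
			       u8 sensor_num, struct iio_dev *indio_dev)
{
	/* Fails with -EINVAL if sensor_num is out of range or already taken. */
	return cros_ec_sensorhub_register_push_data(sensorhub, sensor_num,
						    indio_dev, my_push_data);
}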
To test, we can use libiio: iiod& iio_readdev -u ip:localhost -T 10000 -s 25 -b 16 cros-ec-gyro | od -x Signed-off-by: Gwendal Grignou Reviewed-by: Jonathan Cameron Acked-by: Andy Shevchenko Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/Makefile | 3 +- drivers/platform/chrome/cros_ec_sensorhub.c | 107 ++++-- drivers/platform/chrome/cros_ec_sensorhub_ring.c | 439 +++++++++++++++++++++++ include/linux/platform_data/cros_ec_sensorhub.h | 76 ++++ 4 files changed, 597 insertions(+), 28 deletions(-) create mode 100644 drivers/platform/chrome/cros_ec_sensorhub_ring.c (limited to 'include/linux') diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index 198c155c7c4d..41baccba033f 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -20,7 +20,8 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o -obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o +cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o +obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c index 134ee5e46047..b7f2c00db5e1 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub.c +++ b/drivers/platform/chrome/cros_ec_sensorhub.c @@ -50,10 +50,8 @@ static int cros_ec_sensorhub_register(struct device *dev, struct cros_ec_sensorhub *sensorhub) { int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 }; + struct cros_ec_command *msg = sensorhub->msg; struct cros_ec_dev *ec = sensorhub->ec; - struct ec_params_motion_sense *params; - struct ec_response_motion_sense *resp; - struct cros_ec_command *msg; int ret, i, sensor_num; char *name; @@ -71,22 +69,13 @@ static int cros_ec_sensorhub_register(struct device *dev, return -EINVAL; } - /* Prepare a message to send INFO command to each sensor. 
*/ - msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)), - GFP_KERNEL); - if (!msg) - return -ENOMEM; - msg->version = 1; - msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset; - msg->outsize = sizeof(*params); - msg->insize = sizeof(*resp); - params = (struct ec_params_motion_sense *)msg->data; - resp = (struct ec_response_motion_sense *)msg->data; + msg->insize = sizeof(struct ec_response_motion_sense); + msg->outsize = sizeof(struct ec_params_motion_sense); for (i = 0; i < sensor_num; i++) { - params->cmd = MOTIONSENSE_CMD_INFO; - params->info.sensor_num = i; + sensorhub->params->cmd = MOTIONSENSE_CMD_INFO; + sensorhub->params->info.sensor_num = i; ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) { @@ -95,7 +84,7 @@ static int cros_ec_sensorhub_register(struct device *dev, continue; } - switch (resp->info.type) { + switch (sensorhub->resp->info.type) { case MOTIONSENSE_TYPE_ACCEL: name = "cros-ec-accel"; break; @@ -118,15 +107,16 @@ static int cros_ec_sensorhub_register(struct device *dev, name = "cros-ec-activity"; break; default: - dev_warn(dev, "unknown type %d\n", resp->info.type); + dev_warn(dev, "unknown type %d\n", + sensorhub->resp->info.type); continue; } ret = cros_ec_sensorhub_allocate_sensor(dev, name, i); if (ret) - goto error; + return ret; - sensor_type[resp->info.type]++; + sensor_type[sensorhub->resp->info.type]++; } if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) @@ -138,29 +128,41 @@ static int cros_ec_sensorhub_register(struct device *dev, "cros-ec-lid-angle", 0); if (ret) - goto error; + return ret; } - kfree(msg); return 0; - -error: - kfree(msg); - return ret; } static int cros_ec_sensorhub_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct cros_ec_dev *ec = dev_get_drvdata(dev->parent); struct cros_ec_sensorhub *data; + struct cros_ec_command *msg; int ret; int i; + msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) + + max((u16)sizeof(struct ec_params_motion_sense), + ec->ec_dev->max_response), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset; + data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL); if (!data) return -ENOMEM; - data->ec = dev_get_drvdata(dev->parent); + mutex_init(&data->cmd_lock); + + data->dev = dev; + data->ec = ec; + data->msg = msg; + data->params = (struct ec_params_motion_sense *)msg->data; + data->resp = (struct ec_response_motion_sense *)msg->data; + dev_set_drvdata(dev, data); /* Check whether this EC is a sensor hub. */ @@ -182,12 +184,63 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) } } + /* + * If the EC does not have a FIFO, the sensors will query their data + * themselves via sysfs or a software trigger. + */ + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + ret = cros_ec_sensorhub_ring_add(data); + if (ret) + return ret; + /* + * The msg and its data is not under the control of the ring + * handler. + */ + return devm_add_action_or_reset(dev, + cros_ec_sensorhub_ring_remove, + data); + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +/* + * When the EC is suspending, we must stop sending interrupt, + * we may use the same interrupt line for waking up the device. + * Tell the EC to stop sending non-interrupt event on the iio ring. 
+ */ +static int cros_ec_sensorhub_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev); + struct cros_ec_dev *ec = sensorhub->ec; + + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) + return cros_ec_sensorhub_ring_fifo_enable(sensorhub, false); return 0; } +static int cros_ec_sensorhub_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev); + struct cros_ec_dev *ec = sensorhub->ec; + + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) + return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops, + cros_ec_sensorhub_suspend, + cros_ec_sensorhub_resume); + static struct platform_driver cros_ec_sensorhub_driver = { .driver = { .name = DRV_NAME, + .pm = &cros_ec_sensorhub_pm_ops, }, .probe = cros_ec_sensorhub_probe, }; diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c new file mode 100644 index 000000000000..da73757529ca --- /dev/null +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Chrome OS EC Sensor hub FIFO. + * + * Copyright 2020 Google LLC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline int +cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub, + struct cros_ec_sensors_ring_sample *sample) +{ + cros_ec_sensorhub_push_data_cb_t cb; + int id = sample->sensor_id; + struct iio_dev *indio_dev; + + if (id > sensorhub->sensor_num) + return -EINVAL; + + cb = sensorhub->push_data[id].push_data_cb; + if (!cb) + return 0; + + indio_dev = sensorhub->push_data[id].indio_dev; + + if (sample->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) + return 0; + + return cb(indio_dev, sample->vector, sample->timestamp); +} + +/** + * cros_ec_sensorhub_register_push_data() - register the callback to the hub. + * + * @sensorhub : Sensor Hub object + * @sensor_num : The sensor the caller is interested in. + * @indio_dev : The iio device to use when a sample arrives. + * @cb : The callback to call when a sample arrives. + * + * The callback cb will be used by cros_ec_sensorhub_ring to distribute events + * from the EC. + * + * Return: 0 when callback is registered. + * EINVAL is the sensor number is invalid or the slot already used. + */ +int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub, + u8 sensor_num, + struct iio_dev *indio_dev, + cros_ec_sensorhub_push_data_cb_t cb) +{ + if (sensor_num >= sensorhub->sensor_num) + return -EINVAL; + if (sensorhub->push_data[sensor_num].indio_dev) + return -EINVAL; + + sensorhub->push_data[sensor_num].indio_dev = indio_dev; + sensorhub->push_data[sensor_num].push_data_cb = cb; + + return 0; +} +EXPORT_SYMBOL_GPL(cros_ec_sensorhub_register_push_data); + +void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub, + u8 sensor_num) +{ + sensorhub->push_data[sensor_num].indio_dev = NULL; + sensorhub->push_data[sensor_num].push_data_cb = NULL; +} +EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data); + +/** + * cros_ec_sensorhub_ring_fifo_enable() - Enable or disable interrupt generation + * for FIFO events. + * @sensorhub: Sensor Hub object + * @on: true when events are requested. 
+ * + * To be called before sleeping or when noone is listening. + * Return: 0 on success, or an error when we can not communicate with the EC. + * + */ +int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, + bool on) +{ + int ret; + + mutex_lock(&sensorhub->cmd_lock); + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE; + sensorhub->params->fifo_int_enable.enable = on; + + sensorhub->msg->outsize = sizeof(struct ec_params_motion_sense); + sensorhub->msg->insize = sizeof(struct ec_response_motion_sense); + + ret = cros_ec_cmd_xfer_status(sensorhub->ec->ec_dev, sensorhub->msg); + mutex_unlock(&sensorhub->cmd_lock); + + /* We expect to receive a payload of 4 bytes, ignore. */ + if (ret > 0) + ret = 0; + + return ret; +} + +/** + * cros_ec_sensor_ring_process_event() - process one EC FIFO event + * + * @sensorhub: Sensor Hub object. + * @fifo_info: FIFO information from the EC (includes b point, EC timebase). + * @fifo_timestamp: EC IRQ, kernel timebase (aka c). + * @current_timestamp: calculated event timestamp, kernel timebase (aka a'). + * @in: incoming FIFO event from EC (includes a point, EC timebase). + * @out: outgoing event to user space (includes a'). + * + * Process one EC event, add it in the ring if necessary. + * + * Return: true if out event has been populated. + */ +static bool +cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub, + const struct ec_response_motion_sense_fifo_info + *fifo_info, + const ktime_t fifo_timestamp, + ktime_t *current_timestamp, + struct ec_response_motion_sensor_data *in, + struct cros_ec_sensors_ring_sample *out) +{ + const s64 now = cros_ec_get_time_ns(); + int axis, async_flags; + + /* Do not populate the filter based on asynchronous events. */ + async_flags = in->flags & + (MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH); + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) { + s64 new_timestamp; + + /* + * Disable filtering since we might add more jitter + * if b is in a random point in time. + */ + new_timestamp = fifo_timestamp - + fifo_info->timestamp * 1000 + + in->timestamp * 1000; + + /* + * The timestamp can be stale if we had to use the fifo + * info timestamp. + */ + if (new_timestamp - *current_timestamp > 0) + *current_timestamp = new_timestamp; + } + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) { + out->sensor_id = in->sensor_num; + out->timestamp = *current_timestamp; + out->flag = in->flags; + /* + * No other payload information provided with + * flush ack. + */ + return true; + } + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP) + /* If we just have a timestamp, skip this entry. */ + return false; + + /* Regular sample */ + out->sensor_id = in->sensor_num; + if (*current_timestamp - now > 0) + /* If the timestamp is in the future. */ + out->timestamp = now; + else + out->timestamp = *current_timestamp; + + out->flag = in->flags; + for (axis = 0; axis < 3; axis++) + out->vector[axis] = in->data[axis]; + + return true; +} + +/** + * cros_ec_sensorhub_ring_handler() - The trigger handler function + * + * @sensorhub: Sensor Hub object. + * + * Called by the notifier, process the EC sensor FIFO queue. 
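To make the timestamp math above concrete, here is a standalone, plain-C numeric example (synthetic values, not driver code) of the naive a' = c - b + a conversion used in the non-filtered path, including the us-to-ns scaling:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t a = 1000;		/* sensor event, EC timebase (us) */
	int64_t b = 1500;		/* EC IRQ, EC timebase (us) */
	int64_t c = 900000000;		/* EC IRQ, AP timebase (ns) */

	/* new_timestamp = fifo_timestamp - fifo_info->timestamp * 1000
	 *                 + in->timestamp * 1000                        */
	int64_t a_prime = c - b * 1000 + a * 1000;

	/* The event happened 500 us before the IRQ reached the AP. */
	printf("a' = %lld ns\n", (long long)a_prime);	/* 899500000 */
	return 0;
}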
+ */ +static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub) +{ + struct ec_response_motion_sense_fifo_info *fifo_info = + sensorhub->fifo_info; + struct cros_ec_dev *ec = sensorhub->ec; + ktime_t fifo_timestamp, current_timestamp; + int i, j, number_data, ret; + struct ec_response_motion_sensor_data *in; + struct cros_ec_sensors_ring_sample *out, *last_out; + + mutex_lock(&sensorhub->cmd_lock); + + /* Get FIFO information if there are lost vectors. */ + if (fifo_info->total_lost) { + int fifo_info_length = + sizeof(struct ec_response_motion_sense_fifo_info) + + sizeof(u16) * sensorhub->sensor_num; + + /* Need to retrieve the number of lost vectors per sensor */ + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO; + sensorhub->msg->outsize = 1; + sensorhub->msg->insize = fifo_info_length; + + if (cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg) < 0) + goto error; + + memcpy(fifo_info, &sensorhub->resp->fifo_info, + fifo_info_length); + + /* + * Update collection time, will not be as precise as the + * non-error case. + */ + fifo_timestamp = cros_ec_get_time_ns(); + } else { + fifo_timestamp = sensorhub->fifo_timestamp[ + CROS_EC_SENSOR_NEW_TS]; + } + + if (fifo_info->count > sensorhub->fifo_size || + fifo_info->size != sensorhub->fifo_size) { + dev_warn(sensorhub->dev, + "Mismatch EC data: count %d, size %d - expected %d", + fifo_info->count, fifo_info->size, + sensorhub->fifo_size); + goto error; + } + + /* Copy elements in the main fifo */ + current_timestamp = sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS]; + out = sensorhub->ring; + for (i = 0; i < fifo_info->count; i += number_data) { + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_READ; + sensorhub->params->fifo_read.max_data_vector = + fifo_info->count - i; + sensorhub->msg->outsize = + sizeof(struct ec_params_motion_sense); + sensorhub->msg->insize = + sizeof(sensorhub->resp->fifo_read) + + sensorhub->params->fifo_read.max_data_vector * + sizeof(struct ec_response_motion_sensor_data); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg); + if (ret < 0) { + dev_warn(sensorhub->dev, "Fifo error: %d\n", ret); + break; + } + number_data = sensorhub->resp->fifo_read.number_data; + if (number_data == 0) { + dev_dbg(sensorhub->dev, "Unexpected empty FIFO\n"); + break; + } + if (number_data > fifo_info->count - i) { + dev_warn(sensorhub->dev, + "Invalid EC data: too many entry received: %d, expected %d", + number_data, fifo_info->count - i); + break; + } + if (out + number_data > + sensorhub->ring + fifo_info->count) { + dev_warn(sensorhub->dev, + "Too many samples: %d (%zd data) to %d entries for expected %d entries", + i, out - sensorhub->ring, i + number_data, + fifo_info->count); + break; + } + + for (in = sensorhub->resp->fifo_read.data, j = 0; + j < number_data; j++, in++) { + if (cros_ec_sensor_ring_process_event( + sensorhub, fifo_info, + fifo_timestamp, + ¤t_timestamp, + in, out)) + out++; + } + } + mutex_unlock(&sensorhub->cmd_lock); + last_out = out; + + if (out == sensorhub->ring) + /* Unexpected empty FIFO. */ + goto ring_handler_end; + + /* + * Check if current_timestamp is ahead of the last sample. + * Normally, the EC appends a timestamp after the last sample, but if + * the AP is slow to respond to the IRQ, the EC may have added new + * samples. Use the FIFO info timestamp as last timestamp then. + */ + if ((last_out - 1)->timestamp == current_timestamp) + current_timestamp = fifo_timestamp; + + /* Warn on lost samples. 
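The fifo_info_length computation above sizes a response that ends in one u16 lost-sample counter per sensor. A standalone sketch of that flexible-array sizing, with a stand-in struct instead of the real ec_response_motion_sense_fifo_info:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct fifo_info_hdr {		/* illustrative stand-in header */
	uint32_t count;
	uint32_t size;
	uint32_t total_lost;
	uint32_t timestamp;
	uint16_t lost[];	/* one entry per sensor */
};

int main(void)
{
	int sensor_num = 5;	/* hypothetical sensor count */
	size_t len = sizeof(struct fifo_info_hdr) +
		     sizeof(uint16_t) * sensor_num;
	struct fifo_info_hdr *info = calloc(1, len);

	if (!info)
		return 1;
	printf("fifo_info_length = %zu bytes\n", len);
	free(info);
	return 0;
}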
*/ + if (fifo_info->total_lost) + for (i = 0; i < sensorhub->sensor_num; i++) { + if (fifo_info->lost[i]) + dev_warn_ratelimited(sensorhub->dev, + "Sensor %d: lost: %d out of %d\n", + i, fifo_info->lost[i], + fifo_info->total_lost); + } + + /* Push the event into the FIFO. */ + for (out = sensorhub->ring; out < last_out; out++) + cros_sensorhub_send_sample(sensorhub, out); + +ring_handler_end: + sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp; + return; + +error: + mutex_unlock(&sensorhub->cmd_lock); +} + +static int cros_ec_sensorhub_event(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *_notify) +{ + struct cros_ec_sensorhub *sensorhub; + struct cros_ec_device *ec_dev; + + sensorhub = container_of(nb, struct cros_ec_sensorhub, notifier); + ec_dev = sensorhub->ec->ec_dev; + + if (ec_dev->event_data.event_type != EC_MKBP_EVENT_SENSOR_FIFO) + return NOTIFY_DONE; + + if (ec_dev->event_size != sizeof(ec_dev->event_data.data.sensor_fifo)) { + dev_warn(ec_dev->dev, "Invalid fifo info size\n"); + return NOTIFY_DONE; + } + + if (queued_during_suspend) + return NOTIFY_OK; + + memcpy(sensorhub->fifo_info, &ec_dev->event_data.data.sensor_fifo.info, + sizeof(*sensorhub->fifo_info)); + sensorhub->fifo_timestamp[CROS_EC_SENSOR_NEW_TS] = + ec_dev->last_event_time; + cros_ec_sensorhub_ring_handler(sensorhub); + + return NOTIFY_OK; +} + +/** + * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC + * supports it. + * + * @sensorhub : Sensor Hub object. + * + * Return: 0 on success. + */ +int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) +{ + struct cros_ec_dev *ec = sensorhub->ec; + int ret; + int fifo_info_length = + sizeof(struct ec_response_motion_sense_fifo_info) + + sizeof(u16) * sensorhub->sensor_num; + + /* Allocate the array for lost events. */ + sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length, + GFP_KERNEL); + if (!sensorhub->fifo_info) + return -ENOMEM; + + /* Retrieve FIFO information */ + sensorhub->msg->version = 2; + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO; + sensorhub->msg->outsize = 1; + sensorhub->msg->insize = fifo_info_length; + + ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg); + if (ret < 0) + return ret; + + /* + * Allocate the full fifo. We need to copy the whole FIFO to set + * timestamps properly. + */ + sensorhub->fifo_size = sensorhub->resp->fifo_info.size; + sensorhub->ring = devm_kcalloc(sensorhub->dev, sensorhub->fifo_size, + sizeof(*sensorhub->ring), GFP_KERNEL); + if (!sensorhub->ring) + return -ENOMEM; + + /* + * Allocate the callback area based on the number of sensors. + */ + sensorhub->push_data = devm_kcalloc( + sensorhub->dev, sensorhub->sensor_num, + sizeof(*sensorhub->push_data), + GFP_KERNEL); + if (!sensorhub->push_data) + return -ENOMEM; + + sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = + cros_ec_get_time_ns(); + + /* Register the notifier that will act as a top half interrupt. */ + sensorhub->notifier.notifier_call = cros_ec_sensorhub_event; + ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier, + &sensorhub->notifier); + if (ret < 0) + return ret; + + /* Start collection samples. */ + return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true); +} + +void cros_ec_sensorhub_ring_remove(void *arg) +{ + struct cros_ec_sensorhub *sensorhub = arg; + struct cros_ec_device *ec_dev = sensorhub->ec->ec_dev; + + /* Disable the ring, prevent EC interrupt to the AP for nothing. 
*/ + cros_ec_sensorhub_ring_fifo_enable(sensorhub, false); + blocking_notifier_chain_unregister(&ec_dev->event_notifier, + &sensorhub->notifier); +} diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h index 7e46a47fd642..b0950814f820 100644 --- a/include/linux/platform_data/cros_ec_sensorhub.h +++ b/include/linux/platform_data/cros_ec_sensorhub.h @@ -8,8 +8,13 @@ #ifndef __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H #define __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H +#include +#include +#include #include +struct iio_dev; + /** * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. * @sensor_num: Id of the sensor, as reported by the EC. @@ -18,15 +23,86 @@ struct cros_ec_sensor_platform { u8 sensor_num; }; +/** + * typedef cros_ec_sensorhub_push_data_cb_t - Callback function to send datum + * to specific sensors. + * + * @indio_dev: The IIO device that will process the sample. + * @data: Vector array of the ring sample. + * @timestamp: Timestamp in host timespace when the sample was acquired by + * the EC. + */ +typedef int (*cros_ec_sensorhub_push_data_cb_t)(struct iio_dev *indio_dev, + s16 *data, + s64 timestamp); + +struct cros_ec_sensorhub_sensor_push_data { + struct iio_dev *indio_dev; + cros_ec_sensorhub_push_data_cb_t push_data_cb; +}; + +enum { + CROS_EC_SENSOR_LAST_TS, + CROS_EC_SENSOR_NEW_TS, + CROS_EC_SENSOR_ALL_TS +}; + +struct cros_ec_sensors_ring_sample { + u8 sensor_id; + u8 flag; + s16 vector[3]; + s64 timestamp; +} __packed; + /** * struct cros_ec_sensorhub - Sensor Hub device data. * + * @dev: Device object, mostly used for logging. * @ec: Embedded Controller where the hub is located. * @sensor_num: Number of MEMS sensors present in the EC. + * @msg: Structure to send FIFO requests. + * @params: Pointer to parameters in msg. + * @resp: Pointer to responses in msg. + * @cmd_lock : Lock for sending msg. + * @notifier: Notifier to kick the FIFO interrupt. + * @ring: Preprocessed ring to store events. + * @fifo_timestamp: array for event timestamp and spreading. + * @fifo_info: copy of FIFO information coming from the EC. + * @fifo_size: size of the ring. + * @push_data: array of callback to send datums to iio sensor object. */ struct cros_ec_sensorhub { + struct device *dev; struct cros_ec_dev *ec; int sensor_num; + + struct cros_ec_command *msg; + struct ec_params_motion_sense *params; + struct ec_response_motion_sense *resp; + struct mutex cmd_lock; /* Lock for protecting msg structure. 
*/ + + struct notifier_block notifier; + + struct cros_ec_sensors_ring_sample *ring; + + ktime_t fifo_timestamp[CROS_EC_SENSOR_ALL_TS]; + struct ec_response_motion_sense_fifo_info *fifo_info; + int fifo_size; + + struct cros_ec_sensorhub_sensor_push_data *push_data; }; +int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub, + u8 sensor_num, + struct iio_dev *indio_dev, + cros_ec_sensorhub_push_data_cb_t cb); + +void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub, + u8 sensor_num); + +int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub); +void cros_ec_sensorhub_ring_remove(void *arg); +int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, + bool on); + #endif /* __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H */ -- cgit v1.2.3-58-ga151 From 93fe48a585905675719835f8269258736de0948f Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:35 -0700 Subject: platform/chrome: cros_ec_sensorhub: Add median filter Events are timestamped in EC time space, their timestamps need to be converted in host time space. The assumption is the time delta between when the interrupt is sent by the EC and when it is receive by the host is a [small] constant. This is not always true, even with hard-wired interrupt. To mitigate worst offenders, add a median filter to weed out bigger than expected delays. Signed-off-by: Gwendal Grignou Acked-by: Jonathan Cameron Acked-by: Lee Jones Acked-by: Andy Shevchenko Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/cros_ec_sensorhub_ring.c | 560 +++++++++++++++++++++-- include/linux/platform_data/cros_ec_sensorhub.h | 93 +++- 2 files changed, 622 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 443db8277d2b..230e6cf3da2f 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -17,6 +17,21 @@ #include #include +/* Precision of fixed point for the m values from the filter */ +#define M_PRECISION BIT(23) + +/* Only activate the filter once we have at least this many elements. */ +#define TS_HISTORY_THRESHOLD 8 + +/* + * If we don't have any history entries for this long, empty the filter to + * make sure there are no big discontinuities. + */ +#define TS_HISTORY_BORED_US 500000 + +/* To measure by how much the filter is overshooting, if it happens. 
*/ +#define FUTURE_TS_ANALYTICS_COUNT_MAX 100 + static inline int cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub, struct cros_ec_sensors_ring_sample *sample) @@ -92,9 +107,13 @@ EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data); int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, bool on) { - int ret; + int ret, i; mutex_lock(&sensorhub->cmd_lock); + if (sensorhub->tight_timestamps) + for (i = 0; i < sensorhub->sensor_num; i++) + sensorhub->batch_state[i].last_len = 0; + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE; sensorhub->params->fifo_int_enable.enable = on; @@ -111,8 +130,245 @@ int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, return ret; } +static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2) +{ + s64 v1 = *(s64 *)pv1; + s64 v2 = *(s64 *)pv2; + + if (v1 > v2) + return 1; + else if (v1 < v2) + return -1; + else + return 0; +} + +/* + * cros_ec_sensor_ring_median: Gets median of an array of numbers + * + * For now it's implemented using an inefficient (> O(n)) sort, then + * returns the middle element. A more optimal method would be something like + * quickselect, but given that n = 64 we can probably live with it in the + * name of clarity. + * + * Warning: the input array gets modified (sorted)! + */ +static s64 cros_ec_sensor_ring_median(s64 *array, size_t length) +{ + sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL); + return array[length / 2]; +} + +/* + * IRQ Timestamp Filtering + * + * Lower down in cros_ec_sensor_ring_process_event(), for each sensor event + * we have to calculate its timestamp in the AP timebase. There are 3 time + * points: + * a - EC timebase, sensor event + * b - EC timebase, IRQ + * c - AP timebase, IRQ + * a' - what we want: sensor event in AP timebase + * + * While a and b are recorded at accurate times (due to the EC real time + * nature); c is pretty untrustworthy, even though it's recorded the + * first thing in ec_irq_handler(). There is a very good chance we'll get + * added latency due to: + * other irqs + * ddrfreq + * cpuidle + * + * Normally a' = c - b + a, but if we do that naive math any jitter in c + * will get coupled into a', which we don't want. We want a function + * a' = cros_ec_sensor_ring_ts_filter(a) which will filter out outliers in c. + * + * Think of a graph of AP time (c) on the y axis vs EC time (b) on the x axis. + * The slope of the line won't be exactly 1, there will be some clock drift + * between the 2 chips for various reasons (mechanical stress, temperature, + * voltage). We need to extrapolate values for a future x, without trusting + * recent y values too much. + * + * We use a median filter for the slope, then another median filter for the + * y-intercept to calculate this function: + * dx[n] = x[n-1] - x[n] + * dy[n] = y[n-1] - y[n] + * m[n] = dy[n] / dx[n] + * median_m = median(m[n-k:n]) + * error[i] = y[n-i] - median_m * x[n-i] + * median_error = median(error[:k]) + * predicted_y = median_m * x + median_error + * + * Implementation differences from above: + * - Redefined y to be actually c - b, this gives us a lot more precision + * to do the math. (c-b)/b variations are more obvious than c/b variations.
+ * - Since we don't have floating point, any operations involving slope are + * done using fixed point math (*M_PRECISION) + * - Since x and y grow with time, we keep zeroing the graph (relative to + * the last sample), this way math involving *x[n-i] will not overflow + * - EC timestamps are kept in us, it improves the slope calculation precision + */ + +/** + * cros_ec_sensor_ring_ts_filter_update() - Update filter history. + * + * @state: Filter information. + * @b: IRQ timestamp, EC timebase (us) + * @c: IRQ timestamp, AP timebase (ns) + * + * Given a new IRQ timestamp pair (EC and AP timebases), add it to the filter + * history. + */ +static void +cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state + *state, + s64 b, s64 c) +{ + s64 x, y; + s64 dx, dy; + s64 m; /* stored as *M_PRECISION */ + s64 *m_history_copy = state->temp_buf; + s64 *error = state->temp_buf; + int i; + + /* we trust b the most, that'll be our independent variable */ + x = b; + /* y is the offset between AP and EC times, in ns */ + y = c - b * 1000; + + dx = (state->x_history[0] + state->x_offset) - x; + if (dx == 0) + return; /* we already have this irq in the history */ + dy = (state->y_history[0] + state->y_offset) - y; + m = div64_s64(dy * M_PRECISION, dx); + + /* Empty filter if we haven't seen any action in a while. */ + if (-dx > TS_HISTORY_BORED_US) + state->history_len = 0; + + /* Move everything over, also update offset to all absolute coords .*/ + for (i = state->history_len - 1; i >= 1; i--) { + state->x_history[i] = state->x_history[i - 1] + dx; + state->y_history[i] = state->y_history[i - 1] + dy; + + state->m_history[i] = state->m_history[i - 1]; + /* + * Also use the same loop to copy m_history for future + * median extraction. + */ + m_history_copy[i] = state->m_history[i - 1]; + } + + /* Store the x and y, but remember offset is actually last sample. */ + state->x_offset = x; + state->y_offset = y; + state->x_history[0] = 0; + state->y_history[0] = 0; + + state->m_history[0] = m; + m_history_copy[0] = m; + + if (state->history_len < CROS_EC_SENSORHUB_TS_HISTORY_SIZE) + state->history_len++; + + /* Precalculate things for the filter. */ + if (state->history_len > TS_HISTORY_THRESHOLD) { + state->median_m = + cros_ec_sensor_ring_median(m_history_copy, + state->history_len - 1); + + /* + * Calculate y-intercepts as if m_median is the slope and + * points in the history are on the line. median_error will + * still be in the offset coordinate system. + */ + for (i = 0; i < state->history_len; i++) + error[i] = state->y_history[i] - + div_s64(state->median_m * state->x_history[i], + M_PRECISION); + state->median_error = + cros_ec_sensor_ring_median(error, state->history_len); + } else { + state->median_m = 0; + state->median_error = 0; + } +} + +/** + * cros_ec_sensor_ring_ts_filter() - Translate EC timebase timestamp to AP + * timebase + * + * @state: filter information. + * @x: any ec timestamp (us): + * + * cros_ec_sensor_ring_ts_filter(a) => a' event timestamp, AP timebase + * cros_ec_sensor_ring_ts_filter(b) => calculated timestamp when the EC IRQ + * should have happened on the AP, with low jitter + * + * Note: The filter will only activate once state->history_len goes + * over TS_HISTORY_THRESHOLD. Otherwise it'll just do the naive c - b + a + * transform. 
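As a cross-check of the filter described above, here is a standalone plain-C toy version (synthetic numbers; it reuses the M_PRECISION fixed-point idea but none of the driver's incremental state). The median slope rejects the jittered point and the prediction lands back on the clean line:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define M_PRECISION (1LL << 23)	/* same fixed-point scale as the driver */

static int cmp_s64(const void *a, const void *b)
{
	int64_t x = *(const int64_t *)a, y = *(const int64_t *)b;

	return (x > y) - (x < y);
}

static int64_t median(int64_t *v, size_t n)
{
	qsort(v, n, sizeof(*v), cmp_s64);	/* sorts in place, like the driver */
	return v[n / 2];
}

int main(void)
{
	/* x: EC IRQ times (us); y: AP-EC offset (ns); y[2] carries IRQ jitter. */
	int64_t x[] = { 0, 1000, 2000, 3000, 4000 };
	int64_t y[] = { 100, 102, 500104, 106, 108 };
	int64_t m[4], err[5], median_m, median_error;
	int i;

	for (i = 0; i < 4; i++)	/* slope of each segment, scaled by M_PRECISION */
		m[i] = (y[i + 1] - y[i]) * M_PRECISION / (x[i + 1] - x[i]);
	median_m = median(m, 4);

	for (i = 0; i < 5; i++)	/* y-intercept error of each point */
		err[i] = y[i] - median_m * x[i] / M_PRECISION;
	median_error = median(err, 5);

	/* Predict the offset at x = 5000 us; the outlier is ignored. */
	printf("predicted y(5000) = %lld\n",
	       (long long)(median_m * 5000 / M_PRECISION + median_error));
	return 0;
}

Compiled with gcc, this prints "predicted y(5000) = 110": the 500 us outlier at x = 2000 does not perturb the extrapolation.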
+ * + * How to derive the formula, starting from: + * f(x) = median_m * x + median_error + * That's the calculated AP - EC offset (at the x point in time) + * Undo the coordinate system transform: + * f(x) = median_m * (x - x_offset) + median_error + y_offset + * Remember to undo the "y = c - b * 1000" modification: + * f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000 + * + * Return: timestamp in AP timebase (ns) + */ +static s64 +cros_ec_sensor_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state, + s64 x) +{ + return div_s64(state->median_m * (x - state->x_offset), M_PRECISION) + + state->median_error + state->y_offset + x * 1000; +} + +/* + * Since a and b were originally 32 bit values from the EC, + * they overflow relatively often, casting is not enough, so we need to + * add an offset. + */ +static void +cros_ec_sensor_ring_fix_overflow(s64 *ts, + const s64 overflow_period, + struct cros_ec_sensors_ec_overflow_state + *state) +{ + s64 adjust; + + *ts += state->offset; + if (abs(state->last - *ts) > (overflow_period / 2)) { + adjust = state->last > *ts ? overflow_period : -overflow_period; + state->offset += adjust; + *ts += adjust; + } + state->last = *ts; +} + +static void +cros_ec_sensor_ring_check_for_past_timestamp(struct cros_ec_sensorhub + *sensorhub, + struct cros_ec_sensors_ring_sample + *sample) +{ + const u8 sensor_id = sample->sensor_id; + + /* If this event is earlier than one we saw before... */ + if (sensorhub->batch_state[sensor_id].newest_sensor_event > + sample->timestamp) + /* mark it for spreading. */ + sample->timestamp = + sensorhub->batch_state[sensor_id].last_ts; + else + sensorhub->batch_state[sensor_id].newest_sensor_event = + sample->timestamp; +} + /** - * cros_ec_sensor_ring_process_event() - process one EC FIFO event + * cros_ec_sensor_ring_process_event() - Process one EC FIFO event * * @sensorhub: Sensor Hub object. * @fifo_info: FIFO information from the EC (includes b point, EC timebase). @@ -142,28 +398,57 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub, (MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH); if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) { - s64 new_timestamp; + s64 a = in->timestamp; + s64 b = fifo_info->timestamp; + s64 c = fifo_timestamp; + + cros_ec_sensor_ring_fix_overflow(&a, 1LL << 32, + &sensorhub->overflow_a); + cros_ec_sensor_ring_fix_overflow(&b, 1LL << 32, + &sensorhub->overflow_b); + + if (sensorhub->tight_timestamps) { + cros_ec_sensor_ring_ts_filter_update( + &sensorhub->filter, b, c); + *current_timestamp = cros_ec_sensor_ring_ts_filter( + &sensorhub->filter, a); + } else { + s64 new_timestamp; - /* - * Disable filtering since we might add more jitter - * if b is in a random point in time. - */ - new_timestamp = fifo_timestamp - - fifo_info->timestamp * 1000 + - in->timestamp * 1000; + /* + * Disable filtering since we might add more jitter + * if b is in a random point in time. + */ + new_timestamp = fifo_timestamp - + fifo_info->timestamp * 1000 + + in->timestamp * 1000; + /* + * The timestamp can be stale if we had to use the fifo + * info timestamp. + */ + if (new_timestamp - *current_timestamp > 0) + *current_timestamp = new_timestamp; + } + } + if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) { + if (sensorhub->tight_timestamps) { + sensorhub->batch_state[in->sensor_num].last_len = 0; + sensorhub->batch_state[in->sensor_num].penul_len = 0; + } /* - * The timestamp can be stale if we had to use the fifo - * info timestamp. 
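The overflow handling described next can be exercised in isolation. A standalone toy in plain C, using an 8-bit wrap period of 256 instead of the driver's 1LL << 32 so the numbers stay readable:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct overflow_state { int64_t offset, last; };

static void fix_overflow(int64_t *ts, int64_t period,
			 struct overflow_state *state)
{
	*ts += state->offset;
	if (llabs(state->last - *ts) > period / 2) {
		/* Jumped more than half a period: assume a wraparound. */
		int64_t adjust = state->last > *ts ? period : -period;

		state->offset += adjust;
		*ts += adjust;
	}
	state->last = *ts;
}

int main(void)
{
	struct overflow_state st = { 0, 0 };
	int64_t period = 256;	/* toy period; the driver uses 1LL << 32 */
	/* The counter wraps between the 2nd and 3rd sample. */
	int64_t samples[] = { 250, 254, 2, 6 };
	int i;

	for (i = 0; i < 4; i++) {
		int64_t ts = samples[i];

		fix_overflow(&ts, period, &st);
		printf("raw %lld -> monotonic %lld\n",
		       (long long)samples[i], (long long)ts);
	}
	return 0;
}

It prints -6, -2, 2, 6: the raw counter values 250, 254, 2, 6 straddle a wrap, yet the fixed-up timestamps stay a constant 4 apart.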
+ * ODR change is only useful for the sensor_ring; it does not + * convey information to clients. */ - if (new_timestamp - *current_timestamp > 0) - *current_timestamp = new_timestamp; + return false; } if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) { out->sensor_id = in->sensor_num; out->timestamp = *current_timestamp; out->flag = in->flags; + if (sensorhub->tight_timestamps) + sensorhub->batch_state[out->sensor_id].last_len = 0; /* * No other payload information provided with * flush ack. @@ -177,22 +462,221 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub, /* Regular sample */ out->sensor_id = in->sensor_num; - if (*current_timestamp - now > 0) - /* If the timestamp is in the future. */ + if (*current_timestamp - now > 0) { + /* + * This fix is needed to overcome the timestamp filter putting + * events in the future. + */ + sensorhub->future_timestamp_total_ns += + *current_timestamp - now; + if (++sensorhub->future_timestamp_count == + FUTURE_TS_ANALYTICS_COUNT_MAX) { + s64 avg = div_s64(sensorhub->future_timestamp_total_ns, + sensorhub->future_timestamp_count); + dev_warn_ratelimited(sensorhub->dev, + "100 timestamps in the future, %lldns shaved on average\n", + avg); + sensorhub->future_timestamp_count = 0; + sensorhub->future_timestamp_total_ns = 0; + } out->timestamp = now; - else + } else { out->timestamp = *current_timestamp; + } out->flag = in->flags; for (axis = 0; axis < 3; axis++) out->vector[axis] = in->data[axis]; + if (sensorhub->tight_timestamps) + cros_ec_sensor_ring_check_for_past_timestamp(sensorhub, out); return true; } /* * cros_ec_sensor_ring_spread_add: Calculate proper timestamps then add to - * ringbuffer. + * ringbuffer. + * + * This is the new spreading code; it assumes every sample's timestamp + * precedes the sample. Run if tight_timestamps == true. + * + * Sometimes the EC receives only one interrupt (hence timestamp) for + * a batch of samples. Only the first sample will have the correct + * timestamp. So we must interpolate the other samples. + * We use the previous batch timestamp and our current batch timestamp + * as a way to calculate period, then spread the samples evenly. + * + * s0 int, 0ms + * s1 int, 10ms + * s2 int, 20ms + * 30ms point goes by, no interrupt, previous one is still asserted + * downloading s2 and s3 + * s3 sample, 20ms (incorrect timestamp) + * s4 int, 40ms + * + * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch + * has 2 samples in it, we adjust the timestamp of s3. + * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 30ms. If s1 would have + * been part of a bigger batch things would have gotten a little + * more complicated. + * + * Note: we also assume another sensor sample doesn't break up a batch + * in 2 or more partitions. For example, there can't ever be a sync sensor + * in between s2 and s3. This simplifies the following code. + */ +static void +cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub, + unsigned long sensor_mask, + struct cros_ec_sensors_ring_sample *last_out) +{ + struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start; + int id; + + for_each_set_bit(id, &sensor_mask, sensorhub->sensor_num) { + for (batch_start = sensorhub->ring; batch_start < last_out; + batch_start = next_batch_start) { + /* + * For each batch (where all samples have the same + * timestamp).
+ */ + int batch_len, sample_idx; + struct cros_ec_sensors_ring_sample *batch_end = + batch_start; + struct cros_ec_sensors_ring_sample *s; + s64 batch_timestamp = batch_start->timestamp; + s64 sample_period; + + /* + * Skip over batches that start with the sensor types + * we're not looking at right now. + */ + if (batch_start->sensor_id != id) { + next_batch_start = batch_start + 1; + continue; + } + + /* + * Do not start a batch + * from a flush, as it happens asynchronously to the + * regular flow of events. + */ + if (batch_start->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) { + cros_sensorhub_send_sample(sensorhub, + batch_start); + next_batch_start = batch_start + 1; + continue; + } + + if (batch_start->timestamp <= + sensorhub->batch_state[id].last_ts) { + batch_timestamp = + sensorhub->batch_state[id].last_ts; + batch_len = sensorhub->batch_state[id].last_len; + + sample_idx = batch_len; + + sensorhub->batch_state[id].last_ts = + sensorhub->batch_state[id].penul_ts; + sensorhub->batch_state[id].last_len = + sensorhub->batch_state[id].penul_len; + } else { + /* + * Push first sample in the batch to the, + * kifo, it's guaranteed to be correct, the + * rest will follow later on. + */ + sample_idx = 1; + batch_len = 1; + cros_sensorhub_send_sample(sensorhub, + batch_start); + batch_start++; + } + + /* Find all samples have the same timestamp. */ + for (s = batch_start; s < last_out; s++) { + if (s->sensor_id != id) + /* + * Skip over other sensor types that + * are interleaved, don't count them. + */ + continue; + if (s->timestamp != batch_timestamp) + /* we discovered the next batch */ + break; + if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) + /* break on flush packets */ + break; + batch_end = s; + batch_len++; + } + + if (batch_len == 1) + goto done_with_this_batch; + + /* Can we calculate period? */ + if (sensorhub->batch_state[id].last_len == 0) { + dev_warn(sensorhub->dev, "Sensor %d: lost %d samples when spreading\n", + id, batch_len - 1); + goto done_with_this_batch; + /* + * Note: we're dropping the rest of the samples + * in this batch since we have no idea where + * they're supposed to go without a period + * calculation. + */ + } + + sample_period = div_s64(batch_timestamp - + sensorhub->batch_state[id].last_ts, + sensorhub->batch_state[id].last_len); + dev_dbg(sensorhub->dev, + "Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n", + batch_len, id, + sensorhub->batch_state[id].last_ts, + sensorhub->batch_state[id].last_len, + batch_timestamp, + sample_period); + + /* + * Adjust timestamps of the samples then push them to + * kfifo. + */ + for (s = batch_start; s <= batch_end; s++) { + if (s->sensor_id != id) + /* + * Skip over other sensor types that + * are interleaved, don't change them. + */ + continue; + + s->timestamp = batch_timestamp + + sample_period * sample_idx; + sample_idx++; + + cros_sensorhub_send_sample(sensorhub, s); + } + +done_with_this_batch: + sensorhub->batch_state[id].penul_ts = + sensorhub->batch_state[id].last_ts; + sensorhub->batch_state[id].penul_len = + sensorhub->batch_state[id].last_len; + + sensorhub->batch_state[id].last_ts = + batch_timestamp; + sensorhub->batch_state[id].last_len = batch_len; + + next_batch_start = batch_end + 1; + } + } +} + +/* + * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then + * add to ringbuffer (legacy). + * + * Note: This assumes we're running old firmware, where every sample's timestamp + * is after the sample. Run if tight_timestamps == false. 
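A standalone numeric walk-through (plain C, synthetic values) of the s0..s4 scenario from the spreading comment above: the previous batch gives the period, the first sample of the current batch keeps its timestamp, and the rest are spread at that period, so s3 lands at 30 ms:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t last_ts = 10000000;	/* s1's batch timestamp: 10 ms (ns) */
	int last_len = 1;		/* samples in that previous batch */
	int64_t batch_ts = 20000000;	/* s2 and s3 both report 20 ms */
	int batch_len = 2;
	int64_t period = (batch_ts - last_ts) / last_len;
	int i;

	for (i = 0; i < batch_len; i++)
		printf("sample %d -> %lld ms\n", i,
		       (long long)((batch_ts + period * i) / 1000000));
	return 0;
}

This prints 20 ms for s2 and 30 ms for s3, matching what the spreading loop above computes from batch_state.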
* * If there is a sample with a proper timestamp * @@ -215,11 +699,12 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub, * * We know have [TS1+1/3, TS1+2/3, current timestamp] */ -static void cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub, - unsigned long sensor_mask, - s64 current_timestamp, - struct cros_ec_sensors_ring_sample - *last_out) +static void +cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub, + unsigned long sensor_mask, + s64 current_timestamp, + struct cros_ec_sensors_ring_sample + *last_out) { struct cros_ec_sensors_ring_sample *out; int i; @@ -404,25 +889,34 @@ static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub) * is slow to respond to the IRQ, the EC may have added new samples. * Use the FIFO info timestamp as last timestamp then. */ - if ((last_out - 1)->timestamp == current_timestamp) + if (!sensorhub->tight_timestamps && + (last_out - 1)->timestamp == current_timestamp) current_timestamp = fifo_timestamp; /* Warn on lost samples. */ if (fifo_info->total_lost) for (i = 0; i < sensorhub->sensor_num; i++) { - if (fifo_info->lost[i]) + if (fifo_info->lost[i]) { dev_warn_ratelimited(sensorhub->dev, "Sensor %d: lost: %d out of %d\n", i, fifo_info->lost[i], fifo_info->total_lost); + if (sensorhub->tight_timestamps) + sensorhub->batch_state[i].last_len = 0; + } } /* * Spread samples in case of batching, then add them to the * ringbuffer. */ - cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask, - current_timestamp, last_out); + if (sensorhub->tight_timestamps) + cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask, + last_out); + else + cros_ec_sensor_ring_spread_add_legacy(sensorhub, sensor_mask, + current_timestamp, + last_out); ring_handler_end: sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp; @@ -517,6 +1011,18 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = cros_ec_get_time_ns(); + sensorhub->tight_timestamps = cros_ec_check_features( + ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS); + + if (sensorhub->tight_timestamps) { + sensorhub->batch_state = devm_kcalloc(sensorhub->dev, + sensorhub->sensor_num, + sizeof(*sensorhub->batch_state), + GFP_KERNEL); + if (!sensorhub->batch_state) + return -ENOMEM; + } + /* Register the notifier that will act as a top half interrupt. */ sensorhub->notifier.notifier_call = cros_ec_sensorhub_event; ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier, diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h index b0950814f820..c588be843f61 100644 --- a/include/linux/platform_data/cros_ec_sensorhub.h +++ b/include/linux/platform_data/cros_ec_sensorhub.h @@ -54,7 +54,64 @@ struct cros_ec_sensors_ring_sample { s64 timestamp; } __packed; +/* State used for cros_ec_ring_fix_overflow */ +struct cros_ec_sensors_ec_overflow_state { + s64 offset; + s64 last; +}; + +/* Length of the filter, how long to remember entries for */ +#define CROS_EC_SENSORHUB_TS_HISTORY_SIZE 64 + /** + * struct cros_ec_sensors_ts_filter_state - Timestamp filetr state. + * + * @x_offset: x is EC interrupt time. x_offset its last value. + * @y_offset: y is the difference between AP and EC time, y_offset its last + * value. + * @x_history: The past history of x, relative to x_offset. + * @y_history: The past history of y, relative to y_offset. + * @m_history: rate between y and x. 
+ * @history_len: Amount of valid historic data in the arrays. + * @temp_buf: Temporary buffer used when updating the filter. + * @median_m: median value of m_history + * @median_error: final error to apply to AP interrupt timestamp to get the + * "true timestamp" the event occurred. + */ +struct cros_ec_sensors_ts_filter_state { + s64 x_offset, y_offset; + s64 x_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE]; + s64 y_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE]; + s64 m_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE]; + int history_len; + + s64 temp_buf[CROS_EC_SENSORHUB_TS_HISTORY_SIZE]; + + s64 median_m; + s64 median_error; +}; + +/* struct cros_ec_sensors_ts_batch_state - State of batch of a single sensor. + * + * Used to store information to batch data using median filter information. + * + * @penul_ts: last but one batch timestamp (penultimate timestamp). + * Used for timestamp spreading calculations + * when a batch shows up. + * @penul_len: last but one batch length. + * @last_ts: Last batch timestamp. + * @last_len: Last batch length. + * @newest_sensor_event: Last sensor timestamp. + */ +struct cros_ec_sensors_ts_batch_state { + s64 penul_ts; + int penul_len; + s64 last_ts; + int last_len; + s64 newest_sensor_event; +}; + +/* * struct cros_ec_sensorhub - Sensor Hub device data. * * @dev: Device object, mostly used for logging. @@ -66,10 +123,26 @@ struct cros_ec_sensors_ring_sample { * @cmd_lock : Lock for sending msg. * @notifier: Notifier to kick the FIFO interrupt. * @ring: Preprocessed ring to store events. - * @fifo_timestamp: array for event timestamp and spreading. - * @fifo_info: copy of FIFO information coming from the EC. - * @fifo_size: size of the ring. - * @push_data: array of callback to send datums to iio sensor object. + * @fifo_timestamp: Array for event timestamp and spreading. + * @fifo_info: Copy of FIFO information coming from the EC. + * @fifo_size: Size of the ring. + * @batch_state: Per sensor information of the last batches received. + * @overflow_a: For handling timestamp overflow for a time (sensor events) + * @overflow_b: For handling timestamp overflow for b time (ec interrupts) + * @filter: Median filter structure. + * @tight_timestamps: Set to true when the EC supports tight timestamping: + * The timestamps reported from the EC have low jitter. + * Timestamps also come before every sample. Set either + * by feature bits coming from the EC or userspace. + * @future_timestamp_count: Statistics used to compute shaved time. + * This occurs when timestamp interpolation from EC + * time to AP time accidentally puts timestamps in + * the future. These timestamps are clamped to + * `now` and these count/total_ns maintain the + * statistics for how much time was removed in a + * given period. + * @future_timestamp_total_ns: Total amount of time shaved. + * @push_data: Array of callbacks to send datums to the iio sensor objects.
*/ struct cros_ec_sensorhub { struct device *dev; @@ -89,6 +162,18 @@ struct cros_ec_sensorhub { struct ec_response_motion_sense_fifo_info *fifo_info; int fifo_size; + struct cros_ec_sensors_ts_batch_state *batch_state; + + struct cros_ec_sensors_ec_overflow_state overflow_a; + struct cros_ec_sensors_ec_overflow_state overflow_b; + + struct cros_ec_sensors_ts_filter_state filter; + + int tight_timestamps; + + s32 future_timestamp_count; + s64 future_timestamp_total_ns; + struct cros_ec_sensorhub_sensor_push_data *push_data; }; -- cgit v1.2.3-58-ga151 From d9452adcc5b485ab1b50352d9356cde75ae6ac0e Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:36 -0700 Subject: iio: cros_ec: Move function description to .c file To prevent comment rot, move function description to cros_ec_sensors_core.c. Signed-off-by: Gwendal Grignou Acked-by: Jonathan Cameron Signed-off-by: Enric Balletbo i Serra --- .../common/cros_ec_sensors/cros_ec_sensors_core.c | 69 +++++++++++++++++++ include/linux/iio/common/cros_ec_sensors_core.h | 80 ---------------------- 2 files changed, 69 insertions(+), 80 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index d3a3626c7cd8..f3c000448b90 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -82,6 +82,14 @@ static void get_default_min_max_freq(enum motionsensor_type type, } } +/** + * cros_ec_sensors_core_init() - basic initialization of the core structure + * @pdev: platform device created for the sensors + * @indio_dev: iio device structure of the device + * @physical_device: true if the device refers to a physical device + * + * Return: 0 on success, -errno on failure. + */ int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device) @@ -159,6 +167,16 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init); +/** + * cros_ec_motion_send_host_cmd() - send motion sense host command + * @state: pointer to state information for device + * @opt_length: optional length to reduce the response size, useful on the data + * path. Otherwise, the maximal allowed response size is used + * + * When called, the sub-command is assumed to be set in param->cmd. + * + * Return: 0 on success, -errno on failure. + */ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state, u16 opt_length) { @@ -421,6 +439,14 @@ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc); +/** + * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol + * @indio_dev: pointer to IIO device + * @scan_mask: bitmap of the sensor indices to scan + * @data: location to store data + * + * Return: 0 on success, -errno on failure. + */ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data) { @@ -445,6 +471,18 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd); +/** + * cros_ec_sensors_capture() - the trigger handler function + * @irq: the interrupt number. + * @p: a pointer to the poll function. + * + * On a trigger event occurring, if the pollfunc is attached then this + * handler is called as a threaded interrupt (and hence may sleep). 
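A hedged sketch of how the physical-sensor drivers wire this handler up; my_setup_buffer() is a hypothetical wrapper, but the setup call mirrors the ones visible in the driver diffs further below:

#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/common/cros_ec_sensors_core.h>

static int my_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	/* cros_ec_sensors_capture() runs in thread context and may sleep. */
	return devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					       cros_ec_sensors_capture, NULL);
}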
It + * is responsible for grabbing data from the device and pushing it into + * the associated buffer. + * + * Return: IRQ_HANDLED + */ irqreturn_t cros_ec_sensors_capture(int irq, void *p) { struct iio_poll_func *pf = p; @@ -480,6 +518,16 @@ done: } EXPORT_SYMBOL_GPL(cros_ec_sensors_capture); +/** + * cros_ec_sensors_core_read() - function to request a value from the sensor + * @st: pointer to state information for device + * @chan: channel specification structure table + * @val: will contain one element making up the returned value + * @val2: will contain another element making up the returned value + * @mask: specifies which values to be requested + * + * Return: the type of value returned by the device + */ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -520,6 +568,17 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read); +/** + * cros_ec_sensors_core_read_avail() - get available values + * @indio_dev: pointer to state information for device + * @chan: channel specification structure table + * @vals: list of available values + * @type: type of data returned + * @length: number of data returned in the array + * @mask: specifies which values to be requested + * + * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST + */ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, @@ -541,6 +600,16 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail); +/** + * cros_ec_sensors_core_write() - function to write a value to the sensor + * @st: pointer to state information for device + * @chan: channel specification structure table + * @val: first part of value to write + * @val2: second part of value to write + * @mask: specifies which values to write + * + * Return: the type of value returned by the device + */ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask) diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index bb331e6356a9..0af918978f97 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -79,95 +79,25 @@ struct cros_ec_sensors_core_state { int frequencies[3]; }; -/** - * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory - * @indio_dev: pointer to IIO device - * @scan_mask: bitmap of the sensor indices to scan - * @data: location to store data - * - * This is the safe function for reading the EC data. It guarantees that the - * data sampled was not modified by the EC while being read. - * - * Return: 0 on success, -errno on failure. - */ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); -/** - * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol - * @indio_dev: pointer to IIO device - * @scan_mask: bitmap of the sensor indices to scan - * @data: location to store data - * - * Return: 0 on success, -errno on failure. 
- */ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); struct platform_device; -/** - * cros_ec_sensors_core_init() - basic initialization of the core structure - * @pdev: platform device created for the sensors - * @indio_dev: iio device structure of the device - * @physical_device: true if the device refers to a physical device - * - * Return: 0 on success, -errno on failure. - */ int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device); -/** - * cros_ec_sensors_capture() - the trigger handler function - * @irq: the interrupt number. - * @p: a pointer to the poll function. - * - * On a trigger event occurring, if the pollfunc is attached then this - * handler is called as a threaded interrupt (and hence may sleep). It - * is responsible for grabbing data from the device and pushing it into - * the associated buffer. - * - * Return: IRQ_HANDLED - */ irqreturn_t cros_ec_sensors_capture(int irq, void *p); -/** - * cros_ec_motion_send_host_cmd() - send motion sense host command - * @st: pointer to state information for device - * @opt_length: optional length to reduce the response size, useful on the data - * path. Otherwise, the maximal allowed response size is used - * - * When called, the sub-command is assumed to be set in param->cmd. - * - * Return: 0 on success, -errno on failure. - */ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st, u16 opt_length); -/** - * cros_ec_sensors_core_read() - function to request a value from the sensor - * @st: pointer to state information for device - * @chan: channel specification structure table - * @val: will contain one element making up the returned value - * @val2: will contain another element making up the returned value - * @mask: specifies which values to be requested - * - * Return: the type of value returned by the device - */ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int *val, int *val2, long mask); -/** - * cros_ec_sensors_core_read_avail() - get available values - * @indio_dev: pointer to state information for device - * @chan: channel specification structure table - * @vals: list of available values - * @type: type of data returned - * @length: number of data returned in the array - * @mask: specifies which values to be requested - * - * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST - */ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, @@ -175,16 +105,6 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, int *length, long mask); -/** - * cros_ec_sensors_core_write() - function to write a value to the sensor - * @st: pointer to state information for device - * @chan: channel specification structure table - * @val: first part of value to write - * @val2: second part of value to write - * @mask: specifies which values to write - * - * Return: the type of value returned by the device - */ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); -- cgit v1.2.3-58-ga151 From 69f0793eb60dacd153388974bbaaa1d3184d171d Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:37 -0700 Subject: iio: expose iio_device_set_clock Some IIO devices may want to override the default (realtime) to another clock source by default. 
It can be beneficial when timestamps coming from the hardware or underlying drivers are already in that format. It can always be overridden by the attribute current_timestamp_clock. Signed-off-by: Gwendal Grignou Reviewed-by: Jonathan Cameron Signed-off-by: Enric Balletbo i Serra --- drivers/iio/industrialio-core.c | 8 +++++++- include/linux/iio/iio.h | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 65ff0d067018..26e963483bab 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -189,7 +189,12 @@ ssize_t iio_read_const_attr(struct device *dev, } EXPORT_SYMBOL(iio_read_const_attr); -static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) +/** + * iio_device_set_clock() - Set current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + * @clock_id: timestamping clock POSIX identifier to set. + */ +int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) { int ret; const struct iio_event_interface *ev_int = indio_dev->event_interface; @@ -207,6 +212,7 @@ static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) return 0; } +EXPORT_SYMBOL(iio_device_set_clock); /** * iio_get_time_ns() - utility function to get a time stamp for events etc diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 862ce0019eba..b18f34a8901f 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -627,6 +627,8 @@ static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) return indio_dev->clock_id; } +int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id); + /** * dev_to_iio_dev() - Get IIO device struct from a device struct * @dev: The device embedded in the IIO device -- cgit v1.2.3-58-ga151 From aa984f1ba4a477c8ea39d2fa975a4f8de8a126e9 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:38 -0700 Subject: iio: cros_ec: Register to cros_ec_sensorhub when EC supports FIFO When the EC supports a FIFO, each IIO device registers a callback to put samples in the buffer when they arrive from the FIFO. When there is no FIFO, the user space application needs to call trigger_new or, better, register a high-precision timer.
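A hedged sketch of using the helper exported above from a driver probe path; my_set_boottime() is a hypothetical wrapper, but the call itself is the one this patch exports and the cros_ec patch below uses with CLOCK_BOOTTIME:

#include <linux/iio/iio.h>

static int my_set_boottime(struct iio_dev *indio_dev)
{
	/* Userspace can still override this via current_timestamp_clock. */
	return iio_device_set_clock(indio_dev, CLOCK_BOOTTIME);
}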
Signed-off-by: Gwendal Grignou Reviewed-by: Jonathan Cameron Signed-off-by: Enric Balletbo i Serra --- drivers/iio/accel/cros_ec_accel_legacy.c | 8 +- .../iio/common/cros_ec_sensors/cros_ec_lid_angle.c | 2 +- .../iio/common/cros_ec_sensors/cros_ec_sensors.c | 9 +- .../common/cros_ec_sensors/cros_ec_sensors_core.c | 101 ++++++++++++++++++++- drivers/iio/light/cros_ec_light_prox.c | 9 +- drivers/iio/pressure/cros_ec_baro.c | 9 +- include/linux/iio/common/cros_ec_sensors_core.h | 10 +- 7 files changed, 119 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c index 68e847c6255e..2532b9ad3384 100644 --- a/drivers/iio/accel/cros_ec_accel_legacy.c +++ b/drivers/iio/accel/cros_ec_accel_legacy.c @@ -170,7 +170,8 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, NULL); if (ret) return ret; @@ -190,11 +191,6 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev) state->sign[CROS_EC_SENSOR_Z] = -1; } - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c index 1dcc2a16ab2d..e30a59fcf0f9 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c @@ -97,7 +97,7 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, false); + ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL, NULL); if (ret) return ret; diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 576e45faafaf..711134d67dde 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -230,7 +230,9 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, + cros_ec_sensors_push_data); if (ret) return ret; @@ -292,11 +294,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) else state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd; - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index f3c000448b90..01513cb93365 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -82,17 +83,71 @@ static void get_default_min_max_freq(enum motionsensor_type type, } } +int cros_ec_sensors_push_data(struct iio_dev *indio_dev, + s16 *data, + s64 timestamp) +{ + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + s16 *out; + s64 delta; + unsigned int i; + + /* + * Ignore samples if 
the buffer is not set: it is needed if the ODR is + * set but the buffer is not enabled yet. + */ + if (!iio_buffer_enabled(indio_dev)) + return 0; + + out = (s16 *)st->samples; + for_each_set_bit(i, + indio_dev->active_scan_mask, + indio_dev->masklength) { + *out = data[i]; + out++; + } + + if (iio_device_get_clock(indio_dev) != CLOCK_BOOTTIME) + delta = iio_get_time_ns(indio_dev) - cros_ec_get_time_ns(); + else + delta = 0; + + iio_push_to_buffers_with_timestamp(indio_dev, st->samples, + timestamp + delta); + + return 0; +} +EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data); + +static void cros_ec_sensors_core_clean(void *arg) +{ + struct platform_device *pdev = (struct platform_device *)arg; + struct cros_ec_sensorhub *sensor_hub = + dev_get_drvdata(pdev->dev.parent); + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + u8 sensor_num = st->param.info.sensor_num; + + cros_ec_sensorhub_unregister_push_data(sensor_hub, sensor_num); +} + /** * cros_ec_sensors_core_init() - basic initialization of the core structure * @pdev: platform device created for the sensors * @indio_dev: iio device structure of the device * @physical_device: true if the device refers to a physical device + * @trigger_capture: function pointer to call buffer is triggered, + * for backward compatibility. + * @push_data: function to call when cros_ec_sensorhub receives + * a sample for that sensor. * * Return: 0 on success, -errno on failure. */ int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, - bool physical_device) + bool physical_device, + cros_ec_sensors_capture_t trigger_capture, + cros_ec_sensorhub_push_data_cb_t push_data) { struct device *dev = &pdev->dev; struct cros_ec_sensors_core_state *state = iio_priv(indio_dev); @@ -131,8 +186,6 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, indio_dev->name = pdev->name; if (physical_device) { - indio_dev->modes = INDIO_DIRECT_MODE; - state->param.cmd = MOTIONSENSE_CMD_INFO; state->param.info.sensor_num = sensor_platform->sensor_num; ret = cros_ec_motion_send_host_cmd(state, 0); @@ -161,6 +214,48 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, state->frequencies[2] = state->resp->info_3.max_frequency; } + + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + /* + * Create a software buffer, feed by the EC FIFO. + * We can not use trigger here, as events are generated + * as soon as sample_frequency is set. + */ + struct iio_buffer *buffer; + + buffer = devm_iio_kfifo_allocate(dev); + if (!buffer) + return -ENOMEM; + + iio_device_attach_buffer(indio_dev, buffer); + indio_dev->modes = INDIO_BUFFER_SOFTWARE; + + ret = cros_ec_sensorhub_register_push_data( + sensor_hub, sensor_platform->sensor_num, + indio_dev, push_data); + if (ret) + return ret; + + ret = devm_add_action_or_reset( + dev, cros_ec_sensors_core_clean, pdev); + if (ret) + return ret; + + /* Timestamp coming from FIFO are in ns since boot. */ + ret = iio_device_set_clock(indio_dev, CLOCK_BOOTTIME); + if (ret) + return ret; + } else { + /* + * The only way to get samples in buffer is to set a + * software tigger (systrig, hrtimer). 
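The delta computation at the top of cros_ec_sensors_push_data() above rebases a CLOCK_BOOTTIME FIFO timestamp onto whatever clock the IIO device actually uses. A standalone plain-C illustration of the same rebasing (Linux-specific clocks, synthetic scenario):

#define _GNU_SOURCE	/* for CLOCK_BOOTTIME */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t now_ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	int64_t sample_ts = now_ns(CLOCK_BOOTTIME);	/* a FIFO timestamp */
	/* Current offset between the device clock and the sample clock. */
	int64_t delta = now_ns(CLOCK_REALTIME) - now_ns(CLOCK_BOOTTIME);

	/* What push_data() would hand to the buffer for a REALTIME device. */
	printf("rebased timestamp: %lld ns\n",
	       (long long)(sample_ts + delta));
	return 0;
}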
+ */ + ret = devm_iio_triggered_buffer_setup( + dev, indio_dev, NULL, trigger_capture, + NULL); + if (ret) + return ret; + } } return 0; diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index 7a838e2956f4..03c951ff4a3c 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -177,7 +177,9 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, + cros_ec_sensors_push_data); if (ret) return ret; @@ -236,11 +238,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd; - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c index b521bebd551c..6add499f11aa 100644 --- a/drivers/iio/pressure/cros_ec_baro.c +++ b/drivers/iio/pressure/cros_ec_baro.c @@ -134,7 +134,9 @@ static int cros_ec_baro_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, + cros_ec_sensors_push_data); if (ret) return ret; @@ -182,11 +184,6 @@ static int cros_ec_baro_probe(struct platform_device *pdev) state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd; - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 0af918978f97..b8f573ca9dcc 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -12,6 +12,7 @@ #include #include #include +#include enum { CROS_EC_SENSOR_X, @@ -32,6 +33,8 @@ enum { /* Minimum sampling period to use when device is suspending */ #define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */ +typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); + /** * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver * @ec: cros EC device structure @@ -87,9 +90,14 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, struct platform_device; int cros_ec_sensors_core_init(struct platform_device *pdev, - struct iio_dev *indio_dev, bool physical_device); + struct iio_dev *indio_dev, bool physical_device, + cros_ec_sensors_capture_t trigger_capture, + cros_ec_sensorhub_push_data_cb_t push_data); irqreturn_t cros_ec_sensors_capture(int irq, void *p); +int cros_ec_sensors_push_data(struct iio_dev *indio_dev, + s16 *data, + s64 timestamp); int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st, u16 opt_length); -- cgit v1.2.3-58-ga151 From 2861be4ca9125ee1b7c49895948ca4236449a7fe Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:39 -0700 Subject: iio: cros_ec: Remove pm function Since cros_ec_sensorhub is shutting down the FIFO when the device suspends, no need to slow down the EC sampling period rate. 
It was necessary to do that before command CMD_FIFO_INT_ENABLE was introduced, but now all supported chromebooks have it. Signed-off-by: Gwendal Grignou Acked-by: Jonathan Cameron Acked-by: Lee Jones Signed-off-by: Enric Balletbo i Serra --- .../iio/common/cros_ec_sensors/cros_ec_lid_angle.c | 1 - .../iio/common/cros_ec_sensors/cros_ec_sensors.c | 1 - .../common/cros_ec_sensors/cros_ec_sensors_core.c | 47 ---------------------- drivers/iio/light/cros_ec_light_prox.c | 1 - include/linux/iio/common/cros_ec_sensors_core.h | 5 --- 5 files changed, 55 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c index e30a59fcf0f9..af801e203623 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c @@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_lid_angle_ids); static struct platform_driver cros_ec_lid_angle_platform_driver = { .driver = { .name = DRV_NAME, - .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_lid_angle_probe, .id_table = cros_ec_lid_angle_ids, diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 711134d67dde..fad21a90bc7e 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -314,7 +314,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids); static struct platform_driver cros_ec_sensors_platform_driver = { .driver = { .name = "cros-ec-sensors", - .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_sensors_probe, .id_table = cros_ec_sensors_ids, diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 01513cb93365..a1ecbd55ea76 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -738,52 +738,5 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write); -static int __maybe_unused cros_ec_sensors_prepare(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); - - if (st->curr_sampl_freq == 0) - return 0; - - /* - * If the sensors are sampled at high frequency, we will not be able to - * sleep. Set sampling to a long period if necessary.
- */ - if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) { - mutex_lock(&st->cmd_lock); - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY; - cros_ec_motion_send_host_cmd(st, 0); - mutex_unlock(&st->cmd_lock); - } - return 0; -} - -static void __maybe_unused cros_ec_sensors_complete(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); - - if (st->curr_sampl_freq == 0) - return; - - if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) { - mutex_lock(&st->cmd_lock); - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = st->curr_sampl_freq; - cros_ec_motion_send_host_cmd(st, 0); - mutex_unlock(&st->cmd_lock); - } -} - -const struct dev_pm_ops cros_ec_sensors_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .prepare = cros_ec_sensors_prepare, - .complete = cros_ec_sensors_complete -#endif -}; -EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops); - MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index 03c951ff4a3c..e10b35de4c2f 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -255,7 +255,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids); static struct platform_driver cros_ec_light_prox_platform_driver = { .driver = { .name = "cros-ec-light-prox", - .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_light_prox_probe, .id_table = cros_ec_light_prox_ids, diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index b8f573ca9dcc..96ea4551945e 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -30,9 +30,6 @@ enum { */ #define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2) -/* Minimum sampling period to use when device is suspending */ -#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */ - typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); /** @@ -117,8 +114,6 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); -extern const struct dev_pm_ops cros_ec_sensors_pm_ops; - /* List of extended channel specification for all sensors */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; -- cgit v1.2.3-58-ga151 From 6562793b55c58b6b1dcb9cd581c7905afc25e89f Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:40 -0700 Subject: iio: cros_ec: Expose hwfifo_timeout Expose EC minimal interrupt period through buffer/hwfifo_timeout: - Maximal timeout is limited to 65s. - When timeout for all sensors is set to 0, EC will not send events, even if the sensor sampling rate is greater than 0. Rename frequency to sampling_frequency to match IIO ABI. 
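For reference, the sysfs value is a fixed-point count of seconds while the EC rate is in milliseconds. A minimal standalone sketch of the conversion, assuming only the arithmetic visible in the hunks below; the helper name, the main() harness and the 1.5 s example are illustrative, not part of the patch:

#include <stdio.h>

/* In the driver, iio_str_to_fixpoint("1.5", 100000, &integer, &fract)
 * parses the sysfs string into integer = 1 and fract = 500000. */
static int ec_rate_ms(int integer, int fract)
{
	return integer * 1000 + fract / 1000;	/* 1.5 s -> 1500 ms */
}

int main(void)
{
	int latency = ec_rate_ms(1, 500000);

	/* The show path formats the EC's millisecond value back as seconds. */
	printf("%d.%06u\n", latency / 1000,
	       (unsigned int)((latency % 1000) * 1000));	/* 1.500000 */
	return 0;
}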
Signed-off-by: Gwendal Grignou Reviewed-by: Jonathan Cameron Signed-off-by: Enric Balletbo i Serra --- .../iio/common/cros_ec_sensors/cros_ec_sensors.c | 3 +- .../common/cros_ec_sensors/cros_ec_sensors_core.c | 95 +++++++++++++++++----- drivers/iio/light/cros_ec_light_prox.c | 5 +- drivers/iio/pressure/cros_ec_baro.c | 5 +- include/linux/iio/common/cros_ec_sensors_core.h | 4 +- 5 files changed, 82 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index fad21a90bc7e..a66941fdb385 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -236,6 +236,8 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) if (ret) return ret; + iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes); + indio_dev->info = &ec_sensors_info; state = iio_priv(indio_dev); for (channel = state->channels, i = CROS_EC_SENSOR_X; @@ -247,7 +249,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) BIT(IIO_CHAN_INFO_CALIBSCALE); channel->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | - BIT(IIO_CHAN_INFO_FREQUENCY) | BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index a1ecbd55ea76..b8eac7e5d5e5 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,77 @@ static void get_default_min_max_freq(enum motionsensor_type type, } } +static int cros_ec_sensor_set_ec_rate(struct cros_ec_sensors_core_state *st, + int rate) +{ + int ret; + + if (rate > U16_MAX) + rate = U16_MAX; + + mutex_lock(&st->cmd_lock); + st->param.cmd = MOTIONSENSE_CMD_EC_RATE; + st->param.ec_rate.data = rate; + ret = cros_ec_motion_send_host_cmd(st, 0); + mutex_unlock(&st->cmd_lock); + return ret; +} + +static ssize_t cros_ec_sensor_set_report_latency(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + int integer, fract, ret; + int latency; + + ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract); + if (ret) + return ret; + + /* EC rate is in ms. 
*/ + latency = integer * 1000 + fract / 1000; + ret = cros_ec_sensor_set_ec_rate(st, latency); + if (ret < 0) + return ret; + + return len; +} + +static ssize_t cros_ec_sensor_get_report_latency(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + int latency, ret; + + mutex_lock(&st->cmd_lock); + st->param.cmd = MOTIONSENSE_CMD_EC_RATE; + st->param.ec_rate.data = EC_MOTION_SENSE_NO_VALUE; + + ret = cros_ec_motion_send_host_cmd(st, 0); + latency = st->resp->ec_rate.ret; + mutex_unlock(&st->cmd_lock); + if (ret < 0) + return ret; + + return sprintf(buf, "%d.%06u\n", + latency / 1000, + (latency % 1000) * 1000); +} + +static IIO_DEVICE_ATTR(hwfifo_timeout, 0644, + cros_ec_sensor_get_report_latency, + cros_ec_sensor_set_report_latency, 0); + +const struct attribute *cros_ec_sensor_fifo_attributes[] = { + &iio_dev_attr_hwfifo_timeout.dev_attr.attr, + NULL, +}; +EXPORT_SYMBOL_GPL(cros_ec_sensor_fifo_attributes); + int cros_ec_sensors_push_data(struct iio_dev *indio_dev, s16 *data, s64 timestamp) @@ -631,18 +703,6 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = - EC_MOTION_SENSE_NO_VALUE; - - ret = cros_ec_motion_send_host_cmd(st, 0); - if (ret) - break; - - *val = st->resp->ec_rate.ret; - ret = IIO_VAL_INT; - break; - case IIO_CHAN_INFO_FREQUENCY: st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR; st->param.sensor_odr.data = EC_MOTION_SENSE_NO_VALUE; @@ -712,7 +772,7 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, int ret; switch (mask) { - case IIO_CHAN_INFO_FREQUENCY: + case IIO_CHAN_INFO_SAMP_FREQ: st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR; st->param.sensor_odr.data = val; @@ -721,15 +781,6 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, ret = cros_ec_motion_send_host_cmd(st, 0); break; - case IIO_CHAN_INFO_SAMP_FREQ: - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = val; - - ret = cros_ec_motion_send_host_cmd(st, 0); - if (ret) - break; - st->curr_sampl_freq = val; - break; default: ret = -EINVAL; break; diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index e10b35de4c2f..2198b50909ed 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -183,6 +183,8 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) if (ret) return ret; + iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes); + indio_dev->info = &cros_ec_light_prox_info; state = iio_priv(indio_dev); state->core.type = state->core.resp->info.type; @@ -191,8 +193,7 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) /* Common part */ channel->info_mask_shared_by_all = - BIT(IIO_CHAN_INFO_SAMP_FREQ) | - BIT(IIO_CHAN_INFO_FREQUENCY); + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c index 6add499f11aa..c079b8960082 100644 --- a/drivers/iio/pressure/cros_ec_baro.c +++ b/drivers/iio/pressure/cros_ec_baro.c @@ -140,6 +140,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev) if (ret) return ret; + iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes); + 
indio_dev->info = &cros_ec_baro_info; state = iio_priv(indio_dev); state->core.type = state->core.resp->info.type; @@ -149,8 +151,7 @@ channel->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); channel->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | - BIT(IIO_CHAN_INFO_SAMP_FREQ) | - BIT(IIO_CHAN_INFO_FREQUENCY); + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 96ea4551945e..5b0acc14c891 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -50,7 +50,6 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * the timestamp. The timestamp is always last and * is always 8-byte aligned. * @read_ec_sensors_data: function used for accessing sensors values - * @cuur_sampl_freq: current sampling period */ struct cros_ec_sensors_core_state { struct cros_ec_device *ec; @@ -73,8 +72,6 @@ struct cros_ec_sensors_core_state { int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); - int curr_sampl_freq; - /* Table of known available frequencies : 0, Min and Max in mHz */ int frequencies[3]; }; @@ -116,5 +113,6 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, /* List of extended channel specification for all sensors */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; +extern const struct attribute *cros_ec_sensor_fifo_attributes[]; #endif /* __CROS_EC_SENSORS_CORE_H */ -- cgit v1.2.3-58-ga151 From cb87556068146de5c9933397706d3bde88b4a14d Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:41 -0700 Subject: iio: cros_ec: Report hwfifo_watermark_max Report the maximum number of samples the EC can hold. This is not tunable, but can be useful for applications to find out the maximum amount of time they can sleep when hwfifo_timeout is set to a large number. Signed-off-by: Gwendal Grignou Reviewed-by: Jonathan Cameron Signed-off-by: Enric Balletbo i Serra --- .../common/cros_ec_sensors/cros_ec_sensors_core.c | 33 ++++++++++++++++++++-- include/linux/iio/common/cros_ec_sensors_core.h | 3 ++ 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index b8eac7e5d5e5..67e8eff038cf 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -22,6 +22,12 @@ #include #include +/* + * Hard coded to the fifo size of the first device to support the sensor + * fifo. The EC has a 2048 byte fifo and will trigger an interrupt when + * the fifo is 2/3 full. + */ +#define CROS_EC_FIFO_SIZE (2048 * 2 / 3) + static char *cros_ec_loc[] = { [MOTIONSENSE_LOC_BASE] = "base", [MOTIONSENSE_LOC_LID] = "lid", @@ -55,8 +61,15 @@ static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev, static void get_default_min_max_freq(enum motionsensor_type type, u32 *min_freq, - u32 *max_freq) + u32 *max_freq, + u32 *max_fifo_events) { + /* + * We don't know the fifo size; set it to the size previously used by + * older
+ */ + *max_fifo_events = CROS_EC_FIFO_SIZE; + switch (type) { case MOTIONSENSE_TYPE_ACCEL: case MOTIONSENSE_TYPE_GYRO: @@ -149,8 +162,21 @@ static IIO_DEVICE_ATTR(hwfifo_timeout, 0644, cros_ec_sensor_get_report_latency, cros_ec_sensor_set_report_latency, 0); +static ssize_t hwfifo_watermark_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + + return sprintf(buf, "%d\n", st->fifo_max_event_count); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); + const struct attribute *cros_ec_sensor_fifo_attributes[] = { &iio_dev_attr_hwfifo_timeout.dev_attr.attr, + &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, NULL, }; EXPORT_SYMBOL_GPL(cros_ec_sensor_fifo_attributes); @@ -279,12 +305,15 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, if (state->msg->version < 3) { get_default_min_max_freq(state->resp->info.type, &state->frequencies[1], - &state->frequencies[2]); + &state->frequencies[2], + &state->fifo_max_event_count); } else { state->frequencies[1] = state->resp->info_3.min_frequency; state->frequencies[2] = state->resp->info_3.max_frequency; + state->fifo_max_event_count = + state->resp->info_3.fifo_max_event_count; } if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 5b0acc14c891..bc26ae2e3272 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -50,6 +50,7 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * the timestamp. The timestamp is always last and * is always 8-byte aligned. 
* @read_ec_sensors_data: function used for accessing sensors values + * @fifo_max_event_count: Size of the EC sensor FIFO */ struct cros_ec_sensors_core_state { struct cros_ec_device *ec; @@ -72,6 +73,8 @@ struct cros_ec_sensors_core_state { int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); + u32 fifo_max_event_count; + /* Table of known available frequencies : 0, Min and Max in mHz */ int frequencies[3]; }; -- cgit v1.2.3-58-ga151 From 3375590623e4a132b19a8740512f4deb95728933 Mon Sep 17 00:00:00 2001 From: Raymond Pang Date: Fri, 27 Mar 2020 17:11:46 +0800 Subject: PCI: Add Zhaoxin Vendor ID Add Zhaoxin Vendor ID to pci_ids.h Link: https://lore.kernel.org/r/20200327091148.5190-2-RaymondPang-oc@zhaoxin.com Signed-off-by: Raymond Pang Signed-off-by: Bjorn Helgaas --- include/linux/pci_ids.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 352c0d708720..6693cf561cd1 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2583,6 +2583,8 @@ #define PCI_VENDOR_ID_AMAZON 0x1d0f +#define PCI_VENDOR_ID_ZHAOXIN 0x1d17 + #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_HXT 0x1dbf -- cgit v1.2.3-58-ga151 From d3ec10aa95819bff18a0d936b18884c7816d0914 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sat, 21 Mar 2020 21:11:24 -0400 Subject: KEYS: Don't write out to userspace while holding key semaphore A lockdep circular locking dependency report was seen when running a keyutils test: [12537.027242] ====================================================== [12537.059309] WARNING: possible circular locking dependency detected [12537.088148] 4.18.0-147.7.1.el8_1.x86_64+debug #1 Tainted: G OE --------- - - [12537.125253] ------------------------------------------------------ [12537.153189] keyctl/25598 is trying to acquire lock: [12537.175087] 000000007c39f96c (&mm->mmap_sem){++++}, at: __might_fault+0xc4/0x1b0 [12537.208365] [12537.208365] but task is already holding lock: [12537.234507] 000000003de5b58d (&type->lock_class){++++}, at: keyctl_read_key+0x15a/0x220 [12537.270476] [12537.270476] which lock already depends on the new lock. 
[12537.270476] [12537.307209] [12537.307209] the existing dependency chain (in reverse order) is: [12537.340754] [12537.340754] -> #3 (&type->lock_class){++++}: [12537.367434] down_write+0x4d/0x110 [12537.385202] __key_link_begin+0x87/0x280 [12537.405232] request_key_and_link+0x483/0xf70 [12537.427221] request_key+0x3c/0x80 [12537.444839] dns_query+0x1db/0x5a5 [dns_resolver] [12537.468445] dns_resolve_server_name_to_ip+0x1e1/0x4d0 [cifs] [12537.496731] cifs_reconnect+0xe04/0x2500 [cifs] [12537.519418] cifs_readv_from_socket+0x461/0x690 [cifs] [12537.546263] cifs_read_from_socket+0xa0/0xe0 [cifs] [12537.573551] cifs_demultiplex_thread+0x311/0x2db0 [cifs] [12537.601045] kthread+0x30c/0x3d0 [12537.617906] ret_from_fork+0x3a/0x50 [12537.636225] [12537.636225] -> #2 (root_key_user.cons_lock){+.+.}: [12537.664525] __mutex_lock+0x105/0x11f0 [12537.683734] request_key_and_link+0x35a/0xf70 [12537.705640] request_key+0x3c/0x80 [12537.723304] dns_query+0x1db/0x5a5 [dns_resolver] [12537.746773] dns_resolve_server_name_to_ip+0x1e1/0x4d0 [cifs] [12537.775607] cifs_reconnect+0xe04/0x2500 [cifs] [12537.798322] cifs_readv_from_socket+0x461/0x690 [cifs] [12537.823369] cifs_read_from_socket+0xa0/0xe0 [cifs] [12537.847262] cifs_demultiplex_thread+0x311/0x2db0 [cifs] [12537.873477] kthread+0x30c/0x3d0 [12537.890281] ret_from_fork+0x3a/0x50 [12537.908649] [12537.908649] -> #1 (&tcp_ses->srv_mutex){+.+.}: [12537.935225] __mutex_lock+0x105/0x11f0 [12537.954450] cifs_call_async+0x102/0x7f0 [cifs] [12537.977250] smb2_async_readv+0x6c3/0xc90 [cifs] [12538.000659] cifs_readpages+0x120a/0x1e50 [cifs] [12538.023920] read_pages+0xf5/0x560 [12538.041583] __do_page_cache_readahead+0x41d/0x4b0 [12538.067047] ondemand_readahead+0x44c/0xc10 [12538.092069] filemap_fault+0xec1/0x1830 [12538.111637] __do_fault+0x82/0x260 [12538.129216] do_fault+0x419/0xfb0 [12538.146390] __handle_mm_fault+0x862/0xdf0 [12538.167408] handle_mm_fault+0x154/0x550 [12538.187401] __do_page_fault+0x42f/0xa60 [12538.207395] do_page_fault+0x38/0x5e0 [12538.225777] page_fault+0x1e/0x30 [12538.243010] [12538.243010] -> #0 (&mm->mmap_sem){++++}: [12538.267875] lock_acquire+0x14c/0x420 [12538.286848] __might_fault+0x119/0x1b0 [12538.306006] keyring_read_iterator+0x7e/0x170 [12538.327936] assoc_array_subtree_iterate+0x97/0x280 [12538.352154] keyring_read+0xe9/0x110 [12538.370558] keyctl_read_key+0x1b9/0x220 [12538.391470] do_syscall_64+0xa5/0x4b0 [12538.410511] entry_SYSCALL_64_after_hwframe+0x6a/0xdf [12538.435535] [12538.435535] other info that might help us debug this: [12538.435535] [12538.472829] Chain exists of: [12538.472829] &mm->mmap_sem --> root_key_user.cons_lock --> &type->lock_class [12538.472829] [12538.524820] Possible unsafe locking scenario: [12538.524820] [12538.551431] CPU0 CPU1 [12538.572654] ---- ---- [12538.595865] lock(&type->lock_class); [12538.613737] lock(root_key_user.cons_lock); [12538.644234] lock(&type->lock_class); [12538.672410] lock(&mm->mmap_sem); [12538.687758] [12538.687758] *** DEADLOCK *** [12538.687758] [12538.714455] 1 lock held by keyctl/25598: [12538.732097] #0: 000000003de5b58d (&type->lock_class){++++}, at: keyctl_read_key+0x15a/0x220 [12538.770573] [12538.770573] stack backtrace: [12538.790136] CPU: 2 PID: 25598 Comm: keyctl Kdump: loaded Tainted: G [12538.844855] Hardware name: HP ProLiant DL360 Gen9/ProLiant DL360 Gen9, BIOS P89 12/27/2015 [12538.881963] Call Trace: [12538.892897] dump_stack+0x9a/0xf0 [12538.907908] print_circular_bug.isra.25.cold.50+0x1bc/0x279 [12538.932891] ? 
save_trace+0xd6/0x250 [12538.948979] check_prev_add.constprop.32+0xc36/0x14f0 [12538.971643] ? keyring_compare_object+0x104/0x190 [12538.992738] ? check_usage+0x550/0x550 [12539.009845] ? sched_clock+0x5/0x10 [12539.025484] ? sched_clock_cpu+0x18/0x1e0 [12539.043555] __lock_acquire+0x1f12/0x38d0 [12539.061551] ? trace_hardirqs_on+0x10/0x10 [12539.080554] lock_acquire+0x14c/0x420 [12539.100330] ? __might_fault+0xc4/0x1b0 [12539.119079] __might_fault+0x119/0x1b0 [12539.135869] ? __might_fault+0xc4/0x1b0 [12539.153234] keyring_read_iterator+0x7e/0x170 [12539.172787] ? keyring_read+0x110/0x110 [12539.190059] assoc_array_subtree_iterate+0x97/0x280 [12539.211526] keyring_read+0xe9/0x110 [12539.227561] ? keyring_gc_check_iterator+0xc0/0xc0 [12539.249076] keyctl_read_key+0x1b9/0x220 [12539.266660] do_syscall_64+0xa5/0x4b0 [12539.283091] entry_SYSCALL_64_after_hwframe+0x6a/0xdf One way to prevent this deadlock scenario from happening is to not allow writing to userspace while holding the key semaphore. Instead, an internal buffer is allocated for getting the keys out from the read method first before copying them out to userspace without holding the lock. That requires taking out the __user modifier from all the relevant read methods as well as additional changes to not use any userspace write helpers. That is, 1) The put_user() call is replaced by a direct copy. 2) The copy_to_user() call is replaced by memcpy(). 3) All the fault handling code is removed. Compiling on a x86-64 system, the size of the rxrpc_read() function is reduced from 3795 bytes to 2384 bytes with this patch. Fixes: ^1da177e4c3f4 ("Linux-2.6.12-rc2") Reviewed-by: Jarkko Sakkinen Signed-off-by: Waiman Long Signed-off-by: David Howells --- include/keys/big_key-type.h | 2 +- include/keys/user-type.h | 3 +- include/linux/key-type.h | 2 +- net/dns_resolver/dns_key.c | 2 +- net/rxrpc/key.c | 27 ++++-------- security/keys/big_key.c | 11 ++--- security/keys/encrypted-keys/encrypted.c | 7 ++- security/keys/keyctl.c | 73 ++++++++++++++++++++++++------- security/keys/keyring.c | 6 +-- security/keys/request_key_auth.c | 7 ++- security/keys/trusted-keys/trusted_tpm1.c | 14 +----- security/keys/user_defined.c | 5 +-- 12 files changed, 85 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h index f6a7ba4dccd4..3fee04f81439 100644 --- a/include/keys/big_key-type.h +++ b/include/keys/big_key-type.h @@ -17,6 +17,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); extern void big_key_revoke(struct key *key); extern void big_key_destroy(struct key *key); extern void big_key_describe(const struct key *big_key, struct seq_file *m); -extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); +extern long big_key_read(const struct key *key, char *buffer, size_t buflen); #endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index d5e73266a81a..be61fcddc02a 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); -extern long user_read(const struct key *key, - char __user *buffer, size_t buflen); +extern long user_read(const struct key *key, char *buffer, size_t buflen); static inline const struct user_key_payload 
*user_key_payload_rcu(const struct key *key) { diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 4ded94bcf274..2ab2d6d6aeab 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -127,7 +127,7 @@ struct key_type { * much is copied into the buffer * - shouldn't do the copy if the buffer is NULL */ - long (*read)(const struct key *key, char __user *buffer, size_t buflen); + long (*read)(const struct key *key, char *buffer, size_t buflen); /* handle request_key() for this type instead of invoking * /sbin/request-key (optional) diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 3e1a90669006..ad53eb31d40f 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -302,7 +302,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { int err = PTR_ERR(key->payload.data[dns_key_error]); diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 6c3f35fac42d..0c98313dd7a8 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -31,7 +31,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *); static void rxrpc_destroy(struct key *); static void rxrpc_destroy_s(struct key *); static void rxrpc_describe(const struct key *, struct seq_file *); -static long rxrpc_read(const struct key *, char __user *, size_t); +static long rxrpc_read(const struct key *, char *, size_t); /* * rxrpc defined keys take an arbitrary string as the description and an @@ -1042,12 +1042,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key); * - this returns the result in XDR form */ static long rxrpc_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { const struct rxrpc_key_token *token; const struct krb5_principal *princ; size_t size; - __be32 __user *xdr, *oldxdr; + __be32 *xdr, *oldxdr; u32 cnlen, toksize, ntoks, tok, zero; u16 toksizes[AFSTOKEN_MAX]; int loop; @@ -1124,30 +1124,25 @@ static long rxrpc_read(const struct key *key, if (!buffer || buflen < size) return size; - xdr = (__be32 __user *) buffer; + xdr = (__be32 *)buffer; zero = 0; #define ENCODE(x) \ do { \ - __be32 y = htonl(x); \ - if (put_user(y, xdr++) < 0) \ - goto fault; \ + *xdr++ = htonl(x); \ } while(0) #define ENCODE_DATA(l, s) \ do { \ u32 _l = (l); \ ENCODE(l); \ - if (copy_to_user(xdr, (s), _l) != 0) \ - goto fault; \ - if (_l & 3 && \ - copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ - goto fault; \ + memcpy(xdr, (s), _l); \ + if (_l & 3) \ + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ xdr += (_l + 3) >> 2; \ } while(0) #define ENCODE64(x) \ do { \ __be64 y = cpu_to_be64(x); \ - if (copy_to_user(xdr, &y, 8) != 0) \ - goto fault; \ + memcpy(xdr, &y, 8); \ xdr += 8 >> 2; \ } while(0) #define ENCODE_STR(s) \ @@ -1238,8 +1233,4 @@ static long rxrpc_read(const struct key *key, ASSERTCMP((char __user *) xdr - buffer, ==, size); _leave(" = %zu", size); return size; - -fault: - _leave(" = -EFAULT"); - return -EFAULT; } diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 001abe530a0d..82008f900930 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c @@ -352,7 +352,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) * read the key data * - the key's semaphore is read-locked */ -long big_key_read(const struct key *key, char __user *buffer, size_t buflen) +long big_key_read(const struct key 
*key, char *buffer, size_t buflen) { size_t datalen = (size_t)key->payload.data[big_key_len]; long ret; @@ -391,9 +391,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) ret = datalen; - /* copy decrypted data to user */ - if (copy_to_user(buffer, buf->virt, datalen) != 0) - ret = -EFAULT; + /* copy out decrypted data */ + memcpy(buffer, buf->virt, datalen); err_fput: fput(file); @@ -401,9 +400,7 @@ error: big_key_free_buffer(buf); } else { ret = datalen; - if (copy_to_user(buffer, key->payload.data[big_key_data], - datalen) != 0) - ret = -EFAULT; + memcpy(buffer, key->payload.data[big_key_data], datalen); } return ret; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 60720f58cbe0..f6797ba44bf7 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -902,14 +902,14 @@ out: } /* - * encrypted_read - format and copy the encrypted data to userspace + * encrypted_read - format and copy out the encrypted data * * The resulting datablob format is: * * * On success, return to userspace the encrypted key datablob size. */ -static long encrypted_read(const struct key *key, char __user *buffer, +static long encrypted_read(const struct key *key, char *buffer, size_t buflen) { struct encrypted_key_payload *epayload; @@ -957,8 +957,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); - if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) - ret = -EFAULT; + memcpy(buffer, ascii_buf, asciiblob_len); kzfree(ascii_buf); return asciiblob_len; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 9b898c969558..434ed9defd3a 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -797,6 +797,21 @@ error: return ret; } +/* + * Call the read method + */ +static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen) +{ + long ret; + + down_read(&key->sem); + ret = key_validate(key); + if (ret == 0) + ret = key->type->read(key, buffer, buflen); + up_read(&key->sem); + return ret; +} + /* * Read a key's payload. * @@ -812,26 +827,27 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) struct key *key; key_ref_t key_ref; long ret; + char *key_data; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; - goto error; + goto out; } key = key_ref_to_ptr(key_ref); ret = key_read_state(key); if (ret < 0) - goto error2; /* Negatively instantiated */ + goto key_put_out; /* Negatively instantiated */ /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) - goto error2; + goto key_put_out; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be @@ -839,26 +855,51 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) */ if (!is_key_possessed(key_ref)) { ret = -EACCES; - goto error2; + goto key_put_out; } /* the key is probably readable - now try to read it */ can_read_key: - ret = -EOPNOTSUPP; - if (key->type->read) { - /* Read the data with the semaphore held (since we might sleep) - * to protect against the key being updated or revoked. 
- */ - down_read(&key->sem); - ret = key_validate(key); - if (ret == 0) - ret = key->type->read(key, buffer, buflen); - up_read(&key->sem); + if (!key->type->read) { + ret = -EOPNOTSUPP; + goto key_put_out; } -error2: + if (!buffer || !buflen) { + /* Get the key length from the read method */ + ret = __keyctl_read_key(key, NULL, 0); + goto key_put_out; + } + + /* + * Read the data with the semaphore held (since we might sleep) + * to protect against the key being updated or revoked. + * + * Allocating a temporary buffer to hold the keys before + * transferring them to user buffer to avoid potential + * deadlock involving page fault and mmap_sem. + */ + key_data = kmalloc(buflen, GFP_KERNEL); + + if (!key_data) { + ret = -ENOMEM; + goto key_put_out; + } + ret = __keyctl_read_key(key, key_data, buflen); + + /* + * Read methods will just return the required length without + * any copying if the provided length isn't large enough. + */ + if (ret > 0 && ret <= buflen) { + if (copy_to_user(buffer, key_data, ret)) + ret = -EFAULT; + } + kzfree(key_data); + +key_put_out: key_put(key); -error: +out: return ret; } diff --git a/security/keys/keyring.c b/security/keys/keyring.c index febf36c6ddc5..5ca620d31cd3 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -459,7 +459,6 @@ static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); - int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->buflen); @@ -467,10 +466,7 @@ static int keyring_read_iterator(const void *object, void *data) if (ctx->count >= ctx->buflen) return 1; - ret = put_user(key->serial, ctx->buffer); - if (ret < 0) - return ret; - ctx->buffer++; + *ctx->buffer++ = key->serial; ctx->count += sizeof(key->serial); return 0; } diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index ecba39c93fd9..41e9735006d0 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -22,7 +22,7 @@ static int request_key_auth_instantiate(struct key *, static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); -static long request_key_auth_read(const struct key *, char __user *, size_t); +static long request_key_auth_read(const struct key *, char *, size_t); /* * The request-key authorisation key type definition. @@ -80,7 +80,7 @@ static void request_key_auth_describe(const struct key *key, * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { struct request_key_auth *rka = dereference_key_locked(key); size_t datalen; @@ -97,8 +97,7 @@ static long request_key_auth_read(const struct key *key, if (buflen > datalen) buflen = datalen; - if (copy_to_user(buffer, rka->callout_info, buflen) != 0) - ret = -EFAULT; + memcpy(buffer, rka->callout_info, buflen); } return ret; diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c index d2c5ec1e040b..8001ab07e63b 100644 --- a/security/keys/trusted-keys/trusted_tpm1.c +++ b/security/keys/trusted-keys/trusted_tpm1.c @@ -1130,11 +1130,10 @@ out: * trusted_read - copy the sealed blob data to userspace in hex. * On success, return to userspace the trusted key datablob size. 
*/ -static long trusted_read(const struct key *key, char __user *buffer, +static long trusted_read(const struct key *key, char *buffer, size_t buflen) { const struct trusted_key_payload *p; - char *ascii_buf; char *bufp; int i; @@ -1143,18 +1142,9 @@ static long trusted_read(const struct key *key, char __user *buffer, return -EINVAL; if (buffer && buflen >= 2 * p->blob_len) { - ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL); - if (!ascii_buf) - return -ENOMEM; - - bufp = ascii_buf; + bufp = buffer; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); - if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { - kzfree(ascii_buf); - return -EFAULT; - } - kzfree(ascii_buf); } return 2 * p->blob_len; } diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 6f12de4ce549..07d4287e9084 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(user_describe); * read the key data * - the key's semaphore is read-locked */ -long user_read(const struct key *key, char __user *buffer, size_t buflen) +long user_read(const struct key *key, char *buffer, size_t buflen) { const struct user_key_payload *upayload; long ret; @@ -181,8 +181,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) if (buflen > upayload->datalen) buflen = upayload->datalen; - if (copy_to_user(buffer, upayload->data, buflen) != 0) - ret = -EFAULT; + memcpy(buffer, upayload->data, buflen); } return ret; -- cgit v1.2.3-58-ga151 From 317a0ebe53f46527aed912f7c3df963cd9a41536 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Fri, 27 Mar 2020 15:34:42 -0700 Subject: iio: cros_ec: Use Hertz as unit for sampling frequency To be compliant with other sensors, set and get sensor sampling frequency in Hz, not mHz. 
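Concretely, the EC keeps reporting sensor rates in mHz; the patch splits that value into the (val, val2) pair that IIO_VAL_INT_PLUS_MICRO expects. A self-contained sketch of that arithmetic, assuming only the conversions visible in the hunks below; the function names and the 12.5 Hz example are illustrative:

/* Sketch: EC mHz <-> IIO fixed point (val = Hz, val2 = micro-Hz). */
#include <stdio.h>

static void mhz_to_iio(int mhz, int *val, int *val2)
{
	*val = mhz / 1000;		/* integer hertz */
	*val2 = (mhz % 1000) * 1000;	/* remainder as micro-hertz */
}

static int iio_to_mhz(int val, int val2)
{
	return val * 1000 + val2 / 1000;
}

int main(void)
{
	int val, val2;

	mhz_to_iio(12500, &val, &val2);		/* 12500 mHz is 12.5 Hz */
	printf("%d.%06d Hz\n", val, val2);	/* prints 12.500000 Hz */
	printf("%d mHz\n", iio_to_mhz(val, val2));	/* back to 12500 */
	return 0;
}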
Fixes: ae7b02ad2f32 ("iio: common: cros_ec_sensors: Expose cros_ec_sensors frequency range via iio sysfs") Signed-off-by: Gwendal Grignou Acked-by: Jonathan Cameron Signed-off-by: Enric Balletbo i Serra --- .../common/cros_ec_sensors/cros_ec_sensors_core.c | 32 +++++++++++++--------- include/linux/iio/common/cros_ec_sensors_core.h | 6 ++-- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 67e8eff038cf..c831915ca7e5 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -253,6 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, struct cros_ec_dev *ec = sensor_hub->ec; struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev); u32 ver_mask; + int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 }; int ret, i; platform_set_drvdata(pdev, indio_dev); @@ -301,20 +302,22 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, state->calib[i].scale = MOTION_SENSE_DEFAULT_SCALE; /* 0 is a correct value used to stop the device */ - state->frequencies[0] = 0; if (state->msg->version < 3) { get_default_min_max_freq(state->resp->info.type, - &state->frequencies[1], - &state->frequencies[2], + &frequencies[1], + &frequencies[2], &state->fifo_max_event_count); } else { - state->frequencies[1] = - state->resp->info_3.min_frequency; - state->frequencies[2] = - state->resp->info_3.max_frequency; + frequencies[1] = state->resp->info_3.min_frequency; + frequencies[2] = state->resp->info_3.max_frequency; state->fifo_max_event_count = state->resp->info_3.fifo_max_event_count; } + for (i = 0; i < ARRAY_SIZE(frequencies); i++) { + state->frequencies[2 * i] = frequencies[i] / 1000; + state->frequencies[2 * i + 1] = + (frequencies[i] % 1000) * 1000; + } if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { /* @@ -728,7 +731,7 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { - int ret; + int ret, frequency; switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: @@ -740,8 +743,10 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, if (ret) break; - *val = st->resp->sensor_odr.ret; - ret = IIO_VAL_INT; + frequency = st->resp->sensor_odr.ret; + *val = frequency / 1000; + *val2 = (frequency % 1000) * 1000; + ret = IIO_VAL_INT_PLUS_MICRO; break; default: ret = -EINVAL; @@ -776,7 +781,7 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SAMP_FREQ: *length = ARRAY_SIZE(state->frequencies); *vals = (const int *)&state->frequencies; - *type = IIO_VAL_INT; + *type = IIO_VAL_INT_PLUS_MICRO; return IIO_AVAIL_LIST; } @@ -798,12 +803,13 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask) { - int ret; + int ret, frequency; switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: + frequency = val * 1000 + val2 / 1000; st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR; - st->param.sensor_odr.data = val; + st->param.sensor_odr.data = frequency; /* Always roundup, so caller gets at least what it asks for. 
*/ st->param.sensor_odr.roundup = 1; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index bc26ae2e3272..7bc961defa87 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -51,6 +51,8 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * is always 8-byte aligned. * @read_ec_sensors_data: function used for accessing sensors values * @fifo_max_event_count: Size of the EC sensor FIFO + * @frequencies: Table of known available frequencies: + * 0, Min and Max in mHz */ struct cros_ec_sensors_core_state { struct cros_ec_device *ec; @@ -74,9 +76,7 @@ struct cros_ec_sensors_core_state { unsigned long scan_mask, s16 *data); u32 fifo_max_event_count; - - /* Table of known available frequencies : 0, Min and Max in mHz */ - int frequencies[3]; + int frequencies[6]; }; int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask, -- cgit v1.2.3-58-ga151 From 7a52cbccee8df0edfee30b81fdbb7d4f9d27ffd5 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Sun, 12 Jan 2020 01:55:03 +0000 Subject: mfd: rk808: Reduce shutdown duplication Rather than having 3 almost-identical functions plus the machinery to keep track of them, it's far simpler to just dynamically select the appropriate register field per variant. Signed-off-by: Robin Murphy Signed-off-by: Lee Jones --- drivers/mfd/rk808.c | 61 ++++++++++++++++------------------------------- include/linux/mfd/rk808.h | 1 - 2 files changed, 20 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index 8116ed6cf2e7..b2265c6e94ae 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -448,21 +448,6 @@ static const struct regmap_irq_chip rk818_irq_chip = { static struct i2c_client *rk808_i2c_client; -static void rk805_device_shutdown(void) -{ - int ret; - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - - if (!rk808) - return; - - ret = regmap_update_bits(rk808->regmap, - RK805_DEV_CTRL_REG, - DEV_OFF, DEV_OFF); - if (ret) - dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); -} - static void rk805_device_shutdown_prepare(void) { int ret; @@ -478,32 +463,29 @@ static void rk805_device_shutdown_prepare(void) dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); } -static void rk808_device_shutdown(void) -{ - int ret; - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - - if (!rk808) - return; - - ret = regmap_update_bits(rk808->regmap, - RK808_DEVCTRL_REG, - DEV_OFF_RST, DEV_OFF_RST); - if (ret) - dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); -} - -static void rk818_device_shutdown(void) +static void rk808_pm_power_off(void) { int ret; + unsigned int reg, bit; struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - if (!rk808) + switch (rk808->variant) { + case RK805_ID: + reg = RK805_DEV_CTRL_REG; + bit = DEV_OFF; + break; + case RK808_ID: + reg = RK808_DEVCTRL_REG, + bit = DEV_OFF_RST; + break; + case RK818_ID: + reg = RK818_DEVCTRL_REG; + bit = DEV_OFF; + break; + default: return; - - ret = regmap_update_bits(rk808->regmap, - RK818_DEVCTRL_REG, - DEV_OFF, DEV_OFF); + } + ret = regmap_update_bits(rk808->regmap, reg, bit, bit); if (ret) dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); } @@ -592,7 +574,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg); cells = rk805s; nr_cells = ARRAY_SIZE(rk805s); 
- rk808->pm_pwroff_fn = rk805_device_shutdown; rk808->pm_pwroff_prep_fn = rk805_device_shutdown_prepare; break; case RK808_ID: @@ -602,7 +583,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg); cells = rk808s; nr_cells = ARRAY_SIZE(rk808s); - rk808->pm_pwroff_fn = rk808_device_shutdown; break; case RK818_ID: rk808->regmap_cfg = &rk818_regmap_config; @@ -611,7 +591,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg); cells = rk818s; nr_cells = ARRAY_SIZE(rk818s); - rk808->pm_pwroff_fn = rk818_device_shutdown; break; case RK809_ID: case RK817_ID: @@ -673,7 +652,7 @@ static int rk808_probe(struct i2c_client *client, if (of_property_read_bool(np, "rockchip,system-power-controller")) { rk808_i2c_client = client; - pm_power_off = rk808->pm_pwroff_fn; + pm_power_off = rk808_pm_power_off; pm_power_off_prepare = rk808->pm_pwroff_prep_fn; } @@ -694,7 +673,7 @@ static int rk808_remove(struct i2c_client *client) * pm_power_off may points to a function from another module. * Check if the pointer is set by us and only then overwrite it. */ - if (rk808->pm_pwroff_fn && pm_power_off == rk808->pm_pwroff_fn) + if (pm_power_off == rk808_pm_power_off) pm_power_off = NULL; /** diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index a59bf323f713..b038653fa87e 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -620,7 +620,6 @@ struct rk808 { long variant; const struct regmap_config *regmap_cfg; const struct regmap_irq_chip *regmap_irq_chip; - void (*pm_pwroff_fn)(void); void (*pm_pwroff_prep_fn)(void); }; #endif /* __LINUX_REGULATOR_RK808_H */ -- cgit v1.2.3-58-ga151 From 42679765faf286259b16acf284eb52d68877ff32 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Sun, 12 Jan 2020 01:55:04 +0000 Subject: mfd: rk808: Convert RK805 to shutdown/suspend hooks RK805 has the same kind of dual-role sleep/shutdown pin as RK809/RK817, so it makes little sense for the driver to have to have two completely different mechanisms to handle essentially the same thing. Move RK805 over to the shutdown/suspend flow to clean things up. 
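Taken together with the previous patch, the resulting shape is a single hook that switches on the chip variant to pick a register/bit pair and then issues one register update. A condensed, compilable sketch of that pattern, with all names and register values as stand-ins rather than the rk808 driver's actual symbols:

#include <stdio.h>

enum variant { CHIP_A, CHIP_B };

/* Stand-in for regmap_update_bits(regmap, reg, mask, val). */
static int update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
	printf("reg 0x%02x: mask 0x%02x <- 0x%02x\n", reg, mask, val);
	return 0;
}

static void chip_power_off(enum variant v)
{
	unsigned int reg, bit;

	switch (v) {
	case CHIP_A:
		reg = 0x10;	/* stand-in for a DEV_CTRL register */
		bit = 1U << 0;	/* stand-in for a DEV_OFF bit */
		break;
	case CHIP_B:
		reg = 0x20;
		bit = 1U << 3;
		break;
	default:
		return;		/* unknown variant: nothing to do */
	}

	if (update_bits(reg, bit, bit))
		fprintf(stderr, "Failed to shutdown device!\n");
}

int main(void)
{
	chip_power_off(CHIP_A);
	return 0;
}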
Signed-off-by: Robin Murphy Signed-off-by: Lee Jones --- drivers/mfd/rk808.c | 37 ++++++++++++------------------------- include/linux/mfd/rk808.h | 1 - 2 files changed, 12 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index b2265c6e94ae..d109b9f14407 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -185,7 +185,6 @@ static const struct rk808_reg_data rk805_pre_init_reg[] = { {RK805_BUCK4_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK, RK805_BUCK4_ILMAX_3500MA}, {RK805_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_400MA}, - {RK805_GPIO_IO_POL_REG, SLP_SD_MSK, SLEEP_FUN}, {RK805_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP115C}, }; @@ -448,21 +447,6 @@ static const struct regmap_irq_chip rk818_irq_chip = { static struct i2c_client *rk808_i2c_client; -static void rk805_device_shutdown_prepare(void) -{ - int ret; - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - - if (!rk808) - return; - - ret = regmap_update_bits(rk808->regmap, - RK805_GPIO_IO_POL_REG, - SLP_SD_MSK, SHUTDOWN_FUN); - if (ret) - dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); -} - static void rk808_pm_power_off(void) { int ret; @@ -496,6 +480,12 @@ static void rk8xx_shutdown(struct i2c_client *client) int ret; switch (rk808->variant) { + case RK805_ID: + ret = regmap_update_bits(rk808->regmap, + RK805_GPIO_IO_POL_REG, + SLP_SD_MSK, + SHUTDOWN_FUN); + break; case RK809_ID: case RK817_ID: ret = regmap_update_bits(rk808->regmap, @@ -574,7 +564,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg); cells = rk805s; nr_cells = ARRAY_SIZE(rk805s); - rk808->pm_pwroff_prep_fn = rk805_device_shutdown_prepare; break; case RK808_ID: rk808->regmap_cfg = &rk808_regmap_config; @@ -653,7 +642,6 @@ static int rk808_probe(struct i2c_client *client, if (of_property_read_bool(np, "rockchip,system-power-controller")) { rk808_i2c_client = client; pm_power_off = rk808_pm_power_off; - pm_power_off_prepare = rk808->pm_pwroff_prep_fn; } return 0; @@ -676,13 +664,6 @@ static int rk808_remove(struct i2c_client *client) if (pm_power_off == rk808_pm_power_off) pm_power_off = NULL; - /** - * As above, check if the pointer is set by us before overwrite. 
- */ - if (rk808->pm_pwroff_prep_fn && - pm_power_off_prepare == rk808->pm_pwroff_prep_fn) - pm_power_off_prepare = NULL; - return 0; } @@ -692,6 +673,12 @@ static int __maybe_unused rk8xx_suspend(struct device *dev) int ret = 0; switch (rk808->variant) { + case RK805_ID: + ret = regmap_update_bits(rk808->regmap, + RK805_GPIO_IO_POL_REG, + SLP_SD_MSK, + SLEEP_FUN); + break; case RK809_ID: case RK817_ID: ret = regmap_update_bits(rk808->regmap, diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index b038653fa87e..e07f6e61cd38 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -620,6 +620,5 @@ struct rk808 { long variant; const struct regmap_config *regmap_cfg; const struct regmap_irq_chip *regmap_irq_chip; - void (*pm_pwroff_prep_fn)(void); }; #endif /* __LINUX_REGULATOR_RK808_H */ -- cgit v1.2.3-58-ga151 From 2a7e7274f3d43d2a072cab25c0035dc994903bb9 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 17 Feb 2020 10:26:16 +0800 Subject: mfd: sc27xx: Add USB charger type detection support The Spreadtrum SC27XX series PMICs supply the USB charger type detection function, and related registers are located on the PMIC global registers region, thus we implement and export this function in the MFD driver for users to get the USB charger type. Signed-off-by: Baolin Wang Signed-off-by: Lee Jones --- drivers/mfd/sprd-sc27xx-spi.c | 52 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/sc27xx-pmic.h | 7 ++++++ 2 files changed, 59 insertions(+) create mode 100644 include/linux/mfd/sc27xx-pmic.h (limited to 'include/linux') diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index c0529a1cd5ea..ebdf2f11ae28 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -10,6 +10,7 @@ #include #include #include +#include #define SPRD_PMIC_INT_MASK_STATUS 0x0 #define SPRD_PMIC_INT_RAW_STATUS 0x4 @@ -17,6 +18,16 @@ #define SPRD_SC2731_IRQ_BASE 0x140 #define SPRD_SC2731_IRQ_NUMS 16 +#define SPRD_SC2731_CHG_DET 0xedc + +/* PMIC charger detection definition */ +#define SPRD_PMIC_CHG_DET_DELAY_US 200000 +#define SPRD_PMIC_CHG_DET_TIMEOUT 2000000 +#define SPRD_PMIC_CHG_DET_DONE BIT(11) +#define SPRD_PMIC_SDP_TYPE BIT(7) +#define SPRD_PMIC_DCP_TYPE BIT(6) +#define SPRD_PMIC_CDP_TYPE BIT(5) +#define SPRD_PMIC_CHG_TYPE_MASK GENMASK(7, 5) struct sprd_pmic { struct regmap *regmap; @@ -24,12 +35,14 @@ struct sprd_pmic { struct regmap_irq *irqs; struct regmap_irq_chip irq_chip; struct regmap_irq_chip_data *irq_data; + const struct sprd_pmic_data *pdata; int irq; }; struct sprd_pmic_data { u32 irq_base; u32 num_irqs; + u32 charger_det; }; /* @@ -40,8 +53,46 @@ struct sprd_pmic_data { static const struct sprd_pmic_data sc2731_data = { .irq_base = SPRD_SC2731_IRQ_BASE, .num_irqs = SPRD_SC2731_IRQ_NUMS, + .charger_det = SPRD_SC2731_CHG_DET, }; +enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct sprd_pmic *ddata = spi_get_drvdata(spi); + const struct sprd_pmic_data *pdata = ddata->pdata; + enum usb_charger_type type; + u32 val; + int ret; + + ret = regmap_read_poll_timeout(ddata->regmap, pdata->charger_det, val, + (val & SPRD_PMIC_CHG_DET_DONE), + SPRD_PMIC_CHG_DET_DELAY_US, + SPRD_PMIC_CHG_DET_TIMEOUT); + if (ret) { + dev_err(&spi->dev, "failed to detect charger type\n"); + return UNKNOWN_TYPE; + } + + switch (val & SPRD_PMIC_CHG_TYPE_MASK) { + case SPRD_PMIC_CDP_TYPE: + type = CDP_TYPE; + break; + case SPRD_PMIC_DCP_TYPE: + type = DCP_TYPE; + break; + 
case SPRD_PMIC_SDP_TYPE: + type = SDP_TYPE; + break; + default: + type = UNKNOWN_TYPE; + break; + } + + return type; +} +EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type); + static const struct mfd_cell sprd_pmic_devs[] = { { .name = "sc27xx-wdt", @@ -181,6 +232,7 @@ static int sprd_pmic_probe(struct spi_device *spi) spi_set_drvdata(spi, ddata); ddata->dev = &spi->dev; ddata->irq = spi->irq; + ddata->pdata = pdata; ddata->irq_chip.name = dev_name(&spi->dev); ddata->irq_chip.status_base = diff --git a/include/linux/mfd/sc27xx-pmic.h b/include/linux/mfd/sc27xx-pmic.h new file mode 100644 index 000000000000..57e45c0b3ae2 --- /dev/null +++ b/include/linux/mfd/sc27xx-pmic.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MFD_SC27XX_PMIC_H +#define __LINUX_MFD_SC27XX_PMIC_H + +extern enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev); + +#endif /* __LINUX_MFD_SC27XX_PMIC_H */ -- cgit v1.2.3-58-ga151 From 072eaf3c0f0fd2bd8f53799c8dee3ab907db1242 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 28 Jan 2020 20:12:22 +0100 Subject: libceph: drop CEPH_DEFINE_SHOW_FUNC Although CEPH_DEFINE_SHOW_FUNC is much older, it now duplicates DEFINE_SHOW_ATTRIBUTE from linux/seq_file.h. Signed-off-by: Ilya Dryomov Reviewed-by: Jeff Layton --- fs/ceph/debugfs.c | 16 ++++++++-------- include/linux/ceph/debugfs.h | 14 -------------- net/ceph/debugfs.c | 20 ++++++++++---------- 3 files changed, 18 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index fb7cabd98e7b..481ac97b4d25 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -218,10 +218,10 @@ static int mds_sessions_show(struct seq_file *s, void *ptr) return 0; } -CEPH_DEFINE_SHOW_FUNC(mdsmap_show) -CEPH_DEFINE_SHOW_FUNC(mdsc_show) -CEPH_DEFINE_SHOW_FUNC(caps_show) -CEPH_DEFINE_SHOW_FUNC(mds_sessions_show) +DEFINE_SHOW_ATTRIBUTE(mdsmap); +DEFINE_SHOW_ATTRIBUTE(mdsc); +DEFINE_SHOW_ATTRIBUTE(caps); +DEFINE_SHOW_ATTRIBUTE(mds_sessions); /* @@ -281,25 +281,25 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc) 0400, fsc->client->debugfs_dir, fsc, - &mdsmap_show_fops); + &mdsmap_fops); fsc->debugfs_mds_sessions = debugfs_create_file("mds_sessions", 0400, fsc->client->debugfs_dir, fsc, - &mds_sessions_show_fops); + &mds_sessions_fops); fsc->debugfs_mdsc = debugfs_create_file("mdsc", 0400, fsc->client->debugfs_dir, fsc, - &mdsc_show_fops); + &mdsc_fops); fsc->debugfs_caps = debugfs_create_file("caps", 0400, fsc->client->debugfs_dir, fsc, - &caps_show_fops); + &caps_fops); } diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h index cf5e840eec71..8b3a1a7a953a 100644 --- a/include/linux/ceph/debugfs.h +++ b/include/linux/ceph/debugfs.h @@ -2,22 +2,8 @@ #ifndef _FS_CEPH_DEBUGFS_H #define _FS_CEPH_DEBUGFS_H -#include #include -#define CEPH_DEFINE_SHOW_FUNC(name) \ -static int name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, name, inode->i_private); \ -} \ - \ -static const struct file_operations name##_fops = { \ - .open = name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -}; - /* debugfs.c */ extern void ceph_debugfs_init(void); extern void ceph_debugfs_cleanup(void); diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 7cb992e55475..1344f232ecc5 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -383,11 +383,11 @@ static int client_options_show(struct seq_file *s, void *p) return 0; } -CEPH_DEFINE_SHOW_FUNC(monmap_show) 
-CEPH_DEFINE_SHOW_FUNC(osdmap_show) -CEPH_DEFINE_SHOW_FUNC(monc_show) -CEPH_DEFINE_SHOW_FUNC(osdc_show) -CEPH_DEFINE_SHOW_FUNC(client_options_show) +DEFINE_SHOW_ATTRIBUTE(monmap); +DEFINE_SHOW_ATTRIBUTE(osdmap); +DEFINE_SHOW_ATTRIBUTE(monc); +DEFINE_SHOW_ATTRIBUTE(osdc); +DEFINE_SHOW_ATTRIBUTE(client_options); void __init ceph_debugfs_init(void) { @@ -414,31 +414,31 @@ void ceph_debugfs_client_init(struct ceph_client *client) 0400, client->debugfs_dir, client, - &monc_show_fops); + &monc_fops); client->osdc.debugfs_file = debugfs_create_file("osdc", 0400, client->debugfs_dir, client, - &osdc_show_fops); + &osdc_fops); client->debugfs_monmap = debugfs_create_file("monmap", 0400, client->debugfs_dir, client, - &monmap_show_fops); + &monmap_fops); client->debugfs_osdmap = debugfs_create_file("osdmap", 0400, client->debugfs_dir, client, - &osdmap_show_fops); + &osdmap_fops); client->debugfs_options = debugfs_create_file("client_options", 0400, client->debugfs_dir, client, - &client_options_show_fops); + &client_options_fops); } void ceph_debugfs_client_cleanup(struct ceph_client *client) -- cgit v1.2.3-58-ga151 From 5107d7d505cb32fc5e74b792bce14b03f5beac7f Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Wed, 29 Jan 2020 03:27:07 -0500 Subject: ceph: move ceph_osdc_{read,write}pages to ceph.ko Since these helpers are only used by ceph.ko, move them there and rename them with _sync_ qualifiers. Signed-off-by: Xiubo Li Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/addr.c | 86 ++++++++++++++++++++++++++++++++++++++++- include/linux/ceph/osd_client.h | 17 -------- net/ceph/osd_client.c | 79 ------------------------------------- 3 files changed, 84 insertions(+), 98 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 6067847bc03b..7136f9947354 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -180,6 +180,47 @@ static int ceph_releasepage(struct page *page, gfp_t g) return !PagePrivate(page); } +/* + * Read some contiguous pages. If we cross a stripe boundary, shorten + * *plen. Return number of bytes read, or error. + */ +static int ceph_sync_readpages(struct ceph_fs_client *fsc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int num_pages, + int page_align) +{ + struct ceph_osd_client *osdc = &fsc->client->osdc; + struct ceph_osd_request *req; + int rc = 0; + + dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, + vino.snap, off, *plen); + req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1, + CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, + NULL, truncate_seq, truncate_size, + false); + if (IS_ERR(req)) + return PTR_ERR(req); + + /* it may be a short read due to an object boundary */ + osd_req_op_extent_osd_data_pages(req, 0, + pages, *plen, page_align, false, false); + + dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", + off, *plen, *plen, page_align); + + rc = ceph_osdc_start_request(osdc, req, false); + if (!rc) + rc = ceph_osdc_wait_request(osdc, req); + + ceph_osdc_put_request(req); + dout("readpages result %d\n", rc); + return rc; +} + /* * read a single page, without unlocking it. 
*/ @@ -216,7 +257,7 @@ static int ceph_do_readpage(struct file *filp, struct page *page) dout("readpage inode %p file %p page %p index %lu\n", inode, filp, page, page->index); - err = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), + err = ceph_sync_readpages(fsc, ceph_vino(inode), &ci->i_layout, off, &len, ci->i_truncate_seq, ci->i_truncate_size, &page, 1, 0); @@ -568,6 +609,47 @@ static u64 get_writepages_data_length(struct inode *inode, return end > start ? end - start : 0; } +/* + * do a synchronous write on N pages + */ +static int ceph_sync_writepages(struct ceph_fs_client *fsc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *snapc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec64 *mtime, + struct page **pages, int num_pages) +{ + struct ceph_osd_client *osdc = &fsc->client->osdc; + struct ceph_osd_request *req; + int rc = 0; + int page_align = off & ~PAGE_MASK; + + req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1, + CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, + snapc, truncate_seq, truncate_size, + true); + if (IS_ERR(req)) + return PTR_ERR(req); + + /* it may be a short write due to an object boundary */ + osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, + false, false); + dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); + + req->r_mtime = *mtime; + rc = ceph_osdc_start_request(osdc, req, true); + if (!rc) + rc = ceph_osdc_wait_request(osdc, req); + + ceph_osdc_put_request(req); + if (rc == 0) + rc = len; + dout("writepages result %d\n", rc); + return rc; +} + /* * Write a single page, but leave the page locked. * @@ -626,7 +708,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); set_page_writeback(page); - err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode), + err = ceph_sync_writepages(fsc, ceph_vino(inode), &ci->i_layout, snapc, page_off, len, ceph_wbc.truncate_seq, ceph_wbc.truncate_size, diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 5a62dbd3f4c2..9d9f745b98a1 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -509,23 +509,6 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, struct page *req_page, size_t req_len, struct page **resp_pages, size_t *resp_len); -extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, - struct ceph_vino vino, - struct ceph_file_layout *layout, - u64 off, u64 *plen, - u32 truncate_seq, u64 truncate_size, - struct page **pages, int nr_pages, - int page_align); - -extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, - struct ceph_vino vino, - struct ceph_file_layout *layout, - struct ceph_snap_context *sc, - u64 off, u64 len, - u32 truncate_seq, u64 truncate_size, - struct timespec64 *mtime, - struct page **pages, int nr_pages); - int ceph_osdc_copy_from(struct ceph_osd_client *osdc, u64 src_snapid, u64 src_version, struct ceph_object_id *src_oid, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index af868d3923b9..f9ec6a6568bd 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -5228,85 +5228,6 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) ceph_msgpool_destroy(&osdc->msgpool_op_reply); } -/* - * Read some contiguous pages. If we cross a stripe boundary, shorten - * *plen. Return number of bytes read, or error. 
- */
-int ceph_osdc_readpages(struct ceph_osd_client *osdc,
-			struct ceph_vino vino, struct ceph_file_layout *layout,
-			u64 off, u64 *plen,
-			u32 truncate_seq, u64 truncate_size,
-			struct page **pages, int num_pages, int page_align)
-{
-	struct ceph_osd_request *req;
-	int rc = 0;
-
-	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
-	     vino.snap, off, *plen);
-	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
-				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
-				    NULL, truncate_seq, truncate_size,
-				    false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-
-	/* it may be a short read due to an object boundary */
-	osd_req_op_extent_osd_data_pages(req, 0,
-				pages, *plen, page_align, false, false);
-
-	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
-	     off, *plen, *plen, page_align);
-
-	rc = ceph_osdc_start_request(osdc, req, false);
-	if (!rc)
-		rc = ceph_osdc_wait_request(osdc, req);
-
-	ceph_osdc_put_request(req);
-	dout("readpages result %d\n", rc);
-	return rc;
-}
-EXPORT_SYMBOL(ceph_osdc_readpages);
-
-/*
- * do a synchronous write on N pages
- */
-int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
-			 struct ceph_file_layout *layout,
-			 struct ceph_snap_context *snapc,
-			 u64 off, u64 len,
-			 u32 truncate_seq, u64 truncate_size,
-			 struct timespec64 *mtime,
-			 struct page **pages, int num_pages)
-{
-	struct ceph_osd_request *req;
-	int rc = 0;
-	int page_align = off & ~PAGE_MASK;
-
-	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
-				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
-				    snapc, truncate_seq, truncate_size,
-				    true);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-
-	/* it may be a short write due to an object boundary */
-	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
-					 false, false);
-	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
-
-	req->r_mtime = *mtime;
-	rc = ceph_osdc_start_request(osdc, req, true);
-	if (!rc)
-		rc = ceph_osdc_wait_request(osdc, req);
-
-	ceph_osdc_put_request(req);
-	if (rc == 0)
-		rc = len;
-	dout("writepages result %d\n", rc);
-	return rc;
-}
-EXPORT_SYMBOL(ceph_osdc_writepages);
-
 static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
				     u64 src_snapid, u64 src_version,
				     struct ceph_object_id *src_oid,
--
cgit v1.2.3-58-ga151


From 058daab79d6b597a20fd49b5e445b1b2929c2c1c Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 17 Feb 2020 18:38:37 -0500
Subject: ceph: move to a dedicated slabcache for mds requests

On my machine (x86_64) this struct is 952 bytes, which gets rounded up
to 1024 by kmalloc. Move this to a dedicated slabcache, so we can
allocate them without the extra 72 bytes of overhead per allocation.
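For reference, the pattern being adopted looks roughly like this (a
minimal sketch; "foo_request" and friends are illustrative names, not
the actual ceph ones):

    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* illustrative stand-in for ceph_mds_request */
    struct foo_request {
            u64 tid;
            char payload[944];      /* pretend the total is ~952 bytes */
    };

    static struct kmem_cache *foo_request_cachep;

    static int __init foo_init(void)
    {
            /* object-sized slab: no rounding up to the next kmalloc bucket */
            foo_request_cachep = KMEM_CACHE(foo_request, SLAB_MEM_SPREAD);
            if (!foo_request_cachep)
                    return -ENOMEM;
            return 0;
    }

    static struct foo_request *foo_alloc_request(void)
    {
            return kmem_cache_zalloc(foo_request_cachep, GFP_NOFS);
    }

    static void foo_free_request(struct foo_request *req)
    {
            kmem_cache_free(foo_request_cachep, req);
    }

A dedicated cache sized to the object is what removes the kmalloc
bucket rounding; the allocation and free sites just swap kzalloc/kfree
for the kmem_cache_* calls, as the diff below does.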
Signed-off-by: Jeff Layton
Reviewed-by: Ilya Dryomov
Signed-off-by: Ilya Dryomov
---
 fs/ceph/mds_client.c         | 5 +++--
 fs/ceph/super.c              | 8 ++++++++
 include/linux/ceph/libceph.h | 1 +
 3 files changed, 12 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 2980e57ca7b9..fab9d6461a65 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -736,7 +736,7 @@ void ceph_mdsc_release_request(struct kref *kref)
 	put_request_session(req);
 	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
 	WARN_ON_ONCE(!list_empty(&req->r_wait));
-	kfree(req);
+	kmem_cache_free(ceph_mds_request_cachep, req);
 }

 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
@@ -2094,8 +2094,9 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
 struct ceph_mds_request *
 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 {
-	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
+	struct ceph_mds_request *req;

+	req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
 	if (!req)
 		return ERR_PTR(-ENOMEM);

diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c7f150686a53..b1329cd5388a 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -729,6 +729,7 @@ struct kmem_cache *ceph_cap_flush_cachep;
 struct kmem_cache *ceph_dentry_cachep;
 struct kmem_cache *ceph_file_cachep;
 struct kmem_cache *ceph_dir_file_cachep;
+struct kmem_cache *ceph_mds_request_cachep;

 static void ceph_inode_init_once(void *foo)
 {
@@ -769,6 +770,10 @@ static int __init init_caches(void)
 	if (!ceph_dir_file_cachep)
 		goto bad_dir_file;

+	ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
+	if (!ceph_mds_request_cachep)
+		goto bad_mds_req;
+
 	error = ceph_fscache_register();
 	if (error)
 		goto bad_fscache;
@@ -776,6 +781,8 @@ static int __init init_caches(void)
 	return 0;

 bad_fscache:
+	kmem_cache_destroy(ceph_mds_request_cachep);
+bad_mds_req:
 	kmem_cache_destroy(ceph_dir_file_cachep);
 bad_dir_file:
 	kmem_cache_destroy(ceph_file_cachep);
@@ -804,6 +811,7 @@ static void destroy_caches(void)
 	kmem_cache_destroy(ceph_dentry_cachep);
 	kmem_cache_destroy(ceph_file_cachep);
 	kmem_cache_destroy(ceph_dir_file_cachep);
+	kmem_cache_destroy(ceph_mds_request_cachep);

 	ceph_fscache_unregister();
 }
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index ec73ebc4827d..525b7c3f1c81 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -272,6 +272,7 @@ extern struct kmem_cache *ceph_cap_flush_cachep;
 extern struct kmem_cache *ceph_dentry_cachep;
 extern struct kmem_cache *ceph_file_cachep;
 extern struct kmem_cache *ceph_dir_file_cachep;
+extern struct kmem_cache *ceph_mds_request_cachep;

 /* ceph_common.c */
 extern bool libceph_compatible(void *data);
--
cgit v1.2.3-58-ga151


From 3bb48b4142bbf72045af5ebe72e65ccff6d02680 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 2 Dec 2019 13:47:57 -0500
Subject: ceph: add flag to designate that a request is asynchronous

...and ensure that such requests are never queued. The MDS needs to
know that a request is asynchronous, so add flags and proper
infrastructure for that.

Also, delegated inode numbers and directory caps are associated with
the session, so ensure that async requests are always transmitted on
the first attempt and are never queued to wait for session
reestablishment.

If it does end up looking like we'll need to queue the request, then
have it return -EJUKEBOX so the caller can reattempt with a synchronous
request.
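Schematically, the bail-out is a single bit test on the request flags;
a sketch with made-up names (only the flag-bit idea, test_bit() and
-EJUKEBOX come from the patch):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define FOO_R_ASYNC 8   /* mirrors CEPH_MDS_R_ASYNC */

    struct foo_request {
            unsigned long r_req_flags;
    };

    /* assumed helpers, stand-ins for the real session/queue logic */
    extern bool foo_session_usable(struct foo_request *req);
    extern void foo_queue_request(struct foo_request *req);
    extern int foo_send_request(struct foo_request *req);

    static int foo_do_request(struct foo_request *req)
    {
            if (!foo_session_usable(req)) {
                    /*
                     * Never queue an async request: bail out so the
                     * caller can retry with a synchronous request.
                     */
                    if (test_bit(FOO_R_ASYNC, &req->r_req_flags))
                            return -EJUKEBOX;
                    foo_queue_request(req);
                    return 0;
            }
            return foo_send_request(req);
    }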
Signed-off-by: Jeff Layton Reviewed-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/inode.c | 1 + fs/ceph/mds_client.c | 15 +++++++++++++++ fs/ceph/mds_client.h | 1 + include/linux/ceph/ceph_fs.h | 5 +++-- 4 files changed, 20 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 094b8fc37787..9869ec101e88 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1311,6 +1311,7 @@ retry_lookup: err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL, session, (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) && + !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && rinfo->head->result == 0) ? req->r_fmode : -1, &req->r_caps_reservation); if (err < 0) { diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 2da98b6cc064..9e8b2099e63f 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2528,6 +2528,8 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) flags |= CEPH_MDS_FLAG_REPLAY; + if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) + flags |= CEPH_MDS_FLAG_ASYNC; if (req->r_parent) flags |= CEPH_MDS_FLAG_WANT_DENTRY; rhead->flags = cpu_to_le32(flags); @@ -2611,6 +2613,10 @@ static void __do_request(struct ceph_mds_client *mdsc, mds = __choose_mds(mdsc, req, &random); if (mds < 0 || ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { + if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) { + err = -EJUKEBOX; + goto finish; + } dout("do_request no mds or not active, waiting for map\n"); list_add(&req->r_wait, &mdsc->waiting_for_map); return; @@ -2635,6 +2641,15 @@ static void __do_request(struct ceph_mds_client *mdsc, err = -EACCES; goto out_session; } + /* + * We cannot queue async requests since the caps and delegated + * inodes are bound to the session. Just return -EJUKEBOX and + * let the caller retry a sync request in that case. + */ + if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) { + err = -EJUKEBOX; + goto out_session; + } if (session->s_state == CEPH_MDS_SESSION_NEW || session->s_state == CEPH_MDS_SESSION_CLOSING) { __open_session(mdsc, session); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index a0918d00117c..95ac00e59e66 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -255,6 +255,7 @@ struct ceph_mds_request { #define CEPH_MDS_R_GOT_RESULT (5) /* got a result */ #define CEPH_MDS_R_DID_PREPOPULATE (6) /* prepopulated readdir */ #define CEPH_MDS_R_PARENT_LOCKED (7) /* is r_parent->i_rwsem wlocked? 
*/ +#define CEPH_MDS_R_ASYNC (8) /* async request */ unsigned long r_req_flags; struct mutex r_fill_mutex; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index cb21c5cf12c3..9f747a1b8788 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -444,8 +444,9 @@ union ceph_mds_request_args { } __attribute__ ((packed)) lookupino; } __attribute__ ((packed)); -#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ -#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ +#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ +#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ +#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */ struct ceph_mds_request_head { __le64 oldest_client_tid; -- cgit v1.2.3-58-ga151 From f5e17aed3accb406f51ae528d657c275efc1edfc Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 18 Feb 2020 14:12:32 -0500 Subject: ceph: track primary dentry link Newer versions of the MDS will flag a dentry as "primary". In later patches, we'll need to consult this info, so track it in di->flags. Signed-off-by: Jeff Layton Reviewed-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/dir.c | 1 + fs/ceph/inode.c | 8 +++++++- fs/ceph/super.h | 1 + include/linux/ceph/ceph_fs.h | 3 +++ 4 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index d0cd0aba5843..a87274935a09 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1411,6 +1411,7 @@ void ceph_invalidate_dentry_lease(struct dentry *dentry) spin_lock(&dentry->d_lock); di->time = jiffies; di->lease_shared_gen = 0; + di->flags &= ~CEPH_DENTRY_PRIMARY_LINK; __dentry_lease_unlist(di); spin_unlock(&dentry->d_lock); } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9869ec101e88..7478bd0283c1 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1051,6 +1051,7 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry, struct ceph_mds_session **old_lease_session) { struct ceph_dentry_info *di = ceph_dentry(dentry); + unsigned mask = le16_to_cpu(lease->mask); long unsigned duration = le32_to_cpu(lease->duration_ms); long unsigned ttl = from_time + (duration * HZ) / 1000; long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; @@ -1062,8 +1063,13 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry, if (ceph_snap(dir) != CEPH_NOSNAP) return; + if (mask & CEPH_LEASE_PRIMARY_LINK) + di->flags |= CEPH_DENTRY_PRIMARY_LINK; + else + di->flags &= ~CEPH_DENTRY_PRIMARY_LINK; + di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen); - if (duration == 0) { + if (!(mask & CEPH_LEASE_VALID)) { __ceph_dentry_dir_lease_touch(di); return; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index b2711ee56b09..7a93f37e7708 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -284,6 +284,7 @@ struct ceph_dentry_info { #define CEPH_DENTRY_REFERENCED 1 #define CEPH_DENTRY_LEASE_LIST 2 #define CEPH_DENTRY_SHRINK_LIST 4 +#define CEPH_DENTRY_PRIMARY_LINK 8 struct ceph_inode_xattrs_info { /* diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 9f747a1b8788..94cc4b047987 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -531,6 +531,9 @@ struct ceph_mds_reply_lease { __le32 seq; } __attribute__ ((packed)); +#define CEPH_LEASE_VALID (1 | 2) /* old and new bit values */ +#define CEPH_LEASE_PRIMARY_LINK 4 /* primary linkage */ + struct ceph_mds_reply_dirfrag { __le32 frag; /* fragment */ __le32 
auth; /* auth mds, if this is a delegation point */ -- cgit v1.2.3-58-ga151 From a25949b99003b7e6c2604a3fc8b8d62385508477 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 18 Feb 2020 14:12:45 -0500 Subject: ceph: cap tracking for async directory operations Track and correctly handle directory caps for asynchronous operations. Add aliases for Frc caps that we now designate at Dcu caps (when dealing with directories). Unlike file caps, we don't reclaim these when the session goes away, and instead preemptively release them. In-flight async dirops are instead handled during reconnect phase. The client needs to re-do a synchronous operation in order to re-get directory caps. Signed-off-by: Jeff Layton Reviewed-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/caps.c | 27 +++++++++++++++++++-------- fs/ceph/mds_client.c | 31 ++++++++++++++++++++++++++----- fs/ceph/mds_client.h | 6 +++++- include/linux/ceph/ceph_fs.h | 6 ++++++ 4 files changed, 56 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 569484a57111..d3274120f738 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -992,7 +992,11 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci) int __ceph_caps_wanted(struct ceph_inode_info *ci) { int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci); - if (!S_ISDIR(ci->vfs_inode.i_mode)) { + if (S_ISDIR(ci->vfs_inode.i_mode)) { + /* we want EXCL if holding caps of dir ops */ + if (w & CEPH_CAP_ANY_DIR_OPS) + w |= CEPH_CAP_FILE_EXCL; + } else { /* we want EXCL if dirty data */ if (w & CEPH_CAP_FILE_BUFFER) w |= CEPH_CAP_FILE_EXCL; @@ -1893,10 +1897,13 @@ retry_locked: * revoking the shared cap on every create/unlink * operation. */ - if (IS_RDONLY(inode)) + if (IS_RDONLY(inode)) { want = CEPH_CAP_ANY_SHARED; - else - want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; + } else { + want = CEPH_CAP_ANY_SHARED | + CEPH_CAP_FILE_EXCL | + CEPH_CAP_ANY_DIR_OPS; + } retain |= want; } else { @@ -2749,10 +2756,14 @@ int ceph_try_get_caps(struct inode *inode, int need, int want, int ret; BUG_ON(need & ~CEPH_CAP_FILE_RD); - BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED)); - ret = ceph_pool_perm_check(inode, need); - if (ret < 0) - return ret; + BUG_ON(want & ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO | + CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL | + CEPH_CAP_ANY_DIR_OPS)); + if (need) { + ret = ceph_pool_perm_check(inode, need); + if (ret < 0) + return ret; + } ret = try_get_cap_refs(inode, need, want, 0, (nonblock ? NON_BLOCKING : 0), got); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index c433655eba62..9ffb2ee623af 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -699,6 +699,7 @@ void ceph_mdsc_release_request(struct kref *kref) struct ceph_mds_request *req = container_of(kref, struct ceph_mds_request, r_kref); + ceph_mdsc_release_dir_caps(req); destroy_reply_info(&req->r_reply_info); if (req->r_request) ceph_msg_put(req->r_request); @@ -3280,6 +3281,17 @@ bad: return; } +void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req) +{ + int dcaps; + + dcaps = xchg(&req->r_dir_caps, 0); + if (dcaps) { + dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps)); + ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps); + } +} + /* * called under session->mutex. 
*/ @@ -3307,9 +3319,14 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, continue; if (req->r_attempts == 0) continue; /* only old requests */ - if (req->r_session && - req->r_session->s_mds == session->s_mds) - __send_request(mdsc, session, req, true); + if (!req->r_session) + continue; + if (req->r_session->s_mds != session->s_mds) + continue; + + ceph_mdsc_release_dir_caps(req); + + __send_request(mdsc, session, req, true); } mutex_unlock(&mdsc->mutex); } @@ -3393,7 +3410,7 @@ fail_msg: /* * Encode information about a cap for a reconnect with the MDS. */ -static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, +static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { union { @@ -3416,6 +3433,10 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, cap->mseq = 0; /* and migrate_seq */ cap->cap_gen = cap->session->s_cap_gen; + /* These are lost when the session goes away */ + if (S_ISDIR(inode->i_mode)) + cap->issued &= ~CEPH_CAP_ANY_DIR_OPS; + if (recon_state->msg_version >= 2) { rec.v2.cap_id = cpu_to_le64(cap->cap_id); rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); @@ -3712,7 +3733,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, recon_state.msg_version = 2; } /* trsaverse this session's caps */ - err = ceph_iterate_session_caps(session, encode_caps_cb, &recon_state); + err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state); spin_lock(&session->s_cap_lock); session->s_cap_reconnect = 0; diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 8043f2b439b1..f10d342ea585 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -284,8 +284,11 @@ struct ceph_mds_request { struct ceph_msg *r_request; /* original request */ struct ceph_msg *r_reply; struct ceph_mds_reply_info_parsed r_reply_info; - struct page *r_locked_page; int r_err; + + + struct page *r_locked_page; + int r_dir_caps; int r_num_caps; u32 r_readdir_offset; @@ -489,6 +492,7 @@ extern int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, struct inode *dir, struct ceph_mds_request *req); +extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req); static inline void ceph_mdsc_get_request(struct ceph_mds_request *req) { kref_get(&req->r_kref); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 94cc4b047987..91d09cf37649 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -663,6 +663,12 @@ int ceph_flags_to_mode(int flags); #define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ CEPH_LOCK_IXATTR) +/* cap masks async dir operations */ +#define CEPH_CAP_DIR_CREATE CEPH_CAP_FILE_CACHE +#define CEPH_CAP_DIR_UNLINK CEPH_CAP_FILE_RD +#define CEPH_CAP_ANY_DIR_OPS (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | \ + CEPH_CAP_FILE_WREXTEND | CEPH_CAP_FILE_LAZYIO) + int ceph_caps_for_mode(int mode); enum { -- cgit v1.2.3-58-ga151 From 9a8d03ca2e2c334d08ee91a3e07dcce31a02fdc6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 27 Nov 2019 12:06:14 -0500 Subject: ceph: attempt to do async create when possible With the Octopus release, the MDS will hand out directory create caps. If we have Fxc caps on the directory, and complete directory information or a known negative dentry, then we can return without waiting on the reply, allowing the open() call to return very quickly to userland. 
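The gating test amounts to something like the sketch below (all names
here are made up for illustration; the real checks live in
try_prep_async_create() in the diff):

    #include <linux/types.h>

    #define FOO_CAP_FILE_EXCL  0x1
    #define FOO_CAP_DIR_CREATE 0x2

    struct foo_dir {
            u32  caps_issued;
            bool has_delegated_inos;   /* inode numbers pre-delegated by MDS */
            bool is_complete;          /* complete cached dir contents */
    };

    struct foo_dentry {
            bool known_negative;       /* valid lease says "no such name" */
    };

    static bool foo_can_create_async(const struct foo_dir *dir,
                                     const struct foo_dentry *dentry)
    {
            /* need the directory create caps */
            if ((dir->caps_issued & (FOO_CAP_FILE_EXCL | FOO_CAP_DIR_CREATE)) !=
                (FOO_CAP_FILE_EXCL | FOO_CAP_DIR_CREATE))
                    return false;
            /* need an inode number delegated by the MDS */
            if (!dir->has_delegated_inos)
                    return false;
            /* need complete dir info or a known-negative dentry */
            return dir->is_complete || dentry->known_negative;
    }

Only when all three conditions hold can the reply wait be skipped.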
We use the normal ceph_fill_inode() routine to fill in the inode, so we have to gin up some reply inode information with what we'd expect the newly-created inode to have. The client assumes that it has a full set of caps on the new inode, and that the MDS will revoke them when there is conflicting access. This functionality is gated on the wsync/nowsync mount options. Signed-off-by: Jeff Layton Reviewed-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/file.c | 247 +++++++++++++++++++++++++++++++++++++++++-- include/linux/ceph/ceph_fs.h | 3 + 2 files changed, 243 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index b6ad64b8863a..67080721cec8 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -448,6 +448,216 @@ cache_file_layout(struct inode *dst, struct inode *src) spin_unlock(&cdst->i_ceph_lock); } +/* + * Try to set up an async create. We need caps, a file layout, and inode number, + * and either a lease on the dentry or complete dir info. If any of those + * criteria are not satisfied, then return false and the caller can go + * synchronous. + */ +static int try_prep_async_create(struct inode *dir, struct dentry *dentry, + struct ceph_file_layout *lo, u64 *pino) +{ + struct ceph_inode_info *ci = ceph_inode(dir); + struct ceph_dentry_info *di = ceph_dentry(dentry); + int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE; + u64 ino; + + spin_lock(&ci->i_ceph_lock); + /* No auth cap means no chance for Dc caps */ + if (!ci->i_auth_cap) + goto no_async; + + /* Any delegated inos? */ + if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos)) + goto no_async; + + if (!ceph_file_layout_is_valid(&ci->i_cached_layout)) + goto no_async; + + if ((__ceph_caps_issued(ci, NULL) & want) != want) + goto no_async; + + if (d_in_lookup(dentry)) { + if (!__ceph_dir_is_complete(ci)) + goto no_async; + } else if (atomic_read(&ci->i_shared_gen) != + READ_ONCE(di->lease_shared_gen)) { + goto no_async; + } + + ino = ceph_get_deleg_ino(ci->i_auth_cap->session); + if (!ino) + goto no_async; + + *pino = ino; + ceph_take_cap_refs(ci, want, false); + memcpy(lo, &ci->i_cached_layout, sizeof(*lo)); + rcu_assign_pointer(lo->pool_ns, + ceph_try_get_string(ci->i_cached_layout.pool_ns)); + got = want; +no_async: + spin_unlock(&ci->i_ceph_lock); + return got; +} + +static void restore_deleg_ino(struct inode *dir, u64 ino) +{ + struct ceph_inode_info *ci = ceph_inode(dir); + struct ceph_mds_session *s = NULL; + + spin_lock(&ci->i_ceph_lock); + if (ci->i_auth_cap) + s = ceph_get_mds_session(ci->i_auth_cap->session); + spin_unlock(&ci->i_ceph_lock); + if (s) { + int err = ceph_restore_deleg_ino(s, ino); + if (err) + pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n", + ino, err); + ceph_put_mds_session(s); + } +} + +static void ceph_async_create_cb(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req) +{ + int result = req->r_err ? req->r_err : + le32_to_cpu(req->r_reply_info.head->result); + + if (result == -EJUKEBOX) + goto out; + + mapping_set_error(req->r_parent->i_mapping, result); + + if (result) { + struct dentry *dentry = req->r_dentry; + int pathlen; + u64 base; + char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen, + &base, 0); + + ceph_dir_clear_complete(req->r_parent); + if (!d_unhashed(dentry)) + d_drop(dentry); + + /* FIXME: start returning I/O errors on all accesses? */ + pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n", + base, IS_ERR(path) ? 
"<>" : path, result); + ceph_mdsc_free_path(path, pathlen); + } + + if (req->r_target_inode) { + struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); + u64 ino = ceph_vino(req->r_target_inode).ino; + + if (req->r_deleg_ino != ino) + pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n", + __func__, req->r_err, req->r_deleg_ino, ino); + mapping_set_error(req->r_target_inode->i_mapping, result); + + spin_lock(&ci->i_ceph_lock); + if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) { + ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE; + wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT); + } + ceph_kick_flushing_inode_caps(req->r_session, ci); + spin_unlock(&ci->i_ceph_lock); + } else { + pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__, + req->r_deleg_ino); + } +out: + ceph_mdsc_release_dir_caps(req); +} + +static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, + struct file *file, umode_t mode, + struct ceph_mds_request *req, + struct ceph_acl_sec_ctx *as_ctx, + struct ceph_file_layout *lo) +{ + int ret; + char xattr_buf[4]; + struct ceph_mds_reply_inode in = { }; + struct ceph_mds_reply_info_in iinfo = { .in = &in }; + struct ceph_inode_info *ci = ceph_inode(dir); + struct inode *inode; + struct timespec64 now; + struct ceph_vino vino = { .ino = req->r_deleg_ino, + .snap = CEPH_NOSNAP }; + + ktime_get_real_ts64(&now); + + inode = ceph_get_inode(dentry->d_sb, vino); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + iinfo.inline_version = CEPH_INLINE_NONE; + iinfo.change_attr = 1; + ceph_encode_timespec64(&iinfo.btime, &now); + + iinfo.xattr_len = ARRAY_SIZE(xattr_buf); + iinfo.xattr_data = xattr_buf; + memset(iinfo.xattr_data, 0, iinfo.xattr_len); + + in.ino = cpu_to_le64(vino.ino); + in.snapid = cpu_to_le64(CEPH_NOSNAP); + in.version = cpu_to_le64(1); // ??? + in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE); + in.cap.cap_id = cpu_to_le64(1); + in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino); + in.cap.flags = CEPH_CAP_FLAG_AUTH; + in.ctime = in.mtime = in.atime = iinfo.btime; + in.mode = cpu_to_le32((u32)mode); + in.truncate_seq = cpu_to_le32(1); + in.truncate_size = cpu_to_le64(-1ULL); + in.xattr_version = cpu_to_le64(1); + in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid())); + in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ? + dir->i_gid : current_fsgid())); + in.nlink = cpu_to_le32(1); + in.max_size = cpu_to_le64(lo->stripe_unit); + + ceph_file_layout_to_legacy(lo, &in.layout); + + ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session, + req->r_fmode, NULL); + if (ret) { + dout("%s failed to fill inode: %d\n", __func__, ret); + ceph_dir_clear_complete(dir); + if (!d_unhashed(dentry)) + d_drop(dentry); + if (inode->i_state & I_NEW) + discard_new_inode(inode); + } else { + struct dentry *dn; + + dout("%s d_adding new inode 0x%llx to 0x%lx/%s\n", __func__, + vino.ino, dir->i_ino, dentry->d_name.name); + ceph_dir_clear_ordered(dir); + ceph_init_inode_acls(inode, as_ctx); + if (inode->i_state & I_NEW) { + /* + * If it's not I_NEW, then someone created this before + * we got here. Assume the server is aware of it at + * that point and don't worry about setting + * CEPH_I_ASYNC_CREATE. 
+ */ + ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE; + unlock_new_inode(inode); + } + if (d_in_lookup(dentry) || d_really_is_negative(dentry)) { + if (!d_unhashed(dentry)) + d_drop(dentry); + dn = d_splice_alias(inode, dentry); + WARN_ON_ONCE(dn && dn != dentry); + } + file->f_mode |= FMODE_CREATED; + ret = finish_open(file, dentry, ceph_open); + } + return ret; +} + /* * Do a lookup + open with a single request. If we get a non-existent * file or symlink, return 1 so the VFS can retry. @@ -460,6 +670,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct ceph_mds_request *req; struct dentry *dn; struct ceph_acl_sec_ctx as_ctx = {}; + bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS); int mask; int err; @@ -483,7 +694,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, /* If it's not being looked up, it's negative */ return -ENOENT; } - +retry: /* do the open */ req = prepare_open_request(dir->i_sb, flags, mode); if (IS_ERR(req)) { @@ -492,21 +703,43 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, } req->r_dentry = dget(dentry); req->r_num_caps = 2; + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; + if (ceph_security_xattr_wanted(dir)) + mask |= CEPH_CAP_XATTR_SHARED; + req->r_args.open.mask = cpu_to_le32(mask); + req->r_parent = dir; + if (flags & O_CREAT) { + struct ceph_file_layout lo; + req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; if (as_ctx.pagelist) { req->r_pagelist = as_ctx.pagelist; as_ctx.pagelist = NULL; } + if (try_async && + (req->r_dir_caps = + try_prep_async_create(dir, dentry, &lo, + &req->r_deleg_ino))) { + set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags); + req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL); + req->r_callback = ceph_async_create_cb; + err = ceph_mdsc_submit_request(mdsc, dir, req); + if (!err) { + err = ceph_finish_async_create(dir, dentry, + file, mode, req, + &as_ctx, &lo); + } else if (err == -EJUKEBOX) { + restore_deleg_ino(dir, req->r_deleg_ino); + ceph_mdsc_put_request(req); + try_async = false; + goto retry; + } + goto out_req; + } } - mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; - if (ceph_security_xattr_wanted(dir)) - mask |= CEPH_CAP_XATTR_SHARED; - req->r_args.open.mask = cpu_to_le32(mask); - - req->r_parent = dir; set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 91d09cf37649..e035c5194005 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -659,6 +659,9 @@ int ceph_flags_to_mode(int flags); #define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \ CEPH_CAP_PIN) +#define CEPH_CAP_ALL_FILE (CEPH_CAP_PIN | CEPH_CAP_ANY_SHARED | \ + CEPH_CAP_AUTH_EXCL | CEPH_CAP_XATTR_EXCL | \ + CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR) #define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ CEPH_LOCK_IXATTR) -- cgit v1.2.3-58-ga151 From 719a2514e9bf313c3627078926d56bc2a8b290d1 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 5 Mar 2020 20:21:00 +0800 Subject: ceph: consider inode's last read/write when calculating wanted caps Add i_last_rd and i_last_wr to ceph_inode_info. These fields are used to track the last time the client acquired read/write caps for the inode. 
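The staleness check this enables is an ordinary jiffies comparison;
roughly (a sketch with made-up names):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* illustrative inode state */
    struct foo_inode {
            int nr_readers;            /* open files in read mode */
            unsigned long last_rd;     /* jiffies of last read-cap use */
    };

    static bool foo_wants_read_caps(const struct foo_inode *ci,
                                    unsigned long delay_max_secs)
    {
            unsigned long cutoff = jiffies - delay_max_secs * HZ;

            /* even with open files, caps stay wanted only while use is recent */
            return ci->nr_readers > 0 && time_after(ci->last_rd, cutoff);
    }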
If there is no read/write on an inode for 'caps_wanted_delay_max'
seconds, __ceph_caps_file_wanted() does not request caps for read/write
even if there are open files.

Call __ceph_touch_fmode() for dir operations. __ceph_caps_file_wanted()
calculates a dir's wanted caps according to the last dir
read/modification. If there is a recent dir read, the dir inode wants
CEPH_CAP_ANY_SHARED caps. If there is a recent dir modification, it
also wants CEPH_CAP_FILE_EXCL.

Readdir is a special case. The dir inode wants CEPH_CAP_FILE_EXCL after
readdir, since with that cap, modifications do not need to release
CEPH_CAP_FILE_SHARED or invalidate all dentry leases issued by readdir.

Signed-off-by: "Yan, Zheng"
Reviewed-by: Jeff Layton
Signed-off-by: Ilya Dryomov
---
 fs/ceph/caps.c               | 183 ++++++++++++++++++++++++++++++-------------
 fs/ceph/dir.c                |  21 +++--
 fs/ceph/file.c               |  21 ++---
 fs/ceph/inode.c              |  10 ++-
 fs/ceph/ioctl.c              |   2 +
 fs/ceph/mds_client.c         |  11 ++-
 fs/ceph/super.h              |  13 ++-
 include/linux/ceph/ceph_fs.h |   1 +
 8 files changed, 188 insertions(+), 74 deletions(-)

(limited to 'include/linux')

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a75e5eb3740e..aa5bb5a4de46 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -978,19 +978,67 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
 	return used;
 }

+#define FMODE_WAIT_BIAS 1000
+
 /*
  * wanted, by virtue of open file modes
  */
 int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
 {
-	int i, bits = 0;
-	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
-		if (ci->i_nr_by_mode[i])
-			bits |= 1 << i;
+	const int PIN_SHIFT = ffs(CEPH_FILE_MODE_PIN);
+	const int RD_SHIFT = ffs(CEPH_FILE_MODE_RD);
+	const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
+	const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
+	struct ceph_mount_options *opt =
+		ceph_inode_to_client(&ci->vfs_inode)->mount_options;
+	unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
+	unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
+
+	if (S_ISDIR(ci->vfs_inode.i_mode)) {
+		int want = 0;
+
+		/* use used_cutoff here, to keep dir's wanted caps longer */
+		if (ci->i_nr_by_mode[RD_SHIFT] > 0 ||
+		    time_after(ci->i_last_rd, used_cutoff))
+			want |= CEPH_CAP_ANY_SHARED;
+
+		if (ci->i_nr_by_mode[WR_SHIFT] > 0 ||
+		    time_after(ci->i_last_wr, used_cutoff)) {
+			want |= CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
+			if (opt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
+				want |= CEPH_CAP_ANY_DIR_OPS;
+		}
+
+		if (want || ci->i_nr_by_mode[PIN_SHIFT] > 0)
+			want |= CEPH_CAP_PIN;
+
+		return want;
+	} else {
+		int bits = 0;
+
+		if (ci->i_nr_by_mode[RD_SHIFT] > 0) {
+			if (ci->i_nr_by_mode[RD_SHIFT] >= FMODE_WAIT_BIAS ||
+			    time_after(ci->i_last_rd, used_cutoff))
+				bits |= 1 << RD_SHIFT;
+		} else if (time_after(ci->i_last_rd, idle_cutoff)) {
+			bits |= 1 << RD_SHIFT;
+		}
+
+		if (ci->i_nr_by_mode[WR_SHIFT] > 0) {
+			if (ci->i_nr_by_mode[WR_SHIFT] >= FMODE_WAIT_BIAS ||
+			    time_after(ci->i_last_wr, used_cutoff))
+				bits |= 1 << WR_SHIFT;
+		} else if (time_after(ci->i_last_wr, idle_cutoff)) {
+			bits |= 1 << WR_SHIFT;
+		}
+
+		/* check lazyio only when read/write is wanted */
+		if ((bits & (CEPH_FILE_MODE_RDWR << 1)) &&
+		    ci->i_nr_by_mode[LAZY_SHIFT] > 0)
+			bits |= 1 << LAZY_SHIFT;
+
+		return bits ?
ceph_caps_for_mode(bits >> 1) : 0; } - if (bits == 0) - return 0; - return ceph_caps_for_mode(bits >> 1); } /* @@ -1032,14 +1080,6 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check) return mds_wanted; } -/* - * called under i_ceph_lock - */ -static int __ceph_is_single_caps(struct ceph_inode_info *ci) -{ - return rb_first(&ci->i_caps) == rb_last(&ci->i_caps); -} - int ceph_is_any_caps(struct inode *inode) { struct ceph_inode_info *ci = ceph_inode(inode); @@ -1877,10 +1917,6 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, if (ci->i_ceph_flags & CEPH_I_FLUSH) flags |= CHECK_CAPS_FLUSH; - if (!(flags & CHECK_CAPS_AUTHONLY) || - (ci->i_auth_cap && __ceph_is_single_caps(ci))) - __cap_delay_cancel(mdsc, ci); - goto retry_locked; retry: spin_lock(&ci->i_ceph_lock); @@ -1907,9 +1943,7 @@ retry_locked: if (IS_RDONLY(inode)) { want = CEPH_CAP_ANY_SHARED; } else { - want = CEPH_CAP_ANY_SHARED | - CEPH_CAP_FILE_EXCL | - CEPH_CAP_ANY_DIR_OPS; + want |= CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; } retain |= want; } else { @@ -2105,9 +2139,17 @@ ack: goto retry; /* retake i_ceph_lock and restart our cap scan. */ } - /* Reschedule delayed caps release if we delayed anything */ - if (delayed) - __cap_delay_requeue(mdsc, ci, false); + if (list_empty(&ci->i_cap_delay_list)) { + if (delayed) { + /* Reschedule delayed caps release if we delayed anything */ + __cap_delay_requeue(mdsc, ci, false); + } else if (__ceph_is_any_real_caps(ci) && + (file_wanted & ~CEPH_CAP_PIN) && + !(used & (CEPH_CAP_FILE_RD | CEPH_CAP_ANY_FILE_WR))) { + /* periodically re-calculate caps wanted by open files */ + __cap_delay_requeue(mdsc, ci, true); + } + } spin_unlock(&ci->i_ceph_lock); @@ -2573,8 +2615,9 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, * FIXME: how does a 0 return differ from -EAGAIN? 
*/ enum { - NON_BLOCKING = 1, - CHECK_FILELOCK = 2, + /* first 8 bits are reserved for CEPH_FILE_MODE_FOO */ + NON_BLOCKING = (1 << 8), + CHECK_FILELOCK = (1 << 9), }; static int try_get_cap_refs(struct inode *inode, int need, int want, @@ -2584,7 +2627,6 @@ static int try_get_cap_refs(struct inode *inode, int need, int want, struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; int ret = 0; int have, implemented; - int file_wanted; bool snap_rwsem_locked = false; dout("get_cap_refs %p need %s want %s\n", inode, @@ -2600,15 +2642,6 @@ again: goto out_unlock; } - /* make sure file is actually open */ - file_wanted = __ceph_caps_file_wanted(ci); - if ((file_wanted & need) != need) { - dout("try_get_cap_refs need %s file_wanted %s, EBADF\n", - ceph_cap_string(need), ceph_cap_string(file_wanted)); - ret = -EBADF; - goto out_unlock; - } - /* finish pending truncate */ while (ci->i_truncate_pending) { spin_unlock(&ci->i_ceph_lock); @@ -2719,6 +2752,9 @@ again: ceph_cap_string(have), ceph_cap_string(need)); } out_unlock: + + __ceph_touch_fmode(ci, mdsc, flags); + spin_unlock(&ci->i_ceph_lock); if (snap_rwsem_locked) up_read(&mdsc->snap_rwsem); @@ -2756,10 +2792,20 @@ static void check_max_size(struct inode *inode, loff_t endoff) ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); } +static inline int get_used_fmode(int caps) +{ + int fmode = 0; + if (caps & CEPH_CAP_FILE_RD) + fmode |= CEPH_FILE_MODE_RD; + if (caps & CEPH_CAP_FILE_WR) + fmode |= CEPH_FILE_MODE_WR; + return fmode; +} + int ceph_try_get_caps(struct inode *inode, int need, int want, bool nonblock, int *got) { - int ret; + int ret, flags; BUG_ON(need & ~CEPH_CAP_FILE_RD); BUG_ON(want & ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO | @@ -2771,8 +2817,11 @@ int ceph_try_get_caps(struct inode *inode, int need, int want, return ret; } - ret = try_get_cap_refs(inode, need, want, 0, - (nonblock ? NON_BLOCKING : 0), got); + flags = get_used_fmode(need | want); + if (nonblock) + flags |= NON_BLOCKING; + + ret = try_get_cap_refs(inode, need, want, 0, flags, got); return ret == -EAGAIN ? 0 : ret; } @@ -2798,11 +2847,15 @@ int ceph_get_caps(struct file *filp, int need, int want, fi->filp_gen != READ_ONCE(fsc->filp_gen)) return -EBADF; + flags = get_used_fmode(need | want); + while (true) { if (endoff > 0) check_max_size(inode, endoff); - flags = atomic_read(&fi->num_locks) ? 
CHECK_FILELOCK : 0; + flags &= CEPH_FILE_MODE_MASK; + if (atomic_read(&fi->num_locks)) + flags |= CHECK_FILELOCK; _got = 0; ret = try_get_cap_refs(inode, need, want, endoff, flags, &_got); @@ -2822,6 +2875,8 @@ int ceph_get_caps(struct file *filp, int need, int want, list_add(&cw.list, &mdsc->cap_wait_list); spin_unlock(&mdsc->caps_list_lock); + /* make sure used fmode not timeout */ + ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS); add_wait_queue(&ci->i_cap_wq, &wait); flags |= NON_BLOCKING; @@ -2835,6 +2890,7 @@ int ceph_get_caps(struct file *filp, int need, int want, } remove_wait_queue(&ci->i_cap_wq, &wait); + ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS); spin_lock(&mdsc->caps_list_lock); list_del(&cw.list); @@ -2854,7 +2910,7 @@ int ceph_get_caps(struct file *filp, int need, int want, if (ret < 0) { if (ret == -ESTALE) { /* session was killed, try renew caps */ - ret = ceph_renew_caps(inode); + ret = ceph_renew_caps(inode, flags); if (ret == 0) continue; } @@ -4153,6 +4209,33 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) dout("flush_dirty_caps done\n"); } +void __ceph_touch_fmode(struct ceph_inode_info *ci, + struct ceph_mds_client *mdsc, int fmode) +{ + unsigned long now = jiffies; + if (fmode & CEPH_FILE_MODE_RD) + ci->i_last_rd = now; + if (fmode & CEPH_FILE_MODE_WR) + ci->i_last_wr = now; + /* queue periodic check */ + if (fmode && + __ceph_is_any_real_caps(ci) && + list_empty(&ci->i_cap_delay_list)) + __cap_delay_requeue(mdsc, ci, true); +} + +void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) +{ + int i; + int bits = (fmode << 1) | 1; + spin_lock(&ci->i_ceph_lock); + for (i = 0; i < CEPH_FILE_MODE_BITS; i++) { + if (bits & (1 << i)) + ci->i_nr_by_mode[i] += count; + } + spin_unlock(&ci->i_ceph_lock); +} + void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode) { int i; @@ -4168,26 +4251,18 @@ void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode) * we may need to release capabilities to the MDS (or schedule * their delayed release). */ -void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) +void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count) { - int i, last = 0; + int i; int bits = (fmode << 1) | 1; spin_lock(&ci->i_ceph_lock); for (i = 0; i < CEPH_FILE_MODE_BITS; i++) { if (bits & (1 << i)) { - BUG_ON(ci->i_nr_by_mode[i] == 0); - if (--ci->i_nr_by_mode[i] == 0) - last++; + BUG_ON(ci->i_nr_by_mode[i] < count); + ci->i_nr_by_mode[i] -= count; } } - dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n", - &ci->vfs_inode, fmode, - ci->i_nr_by_mode[0], ci->i_nr_by_mode[1], - ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]); spin_unlock(&ci->i_ceph_lock); - - if (last && ci->i_vino.snap == CEPH_NOSNAP) - ceph_check_caps(ci, 0, NULL); } /* diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index ee6b319e5481..d594c2627430 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -335,8 +335,11 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) ctx->pos = 2; } - /* can we use the dcache? */ spin_lock(&ci->i_ceph_lock); + /* request Fx cap. if have Fx, we don't need to release Fs cap + * for later create/unlink. */ + __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_WR); + /* can we use the dcache? 
*/ if (ceph_test_mount_opt(fsc, DCACHE) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && @@ -760,6 +763,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, ceph_test_mount_opt(fsc, DCACHE) && __ceph_dir_is_complete(ci) && (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { + __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD); spin_unlock(&ci->i_ceph_lock); dout(" dir %p complete, -ENOENT\n", dir); d_add(dentry, NULL); @@ -1621,7 +1625,8 @@ static int __dir_lease_try_check(const struct dentry *dentry) /* * Check if directory-wide content lease/cap is valid. */ -static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) +static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry, + struct ceph_mds_client *mdsc) { struct ceph_inode_info *ci = ceph_inode(dir); int valid; @@ -1629,7 +1634,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) spin_lock(&ci->i_ceph_lock); valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1); - shared_gen = atomic_read(&ci->i_shared_gen); + if (valid) { + __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD); + shared_gen = atomic_read(&ci->i_shared_gen); + } spin_unlock(&ci->i_ceph_lock); if (valid) { struct ceph_dentry_info *di; @@ -1655,6 +1663,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) int valid = 0; struct dentry *parent; struct inode *dir, *inode; + struct ceph_mds_client *mdsc; if (flags & LOOKUP_RCU) { parent = READ_ONCE(dentry->d_parent); @@ -1671,6 +1680,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry, dentry, inode, ceph_dentry(dentry)->offset); + mdsc = ceph_sb_to_client(dir->i_sb)->mdsc; + /* always trust cached snapped dentries, snapdir dentry */ if (ceph_snap(dir) != CEPH_NOSNAP) { dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry, @@ -1682,7 +1693,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) valid = dentry_lease_is_valid(dentry, flags); if (valid == -ECHILD) return valid; - if (valid || dir_lease_is_valid(dir, dentry)) { + if (valid || dir_lease_is_valid(dir, dentry, mdsc)) { if (inode) valid = ceph_is_any_caps(inode); else @@ -1691,8 +1702,6 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) } if (!valid) { - struct ceph_mds_client *mdsc = - ceph_sb_to_client(dir->i_sb)->mdsc; struct ceph_mds_request *req; int op, err; u32 mask; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a6c484123d98..6239cff491e2 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -213,7 +213,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file, struct ceph_dir_file_info *dfi = kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL); if (!dfi) { - ceph_put_fmode(ci, fmode); /* clean up */ + ceph_put_fmode(ci, fmode, 1); /* clean up */ return -ENOMEM; } @@ -224,7 +224,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file, } else { fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL); if (!fi) { - ceph_put_fmode(ci, fmode); /* clean up */ + ceph_put_fmode(ci, fmode, 1); /* clean up */ return -ENOMEM; } @@ -263,7 +263,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFLNK: dout("init_file %p %p 0%o (symlink)\n", inode, file, inode->i_mode); - ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ + ceph_put_fmode(ceph_inode(inode), fmode, 1); /* clean up */ break; default: @@ -273,7 +273,7 @@ 
static int ceph_init_file(struct inode *inode, struct file *file, int fmode) * we need to drop the open ref now, since we don't * have .release set to ceph_release. */ - ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ + ceph_put_fmode(ceph_inode(inode), fmode, 1); /* clean up */ BUG_ON(inode->i_fop->release == ceph_release); /* call the proper open fop */ @@ -285,14 +285,15 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) /* * try renew caps after session gets killed. */ -int ceph_renew_caps(struct inode *inode) +int ceph_renew_caps(struct inode *inode, int fmode) { - struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; + struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_request *req; int err, flags, wanted; spin_lock(&ci->i_ceph_lock); + __ceph_touch_fmode(ci, mdsc, fmode); wanted = __ceph_caps_file_wanted(ci); if (__ceph_is_any_real_caps(ci) && (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) { @@ -405,6 +406,7 @@ int ceph_open(struct inode *inode, struct file *file) } else if (ceph_snap(inode) != CEPH_NOSNAP && (ci->i_snap_caps & wanted) == wanted) { __ceph_get_fmode(ci, fmode); + __ceph_touch_fmode(ci, mdsc, fmode); spin_unlock(&ci->i_ceph_lock); return ceph_init_file(inode, file, fmode); } @@ -781,7 +783,7 @@ retry: } out_req: if (!req->r_err && req->r_target_inode) - ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode); + ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode, 1); ceph_mdsc_put_request(req); out_ctx: ceph_release_acl_sec_ctx(&as_ctx); @@ -798,7 +800,7 @@ int ceph_release(struct inode *inode, struct file *file) dout("release inode %p dir file %p\n", inode, file); WARN_ON(!list_empty(&dfi->file_info.rw_contexts)); - ceph_put_fmode(ci, dfi->file_info.fmode); + ceph_put_fmode(ci, dfi->file_info.fmode, 1); if (dfi->last_readdir) ceph_mdsc_put_request(dfi->last_readdir); @@ -810,7 +812,8 @@ int ceph_release(struct inode *inode, struct file *file) dout("release inode %p regular file %p\n", inode, file); WARN_ON(!list_empty(&fi->rw_contexts)); - ceph_put_fmode(ci, fi->fmode); + ceph_put_fmode(ci, fi->fmode, 1); + kmem_cache_free(ceph_file_cachep, fi); } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 73f986efb1fd..a618e8ae9d0f 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -479,6 +479,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_head_snapc = NULL; ci->i_snap_caps = 0; + ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ; for (i = 0; i < CEPH_FILE_MODE_BITS; i++) ci->i_nr_by_mode[i] = 0; @@ -639,7 +640,7 @@ int ceph_fill_file_size(struct inode *inode, int issued, if ((issued & (CEPH_CAP_FILE_CACHE| CEPH_CAP_FILE_BUFFER)) || mapping_mapped(inode->i_mapping) || - __ceph_caps_file_wanted(ci)) { + __ceph_is_file_opened(ci)) { ci->i_truncate_pending++; queue_trunc = 1; } @@ -1013,6 +1014,13 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page, fill_inline = true; } + if (cap_fmode >= 0) { + if (!info_caps) + pr_warn("mds issued no caps on %llx.%llx\n", + ceph_vinop(inode)); + __ceph_touch_fmode(ci, mdsc, cap_fmode); + } + spin_unlock(&ci->i_ceph_lock); if (fill_inline) diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index c90f03beb15d..6e061bf62ad4 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -243,11 +243,13 @@ static long ceph_ioctl_lazyio(struct file *file) struct ceph_file_info *fi = file->private_data; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = 
ceph_inode(inode); + struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) { spin_lock(&ci->i_ceph_lock); fi->fmode |= CEPH_FILE_MODE_LAZY; ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++; + __ceph_touch_fmode(ci, mdsc, fi->fmode); spin_unlock(&ci->i_ceph_lock); dout("ioctl_layzio: file %p marked lazy\n", file); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 68b8afded466..486f91f9685b 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2486,7 +2486,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, if (req->r_inode_drop) releases += ceph_encode_inode_release(&p, req->r_inode ? req->r_inode : d_inode(req->r_dentry), - mds, req->r_inode_drop, req->r_inode_unless, 0); + mds, req->r_inode_drop, req->r_inode_unless, + req->r_op == CEPH_MDS_OP_READDIR); if (req->r_dentry_drop) releases += ceph_encode_dentry_release(&p, req->r_dentry, req->r_parent, mds, req->r_dentry_drop, @@ -2833,7 +2834,13 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, if (req->r_inode) ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); if (req->r_parent) { - ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); + struct ceph_inode_info *ci = ceph_inode(req->r_parent); + int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ? + CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD; + spin_lock(&ci->i_ceph_lock); + ceph_take_cap_refs(ci, CEPH_CAP_PIN, false); + __ceph_touch_fmode(ci, mdsc, fmode); + spin_unlock(&ci->i_ceph_lock); ihold(req->r_parent); } if (req->r_old_dentry_dir) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 356ad7b46b85..63dd8e61ba88 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -366,6 +366,8 @@ struct ceph_inode_info { dirty|flushing caps */ unsigned i_snap_caps; /* cap bits for snapped files */ + unsigned long i_last_rd; + unsigned long i_last_wr; int i_nr_by_mode[CEPH_FILE_MODE_BITS]; /* open file counts */ struct mutex i_truncate_mutex; @@ -680,6 +682,10 @@ extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci, extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask); extern int __ceph_caps_used(struct ceph_inode_info *ci); +static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci) +{ + return ci->i_nr_by_mode[0]; +} extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci); extern int __ceph_caps_wanted(struct ceph_inode_info *ci); @@ -1093,7 +1099,10 @@ extern int ceph_try_get_caps(struct inode *inode, /* for counting open files by mode */ extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode); -extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode); +extern void ceph_get_fmode(struct ceph_inode_info *ci, int mode, int count); +extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode, int count); +extern void __ceph_touch_fmode(struct ceph_inode_info *ci, + struct ceph_mds_client *mdsc, int fmode); /* addr.c */ extern const struct address_space_operations ceph_aops; @@ -1105,7 +1114,7 @@ extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc); /* file.c */ extern const struct file_operations ceph_file_fops; -extern int ceph_renew_caps(struct inode *inode); +extern int ceph_renew_caps(struct inode *inode, int fmode); extern int ceph_open(struct inode *inode, struct file *file); extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned flags, umode_t mode); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 
e035c5194005..ebf5ba62b772 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -568,6 +568,7 @@ struct ceph_filelock { #define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ #define CEPH_FILE_MODE_LAZY 4 /* lazy io */ #define CEPH_FILE_MODE_BITS 4 +#define CEPH_FILE_MODE_MASK ((1 << CEPH_FILE_MODE_BITS) - 1) int ceph_flags_to_mode(int flags); -- cgit v1.2.3-58-ga151 From 72e0ef0e5f067fd991f702f0b2635d911d0cf208 Mon Sep 17 00:00:00 2001 From: Mikel Rychliski Date: Wed, 18 Mar 2020 22:16:23 -0400 Subject: PCI: Use ioremap(), not phys_to_virt() for platform ROM On some EFI systems, the video BIOS is provided by the EFI firmware. The boot stub code stores the physical address of the ROM image in pdev->rom. Currently we attempt to access this pointer using phys_to_virt(), which doesn't work with CONFIG_HIGHMEM. On these systems, attempting to load the radeon module on an x86_32 kernel can result in the following: BUG: unable to handle page fault for address: 3e8ed03c #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page *pde = 00000000 Oops: 0000 [#1] PREEMPT SMP CPU: 0 PID: 317 Comm: systemd-udevd Not tainted 5.6.0-rc3-next-20200228 #2 Hardware name: Apple Computer, Inc. MacPro1,1/Mac-F4208DC8, BIOS MP11.88Z.005C.B08.0707021221 07/02/07 EIP: radeon_get_bios+0x5ed/0xe50 [radeon] Code: 00 00 84 c0 0f 85 12 fd ff ff c7 87 64 01 00 00 00 00 00 00 8b 47 08 8b 55 b0 e8 1e 83 e1 d6 85 c0 74 1a 8b 55 c0 85 d2 74 13 <80> 38 55 75 0e 80 78 01 aa 0f 84 a4 03 00 00 8d 74 26 00 68 dc 06 EAX: 3e8ed03c EBX: 00000000 ECX: 3e8ed03c EDX: 00010000 ESI: 00040000 EDI: eec04000 EBP: eef3fc60 ESP: eef3fbe0 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010206 CR0: 80050033 CR2: 3e8ed03c CR3: 2ec77000 CR4: 000006d0 Call Trace: r520_init+0x26/0x240 [radeon] radeon_device_init+0x533/0xa50 [radeon] radeon_driver_load_kms+0x80/0x220 [radeon] drm_dev_register+0xa7/0x180 [drm] radeon_pci_probe+0x10f/0x1a0 [radeon] pci_device_probe+0xd4/0x140 Fix the issue by updating all drivers which can access a platform-provided ROM. Instead of calling the helper function pci_platform_rom(), which uses phys_to_virt(), call ioremap() directly on pdev->rom. radeon_read_platform_bios() previously directly accessed an __iomem pointer. Avoid this by calling memcpy_fromio() instead of kmemdup(). pci_platform_rom() now has no remaining callers, so remove it.
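For illustration, the access pattern that the amdgpu, nouveau and radeon hunks below converge on can be condensed into one sketch. The helper name copy_platform_rom() and its factoring are invented here; the pci_dev rom/romlen fields and the ioremap()/memcpy_fromio()/iounmap() sequence are exactly what the patch uses:

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>

/*
 * Illustrative only -- not part of the patch. The point is that the
 * platform ROM is physical memory that may not be directly mapped, so
 * it must be ioremap()ed and copied out with memcpy_fromio(), never
 * dereferenced through phys_to_virt().
 */
static void *copy_platform_rom(struct pci_dev *pdev, size_t *len)
{
	void __iomem *rom;
	void *copy;

	/* The EFI boot stub leaves the ROM's physical address here. */
	if (!pdev->rom || pdev->romlen == 0)
		return NULL;

	copy = kzalloc(pdev->romlen, GFP_KERNEL);
	if (!copy)
		return NULL;

	rom = ioremap(pdev->rom, pdev->romlen);
	if (!rom) {
		kfree(copy);
		return NULL;
	}

	memcpy_fromio(copy, rom, pdev->romlen);	/* __iomem-safe copy */
	iounmap(rom);

	*len = pdev->romlen;
	return copy;	/* caller frees with kfree() */
}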
Link: https://lore.kernel.org/r/20200319021623.5426-1-mikel@mikelr.com Signed-off-by: Mikel Rychliski Signed-off-by: Bjorn Helgaas Acked-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 31 +++++++++++++--------- .../gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c | 17 ++++++++++-- drivers/gpu/drm/radeon/radeon_bios.c | 30 +++++++++++++-------- drivers/pci/rom.c | 17 ------------ include/linux/pci.h | 1 - 5 files changed, 52 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 50dff69a0f6e..b1172d93c99c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + size_t romlen = adev->pdev->romlen; + void __iomem *bios; adev->bios = NULL; - bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false; - memcpy_fromio(adev->bios, bios, size); + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; - if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); - return false; - } + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios); - adev->bios_size = size; + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen; return true; +free_bios: + kfree(adev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09dc5f..8d9812a51ef6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name) else return ERR_PTR(-ENODEV); + if (!pdev->rom || pdev->romlen == 0) + return ERR_PTR(-ENODEV); + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { + priv->size = pdev->romlen; if (ret = -ENODEV, - (priv->rom = pci_platform_rom(pdev, &priv->size))) + (priv->rom = ioremap(pdev->rom, pdev->romlen))) return priv; kfree(priv); } @@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); } +static void +platform_fini(void *data) +{ + struct priv *priv = data; + + iounmap(priv->rom); + kfree(priv); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c42f73fad3e3..bb29cf02974d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev) static bool radeon_read_platform_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = rdev->pdev->rom; + size_t romlen = rdev->pdev->romlen; + void __iomem *bios; rdev->bios = NULL; - bios = pci_platform_rom(rdev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + rdev->bios = kzalloc(romlen, GFP_KERNEL); + 
if (!rdev->bios) return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } + + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(rdev->bios, bios, romlen); + iounmap(bios); + + if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) + goto free_bios; return true; +free_bios: + kfree(rdev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 137bf0cee897..8fc9a4e911e3 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) pci_disable_rom(pdev); } EXPORT_SYMBOL(pci_unmap_rom); - -/** - * pci_platform_rom - provides a pointer to any ROM image provided by the - * platform - * @pdev: pointer to pci device struct - * @size: pointer to receive size of pci window over ROM - */ -void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) -{ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt((phys_addr_t)pdev->rom); - } - - return NULL; -} -EXPORT_SYMBOL(pci_platform_rom); diff --git a/include/linux/pci.h b/include/linux/pci.h index 3840a541a9de..7268dcf1f23e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1214,7 +1214,6 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); -void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); -- cgit v1.2.3-58-ga151 From 2c8d5a2dc1e335d895dea45164f5d4d850b134da Mon Sep 17 00:00:00 2001 From: Ivan Kokshaysky Date: Sat, 14 Mar 2020 19:43:55 +0000 Subject: PCI: Add support for root bus sizing In certain cases we should be able to enumerate IO and MEM ranges of all PCI devices installed in the system, and then set respective host bridge apertures based on the calculated size and alignment. This is needed particularly when firmware is broken and fails to assign bridge windows properly, as on the Alpha UP1500 platform. Almost everything is already in place, and the required changes are minimal: - add "size_windows" flag to struct pci_host_bridge: when set, it instructs __pci_bus_size_bridges() to continue with the root bus; - in the __pci_bus_size_bridges() path: add checks for bus->self, as it can legitimately be null for the root bus. Link: https://lore.kernel.org/r/20200314194355.GA12510@mail.rc.ru Tested-by: Matt Turner Signed-off-by: Ivan Kokshaysky Signed-off-by: Bjorn Helgaas --- drivers/pci/setup-bus.c | 34 ++++++++++++++++++++++------------ include/linux/pci.h | 1 + 2 files changed, 23 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index f2461bf9243d..bbcef1a053ab 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -846,7 +846,7 @@ static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type) * Per spec, I/O windows are 4K-aligned, but some bridges have * an extension to support 1K alignment.
*/ - if (bus->self->io_window_1k) + if (bus->self && bus->self->io_window_1k) align = PCI_P2P_DEFAULT_IO_ALIGN_1K; else align = PCI_P2P_DEFAULT_IO_ALIGN; @@ -920,7 +920,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, calculate_iosize(size, min_size, size1, add_size, children_add_size, resource_size(b_res), min_align); if (!size0 && !size1) { - if (b_res->start || b_res->end) + if (bus->self && (b_res->start || b_res->end)) pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n", b_res, &bus->busn_res); b_res->flags = 0; @@ -930,7 +930,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, b_res->start = min_align; b_res->end = b_res->start + size0 - 1; b_res->flags |= IORESOURCE_STARTALIGN; - if (size1 > size0 && realloc_head) { + if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n", @@ -1073,7 +1073,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, calculate_memsize(size, min_size, add_size, children_add_size, resource_size(b_res), add_align); if (!size0 && !size1) { - if (b_res->start || b_res->end) + if (bus->self && (b_res->start || b_res->end)) pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n", b_res, &bus->busn_res); b_res->flags = 0; @@ -1082,7 +1082,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->start = min_align; b_res->end = size0 + min_align - 1; b_res->flags |= IORESOURCE_STARTALIGN; - if (size1 > size0 && realloc_head) { + if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", b_res, &bus->busn_res, @@ -1196,8 +1196,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) unsigned long mask, prefmask, type2 = 0, type3 = 0; resource_size_t additional_io_size = 0, additional_mmio_size = 0, additional_mmio_pref_size = 0; - struct resource *b_res; - int ret; + struct resource *pref; + struct pci_host_bridge *host; + int hdr_type, i, ret; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; @@ -1217,10 +1218,20 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) } /* The root bus? */ - if (pci_is_root_bus(bus)) - return; + if (pci_is_root_bus(bus)) { + host = to_pci_host_bridge(bus->bridge); + if (!host->size_windows) + return; + pci_bus_for_each_resource(bus, pref, i) + if (pref && (pref->flags & IORESOURCE_PREFETCH)) + break; + hdr_type = -1; /* Intentionally invalid - not a PCI device. */ + } else { + pref = &bus->self->resource[PCI_BRIDGE_RESOURCES + 2]; + hdr_type = bus->self->hdr_type; + } - switch (bus->self->hdr_type) { + switch (hdr_type) { case PCI_HEADER_TYPE_CARDBUS: /* Don't size CardBuses yet */ break; @@ -1242,10 +1253,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) * the size required to put all 64-bit prefetchable * resources in it. 
*/ - b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES]; mask = IORESOURCE_MEM; prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; - if (b_res[2].flags & IORESOURCE_MEM_64) { + if (pref && (pref->flags & IORESOURCE_MEM_64)) { prefmask |= IORESOURCE_MEM_64; ret = pbus_size_mem(bus, prefmask, prefmask, prefmask, prefmask, diff --git a/include/linux/pci.h b/include/linux/pci.h index 7268dcf1f23e..15734731ad87 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -511,6 +511,7 @@ struct pci_host_bridge { unsigned int native_pme:1; /* OS may use PCIe PME */ unsigned int native_ltr:1; /* OS may use PCIe LTR */ unsigned int preserve_config:1; /* Preserve FW resource setup */ + unsigned int size_windows:1; /* Enable root bus sizing */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, -- cgit v1.2.3-58-ga151 From 3ad1f3a33286dc67d595f6fab3a3a9e583bc738a Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Mon, 10 Feb 2020 22:35:18 +0100 Subject: pwm: Implement some checks for lowlevel drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are some expectations which the callbacks provided by lowlevel drivers should fulfill. Implement checks that help driver authors get these semantics right. As these checks have some overhead, they can be disabled using a Kconfig setting. Signed-off-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/Kconfig | 9 ++++ drivers/pwm/core.c | 135 +++++++++++++++++++++++++++++++++++++++++++++++++--- include/linux/pwm.h | 4 +- 3 files changed, 140 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 30190beeb6e9..e21834f44a29 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -33,6 +33,15 @@ config PWM_SYSFS bool default y if SYSFS +config PWM_DEBUG + bool "PWM lowlevel drivers additional checks and debug messages" + depends on DEBUG_KERNEL + help + This option enables some additional checks to help lowlevel driver + authors to get their callbacks implemented correctly. + It is expected to introduce some runtime overhead and diagnostic + output to the kernel log, so only enable while working on a driver.
+ config PWM_AB8500 tristate "AB8500 PWM support" depends on AB8500_CORE && ARCH_U8500 diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 5a7f6598c05f..e9b9283cff28 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -120,6 +120,9 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) if (pwm->chip->ops->get_state) { pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state); trace_pwm_get(pwm, &pwm->state); + + if (IS_ENABLED(CONFIG_PWM_DEBUG)) + pwm->last = pwm->state; } set_bit(PWMF_REQUESTED, &pwm->flags); @@ -232,17 +235,28 @@ void *pwm_get_chip_data(struct pwm_device *pwm) } EXPORT_SYMBOL_GPL(pwm_get_chip_data); -static bool pwm_ops_check(const struct pwm_ops *ops) +static bool pwm_ops_check(const struct pwm_chip *chip) { + + const struct pwm_ops *ops = chip->ops; + /* driver supports legacy, non-atomic operation */ - if (ops->config && ops->enable && ops->disable) - return true; + if (ops->config && ops->enable && ops->disable) { + if (IS_ENABLED(CONFIG_PWM_DEBUG)) + dev_warn(chip->dev, + "Driver needs updating to atomic API\n"); - /* driver supports atomic operation */ - if (ops->apply) return true; + } - return false; + if (!ops->apply) + return false; + + if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state) + dev_warn(chip->dev, + "Please implement the .get_state() callback\n"); + + return true; } /** @@ -266,7 +280,7 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, if (!chip || !chip->dev || !chip->ops || !chip->npwm) return -EINVAL; - if (!pwm_ops_check(chip->ops)) + if (!pwm_ops_check(chip)) return -EINVAL; mutex_lock(&pwm_lock); @@ -450,6 +464,107 @@ void pwm_free(struct pwm_device *pwm) } EXPORT_SYMBOL_GPL(pwm_free); +void pwm_apply_state_debug(struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct pwm_state *last = &pwm->last; + struct pwm_chip *chip = pwm->chip; + struct pwm_state s1, s2; + int err; + + if (!IS_ENABLED(CONFIG_PWM_DEBUG)) + return; + + /* No reasonable diagnosis possible without .get_state() */ + if (!chip->ops->get_state) + return; + + /* + * *state was just applied. Read out the hardware state and do some + * checks. + */ + + chip->ops->get_state(chip, pwm, &s1); + trace_pwm_get(pwm, &s1); + + /* + * The lowlevel driver either ignored .polarity (which is a bug) or as + * best effort inverted .polarity and fixed .duty_cycle respectively. + * Undo this inversion and fixup for further tests.
+ */ + if (s1.enabled && s1.polarity != state->polarity) { + s2.polarity = state->polarity; + s2.duty_cycle = s1.period - s1.duty_cycle; + s2.period = s1.period; + s2.enabled = s1.enabled; + } else { + s2 = s1; + } + + if (s2.polarity != state->polarity && + state->duty_cycle < state->period) + dev_warn(chip->dev, ".apply ignored .polarity\n"); + + if (state->enabled && + last->polarity == state->polarity && + last->period > s2.period && + last->period <= state->period) + dev_warn(chip->dev, + ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n", + state->period, s2.period, last->period); + + if (state->enabled && state->period < s2.period) + dev_warn(chip->dev, + ".apply is supposed to round down period (requested: %u, applied: %u)\n", + state->period, s2.period); + + if (state->enabled && + last->polarity == state->polarity && + last->period == s2.period && + last->duty_cycle > s2.duty_cycle && + last->duty_cycle <= state->duty_cycle) + dev_warn(chip->dev, + ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n", + state->duty_cycle, state->period, + s2.duty_cycle, s2.period, + last->duty_cycle, last->period); + + if (state->enabled && state->duty_cycle < s2.duty_cycle) + dev_warn(chip->dev, + ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n", + state->duty_cycle, state->period, + s2.duty_cycle, s2.period); + + if (!state->enabled && s2.enabled && s2.duty_cycle > 0) + dev_warn(chip->dev, + "requested disabled, but yielded enabled with duty > 0"); + + /* reapply the state that the driver reported being configured. */ + err = chip->ops->apply(chip, pwm, &s1); + if (err) { + *last = s1; + dev_err(chip->dev, "failed to reapply current setting\n"); + return; + } + + trace_pwm_apply(pwm, &s1); + + chip->ops->get_state(chip, pwm, last); + trace_pwm_get(pwm, last); + + /* reapplication of the current state should give an exact match */ + if (s1.enabled != last->enabled || + s1.polarity != last->polarity || + (s1.enabled && s1.period != last->period) || + (s1.enabled && s1.duty_cycle != last->duty_cycle)) { + dev_err(chip->dev, + ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n", + s1.enabled, s1.polarity, s1.duty_cycle, s1.period, + last->enabled, last->polarity, last->duty_cycle, + last->period); + } +} + /** * pwm_apply_state() - atomically apply a new state to a PWM device * @pwm: PWM device @@ -480,6 +595,12 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) trace_pwm_apply(pwm, state); pwm->state = *state; + + /* + * only do this after pwm->state was applied as some + * implementations of .get_state depend on this + */ + pwm_apply_state_debug(pwm, state); } else { /* * FIXME: restore the initial state in case of error. 
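To make the checked semantics concrete, here is a minimal sketch of an .apply/.get_state pair that keeps PWM_DEBUG quiet. Everything named foo_* is hypothetical (a fake device with a 1 MHz clock, registers modeled as plain fields); the behaviors it demonstrates -- round period and duty cycle down, honor .polarity, and report back exactly what was programmed so a reapply is idempotent -- are the ones the checks above enforce:

#include <linux/module.h>
#include <linux/pwm.h>

/* Hypothetical device: 1 MHz functional clock, one tick per 1000 ns. */
#define FOO_NS_PER_TICK 1000

struct foo_pwm {
	struct pwm_chip chip;
	/* Stand-ins for what would be MMIO registers, in ticks. */
	u32 period;
	u32 duty;
	bool inverted;
	bool enabled;
};

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *foo = container_of(chip, struct foo_pwm, chip);

	/*
	 * Integer division rounds down, as the checks demand: the
	 * implemented period/duty_cycle must never exceed the request.
	 */
	foo->period = state->period / FOO_NS_PER_TICK;
	foo->duty = state->duty_cycle / FOO_NS_PER_TICK;
	foo->inverted = state->polarity == PWM_POLARITY_INVERSED;
	foo->enabled = state->enabled;

	return 0;
}

static void foo_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
			      struct pwm_state *state)
{
	struct foo_pwm *foo = container_of(chip, struct foo_pwm, chip);

	/*
	 * Report exactly what is programmed: pwm_apply_state_debug()
	 * reapplies this state and warns unless the result matches.
	 */
	state->period = foo->period * FOO_NS_PER_TICK;
	state->duty_cycle = foo->duty * FOO_NS_PER_TICK;
	state->polarity = foo->inverted ? PWM_POLARITY_INVERSED
					: PWM_POLARITY_NORMAL;
	state->enabled = foo->enabled;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
	.get_state = foo_pwm_get_state,
	.owner = THIS_MODULE,
};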
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 0ef808d925bb..2635b2a55090 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -71,7 +71,8 @@ struct pwm_state { * @chip: PWM chip providing this PWM device * @chip_data: chip-private data associated with the PWM device * @args: PWM arguments - * @state: curent PWM channel state + * @state: last applied state + * @last: last implemented state (for PWM_DEBUG) */ struct pwm_device { const char *label; @@ -83,6 +84,7 @@ struct pwm_device { struct pwm_args args; struct pwm_state state; + struct pwm_state last; }; /** -- cgit v1.2.3-58-ga151 From 54091b5f195b45a9a7d394008c06d2b9646ab126 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Thu, 12 Mar 2020 09:52:06 +0530 Subject: pwm: omap-dmtimer: Drop unused header file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pwm_omap_dmtimer.h is used only: - to typedef struct omap_dm_timer to pwm_omap_dmtimer - for macro PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE Rest of the file is pretty much unused. So reuse omap_dm_timer and OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE in pwm-omap-dmtimer.c and delete the header file. Acked-by: Tony Lindgren Signed-off-by: Lokesh Vutla Acked-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/pwm-omap-dmtimer.c | 20 +++--- include/clocksource/timer-ti-dm.h | 3 +- include/linux/platform_data/pwm_omap_dmtimer.h | 90 -------------------------- 3 files changed, 10 insertions(+), 103 deletions(-) delete mode 100644 include/linux/platform_data/pwm_omap_dmtimer.h (limited to 'include/linux') diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index 9e4378dc6897..e4f5f710bfaa 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -20,8 +20,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -34,7 +34,7 @@ struct pwm_omap_dmtimer_chip { struct pwm_chip chip; struct mutex mutex; - pwm_omap_dmtimer *dm_timer; + struct omap_dm_timer *dm_timer; const struct omap_dm_timer_ops *pdata; struct platform_device *dm_timer_pdev; }; @@ -190,10 +190,9 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip, load_value, load_value, match_value, match_value); omap->pdata->set_pwm(omap->dm_timer, - pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED, - true, - PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE, - true); + pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED, + true, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE, + true); /* If config was called while timer was running it must be reenabled.
*/ if (timer_active) @@ -221,10 +220,9 @@ static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip, */ mutex_lock(&omap->mutex); omap->pdata->set_pwm(omap->dm_timer, - polarity == PWM_POLARITY_INVERSED, - true, - PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE, - true); + polarity == PWM_POLARITY_INVERSED, + true, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE, + true); mutex_unlock(&omap->mutex); return 0; @@ -246,7 +244,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) struct pwm_omap_dmtimer_chip *omap; struct dmtimer_platform_data *timer_pdata; const struct omap_dm_timer_ops *pdata; - pwm_omap_dmtimer *dm_timer; + struct omap_dm_timer *dm_timer; u32 v; int ret = 0; diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index 25f05235866e..531ca87fcd08 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -248,8 +248,7 @@ int omap_dm_timers_active(void); /* * The below are inlined to optimize code size for system timers. Other code - * should not need these at all, see - * include/linux/platform_data/pwm_omap_dmtimer.h + * should not need these at all. */ #if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS) static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h deleted file mode 100644 index e7d521e48855..000000000000 --- a/include/linux/platform_data/pwm_omap_dmtimer.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * include/linux/platform_data/pwm_omap_dmtimer.h - * - * OMAP Dual-Mode Timer PWM platform data - * - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ - * Tarun Kanti DebBarma - * Thara Gopinath - * - * Platform device conversion and hwmod support. - * - * Copyright (C) 2005 Nokia Corporation - * Author: Lauri Leukkunen - * PWM and clock framework support by Timo Teras. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __PWM_OMAP_DMTIMER_PDATA_H -#define __PWM_OMAP_DMTIMER_PDATA_H - -/* clock sources */ -#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00 -#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01 -#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02 - -/* timer interrupt enable bits */ -#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2) -#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1) -#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0) - -/* trigger types */ -#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00 -#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01 -#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 - -struct omap_dm_timer; -typedef struct omap_dm_timer pwm_omap_dmtimer; - -struct pwm_omap_dmtimer_pdata { - pwm_omap_dmtimer *(*request_by_node)(struct device_node *np); - pwm_omap_dmtimer *(*request_specific)(int timer_id); - pwm_omap_dmtimer *(*request)(void); - - int (*free)(pwm_omap_dmtimer *timer); - - void (*enable)(pwm_omap_dmtimer *timer); - void (*disable)(pwm_omap_dmtimer *timer); - - int (*get_irq)(pwm_omap_dmtimer *timer); - int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value); - int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask); - - struct clk *(*get_fclk)(pwm_omap_dmtimer *timer); - - int (*start)(pwm_omap_dmtimer *timer); - int (*stop)(pwm_omap_dmtimer *timer); - int (*set_source)(pwm_omap_dmtimer *timer, int source); - - int (*set_load)(pwm_omap_dmtimer *timer, int autoreload, - unsigned int value); - int (*set_match)(pwm_omap_dmtimer *timer, int enable, - unsigned int match); - int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on, - int toggle, int trigger); - int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler); - - unsigned int (*read_counter)(pwm_omap_dmtimer *timer); - int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value); - unsigned int (*read_status)(pwm_omap_dmtimer *timer); - int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value); -}; - -#endif /* __PWM_OMAP_DMTIMER_PDATA_H */ -- cgit v1.2.3-58-ga151 From a0b66a73785ccc8fedbff00383ffe814df9f63c7 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 29 Mar 2020 16:04:05 +0200 Subject: gpio: Rename variable in core APIs There is struct gpio_chip *gc, *chip and *gpiochip, and yes I am responsible for some of the inconsistencies. I want this to be just gc everywhere to minimize cognitive resistance when reading the code: more compact function signatures and less clutter. Purely syntactic changes intended. No semantic effects.
Signed-off-by: Linus Walleij Link: https://lore.kernel.org/r/20200329140405.52276-1-linus.walleij@linaro.org Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 825 ++++++++++++++++++++++---------------------- include/linux/gpio/driver.h | 138 ++++---- 2 files changed, 482 insertions(+), 481 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 2951a8b595c3..c2cc437ce831 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -81,14 +81,14 @@ LIST_HEAD(gpio_devices); static DEFINE_MUTEX(gpio_machine_hogs_mutex); static LIST_HEAD(gpio_machine_hogs); -static void gpiochip_free_hogs(struct gpio_chip *chip); -static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, +static void gpiochip_free_hogs(struct gpio_chip *gc); +static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key); -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); -static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip); -static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); -static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); +static void gpiochip_irqchip_remove(struct gpio_chip *gc); +static int gpiochip_irqchip_init_hw(struct gpio_chip *gc); +static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc); +static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc); static bool gpiolib_initialized; @@ -132,17 +132,17 @@ EXPORT_SYMBOL_GPL(gpio_to_desc); /** * gpiochip_get_desc - get the GPIO descriptor corresponding to the given * hardware number for this chip - * @chip: GPIO chip + * @gc: GPIO chip * @hwnum: hardware number of the GPIO for this chip * * Returns: * A pointer to the GPIO descriptor or ``ERR_PTR(-EINVAL)`` if no GPIO exists * in the given chip for the specified hardware number. 
*/ -struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, +struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum) { - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; if (hwnum >= gdev->ngpio) return ERR_PTR(-EINVAL); @@ -213,11 +213,11 @@ static int gpiochip_find_base(int ngpio) */ int gpiod_get_direction(struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; unsigned offset; int ret; - chip = gpiod_to_chip(desc); + gc = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); /* @@ -228,10 +228,10 @@ int gpiod_get_direction(struct gpio_desc *desc) test_bit(FLAG_IS_OUT, &desc->flags)) return 0; - if (!chip->get_direction) + if (!gc->get_direction) return -ENOTSUPP; - ret = chip->get_direction(chip, offset); + ret = gc->get_direction(gc, offset); if (ret < 0) return ret; @@ -359,16 +359,16 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) return 0; } -static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip) +static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc) { unsigned long *p; - p = bitmap_alloc(chip->ngpio, GFP_KERNEL); + p = bitmap_alloc(gc->ngpio, GFP_KERNEL); if (!p) return NULL; /* Assume by default all GPIOs are valid */ - bitmap_fill(p, chip->ngpio); + bitmap_fill(p, gc->ngpio); return p; } @@ -395,10 +395,10 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc) return 0; } -static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip) +static void gpiochip_free_valid_mask(struct gpio_chip *gc) { - bitmap_free(gpiochip->valid_mask); - gpiochip->valid_mask = NULL; + bitmap_free(gc->valid_mask); + gc->valid_mask = NULL; } static int gpiochip_add_pin_ranges(struct gpio_chip *gc) @@ -409,13 +409,13 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc) return 0; } -bool gpiochip_line_is_valid(const struct gpio_chip *gpiochip, +bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset) { /* No mask means all valid */ - if (likely(!gpiochip->valid_mask)) + if (likely(!gc->valid_mask)) return true; - return test_bit(offset, gpiochip->valid_mask); + return test_bit(offset, gc->valid_mask); } EXPORT_SYMBOL_GPL(gpiochip_line_is_valid); @@ -1156,7 +1156,7 @@ out_free_le: static void gpio_desc_to_lineinfo(struct gpio_desc *desc, struct gpioline_info *info) { - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); @@ -1185,7 +1185,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc, test_bit(FLAG_USED_AS_IRQ, &desc->flags) || test_bit(FLAG_EXPORT, &desc->flags) || test_bit(FLAG_SYSFS, &desc->flags) || - !pinctrl_gpio_can_use_line(chip->base + info->line_offset)) + !pinctrl_gpio_can_use_line(gc->base + info->line_offset)) info->flags |= GPIOLINE_FLAG_KERNEL; if (test_bit(FLAG_IS_OUT, &desc->flags)) info->flags |= GPIOLINE_FLAG_IS_OUT; @@ -1222,13 +1222,13 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct gpio_chardev_data *priv = filp->private_data; struct gpio_device *gdev = priv->gdev; - struct gpio_chip *chip = gdev->chip; + struct gpio_chip *gc = gdev->chip; void __user *ip = (void __user *)arg; struct gpio_desc *desc; __u32 offset; /* We fail any subsequent ioctl():s when the chip is gone */ - if (!chip) + if (!gc) return -ENODEV; /* Fill in the struct and pass to userspace */ @@ -1254,7 +1254,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if 
(copy_from_user(&lineinfo, ip, sizeof(lineinfo))) return -EFAULT; - desc = gpiochip_get_desc(chip, lineinfo.line_offset); + desc = gpiochip_get_desc(gc, lineinfo.line_offset); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -1275,7 +1275,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&offset, ip, sizeof(offset))) return -EFAULT; - desc = gpiochip_get_desc(chip, offset); + desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -1518,12 +1518,12 @@ err_remove_device: return ret; } -static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog) +static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog) { struct gpio_desc *desc; int rv; - desc = gpiochip_get_desc(chip, hog->chip_hwnum); + desc = gpiochip_get_desc(gc, hog->chip_hwnum); if (IS_ERR(desc)) { pr_err("%s: unable to get GPIO desc: %ld\n", __func__, PTR_ERR(desc)); @@ -1536,18 +1536,18 @@ static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog) rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags); if (rv) pr_err("%s: unable to hog GPIO line (%s:%u): %d\n", - __func__, chip->label, hog->chip_hwnum, rv); + __func__, gc->label, hog->chip_hwnum, rv); } -static void machine_gpiochip_add(struct gpio_chip *chip) +static void machine_gpiochip_add(struct gpio_chip *gc) { struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); list_for_each_entry(hog, &gpio_machine_hogs, list) { - if (!strcmp(chip->label, hog->chip_label)) - gpiochip_machine_hog(chip, hog); + if (!strcmp(gc->label, hog->chip_label)) + gpiochip_machine_hog(gc, hog); } mutex_unlock(&gpio_machine_hogs_mutex); @@ -1566,14 +1566,14 @@ static void gpiochip_setup_devs(void) } } -int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, +int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key) { unsigned long flags; int ret = 0; unsigned i; - int base = chip->base; + int base = gc->base; struct gpio_device *gdev; /* @@ -1584,19 +1584,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (!gdev) return -ENOMEM; gdev->dev.bus = &gpio_bus_type; - gdev->chip = chip; - chip->gpiodev = gdev; - if (chip->parent) { - gdev->dev.parent = chip->parent; - gdev->dev.of_node = chip->parent->of_node; + gdev->chip = gc; + gc->gpiodev = gdev; + if (gc->parent) { + gdev->dev.parent = gc->parent; + gdev->dev.of_node = gc->parent->of_node; } #ifdef CONFIG_OF_GPIO /* If the gpiochip has an assigned OF node this takes precedence */ - if (chip->of_node) - gdev->dev.of_node = chip->of_node; + if (gc->of_node) + gdev->dev.of_node = gc->of_node; else - chip->of_node = gdev->dev.of_node; + gc->of_node = gdev->dev.of_node; #endif gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL); @@ -1607,37 +1607,37 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); device_initialize(&gdev->dev); dev_set_drvdata(&gdev->dev, gdev); - if (chip->parent && chip->parent->driver) - gdev->owner = chip->parent->driver->owner; - else if (chip->owner) + if (gc->parent && gc->parent->driver) + gdev->owner = gc->parent->driver->owner; + else if (gc->owner) /* TODO: remove chip->owner */ - gdev->owner = chip->owner; + gdev->owner = gc->owner; else gdev->owner = THIS_MODULE; - gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); + gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), 
GFP_KERNEL); if (!gdev->descs) { ret = -ENOMEM; goto err_free_ida; } - if (chip->ngpio == 0) { - chip_err(chip, "tried to insert a GPIO chip with zero lines\n"); + if (gc->ngpio == 0) { + chip_err(gc, "tried to insert a GPIO chip with zero lines\n"); ret = -EINVAL; goto err_free_descs; } - if (chip->ngpio > FASTPATH_NGPIO) - chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n", - chip->ngpio, FASTPATH_NGPIO); + if (gc->ngpio > FASTPATH_NGPIO) + chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n", + gc->ngpio, FASTPATH_NGPIO); - gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL); + gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL); if (!gdev->label) { ret = -ENOMEM; goto err_free_descs; } - gdev->ngpio = chip->ngpio; + gdev->ngpio = gc->ngpio; gdev->data = data; spin_lock_irqsave(&gpio_lock, flags); @@ -1650,7 +1650,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, * of the sysfs interface anyways. */ if (base < 0) { - base = gpiochip_find_base(chip->ngpio); + base = gpiochip_find_base(gc->ngpio); if (base < 0) { ret = base; spin_unlock_irqrestore(&gpio_lock, flags); @@ -1662,7 +1662,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, * see if anyone makes use of this, else drop this and assign * a poison instead. */ - chip->base = base; + gc->base = base; } gdev->base = base; @@ -1672,7 +1672,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, goto err_free_label; } - for (i = 0; i < chip->ngpio; i++) + for (i = 0; i < gc->ngpio; i++) gdev->descs[i].gdev = gdev; spin_unlock_irqrestore(&gpio_lock, flags); @@ -1683,51 +1683,51 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, INIT_LIST_HEAD(&gdev->pin_ranges); #endif - ret = gpiochip_set_desc_names(chip); + ret = gpiochip_set_desc_names(gc); if (ret) goto err_remove_from_list; - ret = gpiochip_alloc_valid_mask(chip); + ret = gpiochip_alloc_valid_mask(gc); if (ret) goto err_remove_from_list; - ret = of_gpiochip_add(chip); + ret = of_gpiochip_add(gc); if (ret) goto err_free_gpiochip_mask; - ret = gpiochip_init_valid_mask(chip); + ret = gpiochip_init_valid_mask(gc); if (ret) goto err_remove_of_chip; - for (i = 0; i < chip->ngpio; i++) { + for (i = 0; i < gc->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; - if (chip->get_direction && gpiochip_line_is_valid(chip, i)) { + if (gc->get_direction && gpiochip_line_is_valid(gc, i)) { assign_bit(FLAG_IS_OUT, - &desc->flags, !chip->get_direction(chip, i)); + &desc->flags, !gc->get_direction(gc, i)); } else { assign_bit(FLAG_IS_OUT, - &desc->flags, !chip->direction_input); + &desc->flags, !gc->direction_input); } } - ret = gpiochip_add_pin_ranges(chip); + ret = gpiochip_add_pin_ranges(gc); if (ret) goto err_remove_of_chip; - acpi_gpiochip_add(chip); + acpi_gpiochip_add(gc); - machine_gpiochip_add(chip); + machine_gpiochip_add(gc); - ret = gpiochip_irqchip_init_valid_mask(chip); + ret = gpiochip_irqchip_init_valid_mask(gc); if (ret) goto err_remove_acpi_chip; - ret = gpiochip_irqchip_init_hw(chip); + ret = gpiochip_irqchip_init_hw(gc); if (ret) goto err_remove_acpi_chip; - ret = gpiochip_add_irqchip(chip, lock_key, request_key); + ret = gpiochip_add_irqchip(gc, lock_key, request_key); if (ret) goto err_remove_irqchip_mask; @@ -1747,17 +1747,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, return 0; err_remove_irqchip: - gpiochip_irqchip_remove(chip); + gpiochip_irqchip_remove(gc); err_remove_irqchip_mask: - 
gpiochip_irqchip_free_valid_mask(chip); + gpiochip_irqchip_free_valid_mask(gc); err_remove_acpi_chip: - acpi_gpiochip_remove(chip); + acpi_gpiochip_remove(gc); err_remove_of_chip: - gpiochip_free_hogs(chip); - of_gpiochip_remove(chip); + gpiochip_free_hogs(gc); + of_gpiochip_remove(gc); err_free_gpiochip_mask: - gpiochip_remove_pin_ranges(chip); - gpiochip_free_valid_mask(chip); + gpiochip_remove_pin_ranges(gc); + gpiochip_free_valid_mask(gc); err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); @@ -1772,7 +1772,7 @@ err_free_gdev: /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, gdev->base, gdev->base + gdev->ngpio - 1, - chip->label ? : "generic", ret); + gc->label ? : "generic", ret); kfree(gdev); return ret; } @@ -1780,39 +1780,39 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key); /** * gpiochip_get_data() - get per-subdriver data for the chip - * @chip: GPIO chip + * @gc: GPIO chip * * Returns: * The per-subdriver data for the chip. */ -void *gpiochip_get_data(struct gpio_chip *chip) +void *gpiochip_get_data(struct gpio_chip *gc) { - return chip->gpiodev->data; + return gc->gpiodev->data; } EXPORT_SYMBOL_GPL(gpiochip_get_data); /** * gpiochip_remove() - unregister a gpio_chip - * @chip: the chip to unregister + * @gc: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. */ -void gpiochip_remove(struct gpio_chip *chip) +void gpiochip_remove(struct gpio_chip *gc) { - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; unsigned long flags; unsigned int i; /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ gpiochip_sysfs_unregister(gdev); - gpiochip_free_hogs(chip); + gpiochip_free_hogs(gc); /* Numb the device, cancelling all outstanding operations */ gdev->chip = NULL; - gpiochip_irqchip_remove(chip); - acpi_gpiochip_remove(chip); - of_gpiochip_remove(chip); - gpiochip_remove_pin_ranges(chip); - gpiochip_free_valid_mask(chip); + gpiochip_irqchip_remove(gc); + acpi_gpiochip_remove(gc); + of_gpiochip_remove(gc); + gpiochip_remove_pin_ranges(gc); + gpiochip_free_valid_mask(gc); /* * We accept no more calls into the driver from this point, so * NULL the driver data pointer @@ -1821,7 +1821,7 @@ void gpiochip_remove(struct gpio_chip *chip) spin_lock_irqsave(&gpio_lock, flags); for (i = 0; i < gdev->ngpio; i++) { - if (gpiochip_is_requested(chip, i)) + if (gpiochip_is_requested(gc, i)) break; } spin_unlock_irqrestore(&gpio_lock, flags); @@ -1853,31 +1853,31 @@ EXPORT_SYMBOL_GPL(gpiochip_remove); * more gpio_chips. 
*/ struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *chip, + int (*match)(struct gpio_chip *gc, void *data)) { struct gpio_device *gdev; - struct gpio_chip *chip = NULL; + struct gpio_chip *gc = NULL; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) if (gdev->chip && match(gdev->chip, data)) { - chip = gdev->chip; + gc = gdev->chip; break; } spin_unlock_irqrestore(&gpio_lock, flags); - return chip; + return gc; } EXPORT_SYMBOL_GPL(gpiochip_find); -static int gpiochip_match_name(struct gpio_chip *chip, void *data) +static int gpiochip_match_name(struct gpio_chip *gc, void *data) { const char *name = data; - return !strcmp(chip->label, name); + return !strcmp(gc->label, name); } static struct gpio_chip *find_chip_by_name(const char *name) @@ -1917,21 +1917,21 @@ static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) return 0; } -static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip) +static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) { - bitmap_free(gpiochip->irq.valid_mask); - gpiochip->irq.valid_mask = NULL; + bitmap_free(gc->irq.valid_mask); + gc->irq.valid_mask = NULL; } -bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset) { - if (!gpiochip_line_is_valid(gpiochip, offset)) + if (!gpiochip_line_is_valid(gc, offset)) return false; /* No mask means all valid */ - if (likely(!gpiochip->irq.valid_mask)) + if (likely(!gc->irq.valid_mask)) return true; - return test_bit(offset, gpiochip->irq.valid_mask); + return test_bit(offset, gc->irq.valid_mask); } EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid); @@ -1983,16 +1983,16 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc, /** * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip - * @gpiochip: the gpiochip to set the irqchip nested handler to + * @gc: the gpiochip to set the irqchip nested handler to * @irqchip: the irqchip to nest to the gpiochip * @parent_irq: the irq number corresponding to the parent IRQ for this * nested irqchip */ -void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, +void gpiochip_set_nested_irqchip(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int parent_irq) { - gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, NULL); + gpiochip_set_cascaded_irqchip(gc, parent_irq, NULL); } EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip); @@ -2169,7 +2169,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, return ret; } -static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *chip, +static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc, unsigned int offset) { return offset; @@ -2229,7 +2229,7 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) return !!gc->irq.parent_domain; } -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { @@ -2239,7 +2239,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, if (!fwspec) return NULL; - fwspec->fwnode = chip->irq.parent_domain->fwnode; + fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 2; fwspec->param[0] = parent_hwirq; fwspec->param[1] = parent_type; @@ -2248,7 +2248,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, } 
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { @@ -2258,7 +2258,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, if (!fwspec) return NULL; - fwspec->fwnode = chip->irq.parent_domain->fwnode; + fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 4; fwspec->param[0] = 0; fwspec->param[1] = parent_hwirq; @@ -2296,28 +2296,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - struct gpio_chip *chip = d->host_data; + struct gpio_chip *gc = d->host_data; int ret = 0; - if (!gpiochip_irqchip_irq_valid(chip, hwirq)) + if (!gpiochip_irqchip_irq_valid(gc, hwirq)) return -ENXIO; - irq_set_chip_data(irq, chip); + irq_set_chip_data(irq, gc); /* * This lock class tells lockdep that GPIO irqs are in a different * category than their parents, so it won't report false recursion. */ - irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key); - irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler); + irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key); + irq_set_chip_and_handler(irq, gc->irq.chip, gc->irq.handler); /* Chips that use nested thread handlers have them marked */ - if (chip->irq.threaded) + if (gc->irq.threaded) irq_set_nested_thread(irq, 1); irq_set_noprobe(irq); - if (chip->irq.num_parents == 1) - ret = irq_set_parent(irq, chip->irq.parents[0]); - else if (chip->irq.map) - ret = irq_set_parent(irq, chip->irq.map[hwirq]); + if (gc->irq.num_parents == 1) + ret = irq_set_parent(irq, gc->irq.parents[0]); + else if (gc->irq.map) + ret = irq_set_parent(irq, gc->irq.map[hwirq]); if (ret < 0) return ret; @@ -2326,8 +2326,8 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, * No set-up of the hardware will happen if IRQ_TYPE_NONE * is passed as default type. 
*/ - if (chip->irq.default_type != IRQ_TYPE_NONE) - irq_set_irq_type(irq, chip->irq.default_type); + if (gc->irq.default_type != IRQ_TYPE_NONE) + irq_set_irq_type(irq, gc->irq.default_type); return 0; } @@ -2335,9 +2335,9 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_map); void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq) { - struct gpio_chip *chip = d->host_data; + struct gpio_chip *gc = d->host_data; - if (chip->irq.threaded) + if (gc->irq.threaded) irq_set_nested_thread(irq, 0); irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); @@ -2369,9 +2369,9 @@ static const struct irq_domain_ops gpiochip_domain_ops = { int gpiochip_irq_domain_activate(struct irq_domain *domain, struct irq_data *data, bool reserve) { - struct gpio_chip *chip = domain->host_data; + struct gpio_chip *gc = domain->host_data; - return gpiochip_lock_as_irq(chip, data->hwirq); + return gpiochip_lock_as_irq(gc, data->hwirq); } EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate); @@ -2387,17 +2387,17 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate); void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data) { - struct gpio_chip *chip = domain->host_data; + struct gpio_chip *gc = domain->host_data; - return gpiochip_unlock_as_irq(chip, data->hwirq); + return gpiochip_unlock_as_irq(gc, data->hwirq); } EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate); -static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) +static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset) { - struct irq_domain *domain = chip->irq.domain; + struct irq_domain *domain = gc->irq.domain; - if (!gpiochip_irqchip_irq_valid(chip, offset)) + if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY @@ -2406,7 +2406,7 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) spec.fwnode = domain->fwnode; spec.param_count = 2; - spec.param[0] = chip->irq.child_offset_to_irq(chip, offset); + spec.param[0] = gc->irq.child_offset_to_irq(gc, offset); spec.param[1] = IRQ_TYPE_NONE; return irq_create_fwspec_mapping(&spec); @@ -2418,50 +2418,50 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) static int gpiochip_irq_reqres(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - return gpiochip_reqres_irq(chip, d->hwirq); + return gpiochip_reqres_irq(gc, d->hwirq); } static void gpiochip_irq_relres(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - gpiochip_relres_irq(chip, d->hwirq); + gpiochip_relres_irq(gc, d->hwirq); } static void gpiochip_irq_enable(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - gpiochip_enable_irq(chip, d->hwirq); - if (chip->irq.irq_enable) - chip->irq.irq_enable(d); + gpiochip_enable_irq(gc, d->hwirq); + if (gc->irq.irq_enable) + gc->irq.irq_enable(d); else - chip->irq.chip->irq_unmask(d); + gc->irq.chip->irq_unmask(d); } static void gpiochip_irq_disable(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - if (chip->irq.irq_disable) - chip->irq.irq_disable(d); + if (gc->irq.irq_disable) + gc->irq.irq_disable(d); else - chip->irq.chip->irq_mask(d); - gpiochip_disable_irq(chip, d->hwirq); + gc->irq.chip->irq_mask(d); + gpiochip_disable_irq(gc, d->hwirq); } -static 
void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip) +static void gpiochip_set_irq_hooks(struct gpio_chip *gc) { - struct irq_chip *irqchip = gpiochip->irq.chip; + struct irq_chip *irqchip = gc->irq.chip; if (!irqchip->irq_request_resources && !irqchip->irq_release_resources) { irqchip->irq_request_resources = gpiochip_irq_reqres; irqchip->irq_release_resources = gpiochip_irq_relres; } - if (WARN_ON(gpiochip->irq.irq_enable)) + if (WARN_ON(gc->irq.irq_enable)) return; /* Check if the irqchip already has this hook... */ if (irqchip->irq_enable == gpiochip_irq_enable) { @@ -2469,27 +2469,27 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip) * ...and if so, give a gentle warning that this is bad * practice. */ - chip_info(gpiochip, + chip_info(gc, "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n"); return; } - gpiochip->irq.irq_enable = irqchip->irq_enable; - gpiochip->irq.irq_disable = irqchip->irq_disable; + gc->irq.irq_enable = irqchip->irq_enable; + gc->irq.irq_disable = irqchip->irq_disable; irqchip->irq_enable = gpiochip_irq_enable; irqchip->irq_disable = gpiochip_irq_disable; } /** * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip - * @gpiochip: the GPIO chip to add the IRQ chip to + * @gc: the GPIO chip to add the IRQ chip to * @lock_key: lockdep class for IRQ lock * @request_key: lockdep class for IRQ request */ -static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, +static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { - struct irq_chip *irqchip = gpiochip->irq.chip; + struct irq_chip *irqchip = gc->irq.chip; const struct irq_domain_ops *ops = NULL; struct device_node *np; unsigned int type; @@ -2498,13 +2498,13 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, if (!irqchip) return 0; - if (gpiochip->irq.parent_handler && gpiochip->can_sleep) { - chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n"); + if (gc->irq.parent_handler && gc->can_sleep) { + chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n"); return -EINVAL; } - np = gpiochip->gpiodev->dev.of_node; - type = gpiochip->irq.default_type; + np = gc->gpiodev->dev.of_node; + type = gc->irq.default_type; /* * Specifying a default trigger is a terrible idea if DT or ACPI is @@ -2515,74 +2515,74 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, "%s: Ignoring %u default trigger\n", np->full_name, type)) type = IRQ_TYPE_NONE; - if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) { - acpi_handle_warn(ACPI_HANDLE(gpiochip->parent), + if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) { + acpi_handle_warn(ACPI_HANDLE(gc->parent), "Ignoring %u default trigger\n", type); type = IRQ_TYPE_NONE; } - gpiochip->to_irq = gpiochip_to_irq; - gpiochip->irq.default_type = type; - gpiochip->irq.lock_key = lock_key; - gpiochip->irq.request_key = request_key; + gc->to_irq = gpiochip_to_irq; + gc->irq.default_type = type; + gc->irq.lock_key = lock_key; + gc->irq.request_key = request_key; /* If a parent irqdomain is provided, let's build a hierarchy */ - if (gpiochip_hierarchy_is_hierarchical(gpiochip)) { - int ret = gpiochip_hierarchy_add_domain(gpiochip); + if (gpiochip_hierarchy_is_hierarchical(gc)) { + int ret = gpiochip_hierarchy_add_domain(gc); if (ret) return ret; } else { /* Some drivers provide custom irqdomain ops */ - if (gpiochip->irq.domain_ops) - ops = gpiochip->irq.domain_ops; + if 
(gc->irq.domain_ops) + ops = gc->irq.domain_ops; if (!ops) ops = &gpiochip_domain_ops; - gpiochip->irq.domain = irq_domain_add_simple(np, - gpiochip->ngpio, - gpiochip->irq.first, - ops, gpiochip); - if (!gpiochip->irq.domain) + gc->irq.domain = irq_domain_add_simple(np, + gc->ngpio, + gc->irq.first, + ops, gc); + if (!gc->irq.domain) return -EINVAL; } - if (gpiochip->irq.parent_handler) { - void *data = gpiochip->irq.parent_handler_data ?: gpiochip; + if (gc->irq.parent_handler) { + void *data = gc->irq.parent_handler_data ?: gc; - for (i = 0; i < gpiochip->irq.num_parents; i++) { + for (i = 0; i < gc->irq.num_parents; i++) { /* * The parent IRQ chip is already using the chip_data * for this IRQ chip, so our callbacks simply use the * handler_data. */ - irq_set_chained_handler_and_data(gpiochip->irq.parents[i], - gpiochip->irq.parent_handler, + irq_set_chained_handler_and_data(gc->irq.parents[i], + gc->irq.parent_handler, data); } } - gpiochip_set_irq_hooks(gpiochip); + gpiochip_set_irq_hooks(gc); - acpi_gpiochip_request_interrupts(gpiochip); + acpi_gpiochip_request_interrupts(gc); return 0; } /** * gpiochip_irqchip_remove() - removes an irqchip added to a gpiochip - * @gpiochip: the gpiochip to remove the irqchip from + * @gc: the gpiochip to remove the irqchip from * * This is called only from gpiochip_remove() */ -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) +static void gpiochip_irqchip_remove(struct gpio_chip *gc) { - struct irq_chip *irqchip = gpiochip->irq.chip; + struct irq_chip *irqchip = gc->irq.chip; unsigned int offset; - acpi_gpiochip_free_interrupts(gpiochip); + acpi_gpiochip_free_interrupts(gc); - if (irqchip && gpiochip->irq.parent_handler) { - struct gpio_irq_chip *irq = &gpiochip->irq; + if (irqchip && gc->irq.parent_handler) { + struct gpio_irq_chip *irq = &gc->irq; unsigned int i; for (i = 0; i < irq->num_parents; i++) @@ -2591,18 +2591,18 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) } /* Remove all IRQ mappings and delete the domain */ - if (gpiochip->irq.domain) { + if (gc->irq.domain) { unsigned int irq; - for (offset = 0; offset < gpiochip->ngpio; offset++) { - if (!gpiochip_irqchip_irq_valid(gpiochip, offset)) + for (offset = 0; offset < gc->ngpio; offset++) { + if (!gpiochip_irqchip_irq_valid(gc, offset)) continue; - irq = irq_find_mapping(gpiochip->irq.domain, offset); + irq = irq_find_mapping(gc->irq.domain, offset); irq_dispose_mapping(irq); } - irq_domain_remove(gpiochip->irq.domain); + irq_domain_remove(gc->irq.domain); } if (irqchip) { @@ -2611,20 +2611,20 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) irqchip->irq_release_resources = NULL; } if (irqchip->irq_enable == gpiochip_irq_enable) { - irqchip->irq_enable = gpiochip->irq.irq_enable; - irqchip->irq_disable = gpiochip->irq.irq_disable; + irqchip->irq_enable = gc->irq.irq_enable; + irqchip->irq_disable = gc->irq.irq_disable; } } - gpiochip->irq.irq_enable = NULL; - gpiochip->irq.irq_disable = NULL; - gpiochip->irq.chip = NULL; + gc->irq.irq_enable = NULL; + gc->irq.irq_disable = NULL; + gc->irq.chip = NULL; - gpiochip_irqchip_free_valid_mask(gpiochip); + gpiochip_irqchip_free_valid_mask(gc); } /** * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip - * @gpiochip: the gpiochip to add the irqchip to + * @gc: the gpiochip to add the irqchip to * @irqchip: the irqchip to add to the gpiochip * @first_irq: if not dynamically assigned, the base (first) IRQ to * allocate gpiochip irqs from @@ -2649,7 +2649,7 @@ static void 
gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * the pins on the gpiochip can generate a unique IRQ. Everything else * need to be open coded. */ -int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, +int gpiochip_irqchip_add_key(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -2660,23 +2660,23 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, { struct device_node *of_node; - if (!gpiochip || !irqchip) + if (!gc || !irqchip) return -EINVAL; - if (!gpiochip->parent) { + if (!gc->parent) { pr_err("missing gpiochip .dev parent pointer\n"); return -EINVAL; } - gpiochip->irq.threaded = threaded; - of_node = gpiochip->parent->of_node; + gc->irq.threaded = threaded; + of_node = gc->parent->of_node; #ifdef CONFIG_OF_GPIO /* * If the gpiochip has an assigned OF node this takes precedence - * FIXME: get rid of this and use gpiochip->parent->of_node + * FIXME: get rid of this and use gc->parent->of_node * everywhere */ - if (gpiochip->of_node) - of_node = gpiochip->of_node; + if (gc->of_node) + of_node = gc->of_node; #endif /* * Specifying a default trigger is a terrible idea if DT or ACPI is @@ -2686,29 +2686,29 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, if (WARN(of_node && type != IRQ_TYPE_NONE, "%pOF: Ignoring %d default trigger\n", of_node, type)) type = IRQ_TYPE_NONE; - if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) { - acpi_handle_warn(ACPI_HANDLE(gpiochip->parent), + if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) { + acpi_handle_warn(ACPI_HANDLE(gc->parent), "Ignoring %d default trigger\n", type); type = IRQ_TYPE_NONE; } - gpiochip->irq.chip = irqchip; - gpiochip->irq.handler = handler; - gpiochip->irq.default_type = type; - gpiochip->to_irq = gpiochip_to_irq; - gpiochip->irq.lock_key = lock_key; - gpiochip->irq.request_key = request_key; - gpiochip->irq.domain = irq_domain_add_simple(of_node, - gpiochip->ngpio, first_irq, - &gpiochip_domain_ops, gpiochip); - if (!gpiochip->irq.domain) { - gpiochip->irq.chip = NULL; + gc->irq.chip = irqchip; + gc->irq.handler = handler; + gc->irq.default_type = type; + gc->to_irq = gpiochip_to_irq; + gc->irq.lock_key = lock_key; + gc->irq.request_key = request_key; + gc->irq.domain = irq_domain_add_simple(of_node, + gc->ngpio, first_irq, + &gpiochip_domain_ops, gc); + if (!gc->irq.domain) { + gc->irq.chip = NULL; return -EINVAL; } - gpiochip_set_irq_hooks(gpiochip); + gpiochip_set_irq_hooks(gc); - acpi_gpiochip_request_interrupts(gpiochip); + acpi_gpiochip_request_interrupts(gc); return 0; } @@ -2716,65 +2716,65 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key); #else /* CONFIG_GPIOLIB_IRQCHIP */ -static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, +static inline int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { return 0; } -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {} +static void gpiochip_irqchip_remove(struct gpio_chip *gc) {} -static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip) +static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gc) { return 0; } -static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip) +static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) { return 0; } -static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip) +static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) { } #endif /* CONFIG_GPIOLIB_IRQCHIP */ 
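For driver authors the rename above is purely cosmetic, but the calling convention is easiest to see in context. Here is a minimal, hypothetical sketch of the static irqchip path (the demo_* names are invented for illustration and are not part of this patch; a real irq_chip would also supply mask/unmask operations):

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	static struct irq_chip demo_irq_chip = {
		.name = "demo-gpio",
	};

	/* Hypothetical probe-path helper: register the gpiochip, then
	 * attach the irqchip. gpiochip_irqchip_add() builds a simple
	 * irqdomain covering every line on the chip; first_irq == 0
	 * means IRQ numbers are allocated dynamically.
	 */
	static int demo_gpio_register(struct gpio_chip *gc, void *drvdata)
	{
		int ret;

		ret = gpiochip_add_data(gc, drvdata);
		if (ret)
			return ret;

		ret = gpiochip_irqchip_add(gc, &demo_irq_chip, 0,
					   handle_simple_irq, IRQ_TYPE_NONE);
		if (ret)
			gpiochip_remove(gc);
		return ret;
	}
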
/** * gpiochip_generic_request() - request the gpio function for a pin - * @chip: the gpiochip owning the GPIO + * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to request for GPIO function */ -int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset) +int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset) { #ifdef CONFIG_PINCTRL - if (list_empty(&chip->gpiodev->pin_ranges)) + if (list_empty(&gc->gpiodev->pin_ranges)) return 0; #endif - return pinctrl_gpio_request(chip->gpiodev->base + offset); + return pinctrl_gpio_request(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_request); /** * gpiochip_generic_free() - free the gpio function from a pin - * @chip: the gpiochip to request the gpio function for + * @gc: the gpiochip to request the gpio function for * @offset: the offset of the GPIO to free from GPIO function */ -void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset) +void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset) { - pinctrl_gpio_free(chip->gpiodev->base + offset); + pinctrl_gpio_free(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_free); /** * gpiochip_generic_config() - apply configuration for a pin - * @chip: the gpiochip owning the GPIO + * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to apply the configuration * @config: the configuration to be applied */ -int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, +int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset, unsigned long config) { - return pinctrl_gpio_set_config(chip->gpiodev->base + offset, config); + return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config); } EXPORT_SYMBOL_GPL(gpiochip_generic_config); @@ -2782,7 +2782,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config); /** * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping - * @chip: the gpiochip to add the range for + * @gc: the gpiochip to add the range for * @pctldev: the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_group: name of the pin group inside the pin controller @@ -2792,24 +2792,24 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config); * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. 
*/ -int gpiochip_add_pingroup_range(struct gpio_chip *chip, +int gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { struct gpio_pin_range *pin_range; - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; int ret; pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL); if (!pin_range) { - chip_err(chip, "failed to allocate pin ranges\n"); + chip_err(gc, "failed to allocate pin ranges\n"); return -ENOMEM; } /* Use local offset as range ID */ pin_range->range.id = gpio_offset; - pin_range->range.gc = chip; - pin_range->range.name = chip->label; + pin_range->range.gc = gc; + pin_range->range.name = gc->label; pin_range->range.base = gdev->base + gpio_offset; pin_range->pctldev = pctldev; @@ -2823,7 +2823,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip, pinctrl_add_gpio_range(pctldev, &pin_range->range); - chip_dbg(chip, "created GPIO range %d->%d ==> %s PINGRP %s\n", + chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n", gpio_offset, gpio_offset + pin_range->range.npins - 1, pinctrl_dev_get_devname(pctldev), pin_group); @@ -2835,7 +2835,7 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range); /** * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping - * @chip: the gpiochip to add the range for + * @gc: the gpiochip to add the range for * @pinctl_name: the dev_name() of the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_offset: the start offset in the pin controller number space @@ -2850,24 +2850,24 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range); * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. 
*/ -int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, +int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { struct gpio_pin_range *pin_range; - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; int ret; pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL); if (!pin_range) { - chip_err(chip, "failed to allocate pin ranges\n"); + chip_err(gc, "failed to allocate pin ranges\n"); return -ENOMEM; } /* Use local offset as range ID */ pin_range->range.id = gpio_offset; - pin_range->range.gc = chip; - pin_range->range.name = chip->label; + pin_range->range.gc = gc; + pin_range->range.name = gc->label; pin_range->range.base = gdev->base + gpio_offset; pin_range->range.pin_base = pin_offset; pin_range->range.npins = npins; @@ -2875,11 +2875,11 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, &pin_range->range); if (IS_ERR(pin_range->pctldev)) { ret = PTR_ERR(pin_range->pctldev); - chip_err(chip, "could not create pin range\n"); + chip_err(gc, "could not create pin range\n"); kfree(pin_range); return ret; } - chip_dbg(chip, "created GPIO range %d->%d ==> %s PIN %d->%d\n", + chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n", gpio_offset, gpio_offset + npins - 1, pinctl_name, pin_offset, pin_offset + npins - 1); @@ -2892,12 +2892,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pin_range); /** * gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings - * @chip: the chip to remove all the mappings for + * @gc: the chip to remove all the mappings for */ -void gpiochip_remove_pin_ranges(struct gpio_chip *chip) +void gpiochip_remove_pin_ranges(struct gpio_chip *gc) { struct gpio_pin_range *pin_range, *tmp; - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) { list_del(&pin_range->node); @@ -2916,7 +2916,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges); */ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) { - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; int ret; unsigned long flags; unsigned offset; @@ -2942,12 +2942,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) goto done; } - if (chip->request) { - /* chip->request may sleep */ + if (gc->request) { + /* gc->request may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); offset = gpio_chip_hwgpio(desc); - if (gpiochip_line_is_valid(chip, offset)) - ret = chip->request(chip, offset); + if (gpiochip_line_is_valid(gc, offset)) + ret = gc->request(gc, offset); else ret = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); @@ -2959,8 +2959,8 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) goto done; } } - if (chip->get_direction) { - /* chip->get_direction may sleep */ + if (gc->get_direction) { + /* gc->get_direction may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); gpiod_get_direction(desc); spin_lock_irqsave(&gpio_lock, flags); @@ -3036,7 +3036,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc) { bool ret = false; unsigned long flags; - struct gpio_chip *chip; + struct gpio_chip *gc; might_sleep(); @@ -3044,12 +3044,12 @@ static bool gpiod_free_commit(struct gpio_desc *desc) spin_lock_irqsave(&gpio_lock, flags); - chip = desc->gdev->chip; - if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { - if (chip->free) { + gc = 
desc->gdev->chip; + if (gc && test_bit(FLAG_REQUESTED, &desc->flags)) { + if (gc->free) { spin_unlock_irqrestore(&gpio_lock, flags); - might_sleep_if(chip->can_sleep); - chip->free(chip, gpio_chip_hwgpio(desc)); + might_sleep_if(gc->can_sleep); + gc->free(gc, gpio_chip_hwgpio(desc)); spin_lock_irqsave(&gpio_lock, flags); } kfree_const(desc->label); @@ -3087,7 +3087,7 @@ void gpiod_free(struct gpio_desc *desc) /** * gpiochip_is_requested - return string iff signal was requested - * @chip: controller managing the signal + * @gc: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. @@ -3098,14 +3098,14 @@ void gpiod_free(struct gpio_desc *desc) * help with diagnostics, and knowing that the signal is used as a GPIO * can help avoid accidentally multiplexing it to another controller. */ -const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) +const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset) { struct gpio_desc *desc; - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return NULL; - desc = gpiochip_get_desc(chip, offset); + desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return NULL; @@ -3117,7 +3117,7 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested); /** * gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor - * @chip: GPIO chip + * @gc: GPIO chip * @hwnum: hardware number of the GPIO for which to request the descriptor * @label: label for the GPIO * @lflags: lookup flags for this GPIO or 0 if default, this can be used to @@ -3136,17 +3136,17 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested); * A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error * code on failure. 
*/ -struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, unsigned int hwnum, const char *label, enum gpio_lookup_flags lflags, enum gpiod_flags dflags) { - struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum); + struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum); int ret; if (IS_ERR(desc)) { - chip_err(chip, "failed to get GPIO descriptor\n"); + chip_err(gc, "failed to get GPIO descriptor\n"); return desc; } @@ -3156,7 +3156,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, ret = gpiod_configure_flags(desc, label, lflags, dflags); if (ret) { - chip_err(chip, "setup of own GPIO %s failed\n", label); + chip_err(gc, "setup of own GPIO %s failed\n", label); gpiod_free_commit(desc); return ERR_PTR(ret); } @@ -3200,7 +3200,7 @@ static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset, static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode) { - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; unsigned long config; unsigned arg; @@ -3215,7 +3215,7 @@ static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode) } config = PIN_CONF_PACKED(mode, arg); - return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config); + return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config); } static int gpio_set_bias(struct gpio_desc *desc) @@ -3249,18 +3249,18 @@ static int gpio_set_bias(struct gpio_desc *desc) */ int gpiod_direction_input(struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; int ret = 0; VALIDATE_DESC(desc); - chip = desc->gdev->chip; + gc = desc->gdev->chip; /* * It is legal to have no .get() and .direction_input() specified if * the chip is output-only, but you can't specify .direction_input() * and not support the .get() operation, that doesn't make sense. */ - if (!chip->get && chip->direction_input) { + if (!gc->get && gc->direction_input) { gpiod_warn(desc, "%s: missing get() but have direction_input()\n", __func__); @@ -3273,10 +3273,10 @@ int gpiod_direction_input(struct gpio_desc *desc) * direction (if .get_direction() is supported) else we silently * assume we are in input mode after this. 
*/ - if (chip->direction_input) { - ret = chip->direction_input(chip, gpio_chip_hwgpio(desc)); - } else if (chip->get_direction && - (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) { + if (gc->direction_input) { + ret = gc->direction_input(gc, gpio_chip_hwgpio(desc)); + } else if (gc->get_direction && + (gc->get_direction(gc, gpio_chip_hwgpio(desc)) != 1)) { gpiod_warn(desc, "%s: missing direction_input() operation and line is output\n", __func__); @@ -3439,12 +3439,12 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output); */ int gpiod_set_config(struct gpio_desc *desc, unsigned long config) { - struct gpio_chip *chip; + struct gpio_chip *gc; VALIDATE_DESC(desc); - chip = desc->gdev->chip; + gc = desc->gdev->chip; - return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config); + return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config); } EXPORT_SYMBOL_GPL(gpiod_set_config); @@ -3476,7 +3476,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce); */ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { - struct gpio_chip *chip; + struct gpio_chip *gc; unsigned long packed; int gpio; int rc; @@ -3489,14 +3489,14 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) assign_bit(FLAG_TRANSITORY, &desc->flags, transitory); /* If the driver supports it, set the persistence state now */ - chip = desc->gdev->chip; - if (!chip->set_config) + gc = desc->gdev->chip; + if (!gc->set_config) return 0; packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, !transitory); gpio = gpio_chip_hwgpio(desc); - rc = gpio_do_set_config(chip, gpio, packed); + rc = gpio_do_set_config(gc, gpio, packed); if (rc == -ENOTSUPP) { dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", gpio); @@ -3555,28 +3555,28 @@ EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; int offset; int value; - chip = desc->gdev->chip; + gc = desc->gdev->chip; offset = gpio_chip_hwgpio(desc); - value = chip->get ? chip->get(chip, offset) : -EIO; + value = gc->get ? gc->get(gc, offset) : -EIO; value = value < 0 ? value : !!value; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } -static int gpio_chip_get_multiple(struct gpio_chip *chip, +static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - if (chip->get_multiple) { - return chip->get_multiple(chip, mask, bits); - } else if (chip->get) { + if (gc->get_multiple) { + return gc->get_multiple(gc, mask, bits); + } else if (gc->get) { int i, value; - for_each_set_bit(i, mask, chip->ngpio) { - value = chip->get(chip, i); + for_each_set_bit(i, mask, gc->ngpio) { + value = gc->get(gc, i); if (value < 0) return value; __assign_bit(i, bits, value); @@ -3624,26 +3624,26 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, } while (i < array_size) { - struct gpio_chip *chip = desc_array[i]->gdev->chip; + struct gpio_chip *gc = desc_array[i]->gdev->chip; unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)]; unsigned long *mask, *bits; int first, j, ret; - if (likely(chip->ngpio <= FASTPATH_NGPIO)) { + if (likely(gc->ngpio <= FASTPATH_NGPIO)) { mask = fastpath; } else { - mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio), + mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio), sizeof(*mask), can_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (!mask) return -ENOMEM; } - bits = mask + BITS_TO_LONGS(chip->ngpio); - bitmap_zero(mask, chip->ngpio); + bits = mask + BITS_TO_LONGS(gc->ngpio); + bitmap_zero(mask, gc->ngpio); if (!can_sleep) - WARN_ON(chip->can_sleep); + WARN_ON(gc->can_sleep); /* collect all inputs belonging to the same chip */ first = i; @@ -3658,9 +3658,9 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, i = find_next_zero_bit(array_info->get_mask, array_size, i); } while ((i < array_size) && - (desc_array[i]->gdev->chip == chip)); + (desc_array[i]->gdev->chip == gc)); - ret = gpio_chip_get_multiple(chip, mask, bits); + ret = gpio_chip_get_multiple(gc, mask, bits); if (ret) { if (mask != fastpath) kfree(mask); @@ -3798,13 +3798,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value); static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value) { int ret = 0; - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; int offset = gpio_chip_hwgpio(desc); if (value) { - ret = chip->direction_input(chip, offset); + ret = gc->direction_input(gc, offset); } else { - ret = chip->direction_output(chip, offset, 0); + ret = gc->direction_output(gc, offset, 0); if (!ret) set_bit(FLAG_IS_OUT, &desc->flags); } @@ -3823,15 +3823,15 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value) static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value) { int ret = 0; - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; int offset = gpio_chip_hwgpio(desc); if (value) { - ret = chip->direction_output(chip, offset, 1); + ret = gc->direction_output(gc, offset, 1); if (!ret) set_bit(FLAG_IS_OUT, &desc->flags); } else { - ret = chip->direction_input(chip, offset); + ret = gc->direction_input(gc, offset); } trace_gpio_direction(desc_to_gpio(desc), !value, ret); if (ret < 0) @@ -3842,33 +3842,34 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value) { - struct gpio_chip *chip; + struct gpio_chip *gc; - chip = desc->gdev->chip; + gc = desc->gdev->chip; trace_gpio_value(desc_to_gpio(desc), 0, value); - chip->set(chip, gpio_chip_hwgpio(desc), value); + gc->set(gc, gpio_chip_hwgpio(desc), value); } /* * set multiple outputs on the same chip; * use the chip's set_multiple function if available; * otherwise set the outputs sequentially; + * @chip: the GPIO chip we operate on * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word * defines which outputs are to be changed * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word * defines the values the outputs specified by mask are to be set to */ -static void gpio_chip_set_multiple(struct gpio_chip *chip, +static void gpio_chip_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - if (chip->set_multiple) { - chip->set_multiple(chip, mask, bits); + if (gc->set_multiple) { + gc->set_multiple(gc, mask, bits); } else { unsigned int i; /* set outputs if the corresponding mask bit is set */ - for_each_set_bit(i, mask, chip->ngpio) - chip->set(chip, i, test_bit(i, bits)); + for_each_set_bit(i, mask, gc->ngpio) + gc->set(gc, i, test_bit(i, bits)); } } @@ -3907,26 +3908,26 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, } while (i < array_size) { - struct gpio_chip *chip = desc_array[i]->gdev->chip; + struct gpio_chip *gc = desc_array[i]->gdev->chip; unsigned long 
fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)]; unsigned long *mask, *bits; int count = 0; - if (likely(chip->ngpio <= FASTPATH_NGPIO)) { + if (likely(gc->ngpio <= FASTPATH_NGPIO)) { mask = fastpath; } else { - mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio), + mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio), sizeof(*mask), can_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!mask) return -ENOMEM; } - bits = mask + BITS_TO_LONGS(chip->ngpio); - bitmap_zero(mask, chip->ngpio); + bits = mask + BITS_TO_LONGS(gc->ngpio); + bitmap_zero(mask, gc->ngpio); if (!can_sleep) - WARN_ON(chip->can_sleep); + WARN_ON(gc->can_sleep); do { struct gpio_desc *desc = desc_array[i]; @@ -3962,10 +3963,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, i = find_next_zero_bit(array_info->set_mask, array_size, i); } while ((i < array_size) && - (desc_array[i]->gdev->chip == chip)); + (desc_array[i]->gdev->chip == gc)); /* push collected bits to outputs */ if (count != 0) - gpio_chip_set_multiple(chip, mask, bits); + gpio_chip_set_multiple(gc, mask, bits); if (mask != fastpath) kfree(mask); @@ -4127,7 +4128,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name); */ int gpiod_to_irq(const struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; int offset; /* @@ -4138,10 +4139,10 @@ int gpiod_to_irq(const struct gpio_desc *desc) if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip) return -EINVAL; - chip = desc->gdev->chip; + gc = desc->gdev->chip; offset = gpio_chip_hwgpio(desc); - if (chip->to_irq) { - int retirq = chip->to_irq(chip, offset); + if (gc->to_irq) { + int retirq = gc->to_irq(gc, offset); /* Zero means NO_IRQ */ if (!retirq) @@ -4155,17 +4156,17 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); /** * gpiochip_lock_as_irq() - lock a GPIO to be used as IRQ - * @chip: the chip the GPIO to lock belongs to + * @gc: the chip the GPIO to lock belongs to * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to lock down * a certain GPIO line to be used for IRQs. */ -int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) +int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; - desc = gpiochip_get_desc(chip, offset); + desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -4173,18 +4174,18 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) * If it's fast: flush the direction setting if something changed * behind our back */ - if (!chip->can_sleep && chip->get_direction) { + if (!gc->can_sleep && gc->get_direction) { int dir = gpiod_get_direction(desc); if (dir < 0) { - chip_err(chip, "%s: cannot get GPIO direction\n", + chip_err(gc, "%s: cannot get GPIO direction\n", __func__); return dir; } } if (test_bit(FLAG_IS_OUT, &desc->flags)) { - chip_err(chip, + chip_err(gc, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; @@ -4207,17 +4208,17 @@ EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); /** * gpiochip_unlock_as_irq() - unlock a GPIO used as IRQ - * @chip: the chip the GPIO to lock belongs to + * @gc: the chip the GPIO to lock belongs to * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to indicate * that a certain GPIO is no longer used exclusively for IRQ. 
*/ -void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; - desc = gpiochip_get_desc(chip, offset); + desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return; @@ -4230,9 +4231,9 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) } EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq); -void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset) { - struct gpio_desc *desc = gpiochip_get_desc(chip, offset); + struct gpio_desc *desc = gpiochip_get_desc(gc, offset); if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) @@ -4240,9 +4241,9 @@ void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset) } EXPORT_SYMBOL_GPL(gpiochip_disable_irq); -void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset) { - struct gpio_desc *desc = gpiochip_get_desc(chip, offset); + struct gpio_desc *desc = gpiochip_get_desc(gc, offset); if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { @@ -4252,63 +4253,63 @@ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset) } EXPORT_SYMBOL_GPL(gpiochip_enable_irq); -bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return test_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); + return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_irq); -int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset) +int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset) { int ret; - if (!try_module_get(chip->gpiodev->owner)) + if (!try_module_get(gc->gpiodev->owner)) return -ENODEV; - ret = gpiochip_lock_as_irq(chip, offset); + ret = gpiochip_lock_as_irq(gc, offset); if (ret) { - chip_err(chip, "unable to lock HW IRQ %u for IRQ\n", offset); - module_put(chip->gpiodev->owner); + chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset); + module_put(gc->gpiodev->owner); return ret; } return 0; } EXPORT_SYMBOL_GPL(gpiochip_reqres_irq); -void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset) { - gpiochip_unlock_as_irq(chip, offset); - module_put(chip->gpiodev->owner); + gpiochip_unlock_as_irq(gc, offset); + module_put(gc->gpiodev->owner); } EXPORT_SYMBOL_GPL(gpiochip_relres_irq); -bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return test_bit(FLAG_OPEN_DRAIN, &chip->gpiodev->descs[offset].flags); + return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain); -bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return test_bit(FLAG_OPEN_SOURCE, &chip->gpiodev->descs[offset].flags); + return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags); } 
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source); -bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags); + return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent); @@ -4546,7 +4547,7 @@ EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table); */ void gpiod_add_hogs(struct gpiod_hog *hogs) { - struct gpio_chip *chip; + struct gpio_chip *gc; struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); @@ -4558,9 +4559,9 @@ void gpiod_add_hogs(struct gpiod_hog *hogs) * The chip may have been registered earlier, so check if it * exists and, if so, try to hog the line now. */ - chip = find_chip_by_name(hog->chip_label); - if (chip) - gpiochip_machine_hog(chip, hog); + gc = find_chip_by_name(hog->chip_label); + if (gc) + gpiochip_machine_hog(gc, hog); } mutex_unlock(&gpio_machine_hogs_mutex); @@ -4610,7 +4611,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, return desc; for (p = &table->table[0]; p->chip_label; p++) { - struct gpio_chip *chip; + struct gpio_chip *gc; /* idx must always match exactly */ if (p->idx != idx) @@ -4620,9 +4621,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, if (p->con_id && (!con_id || strcmp(p->con_id, con_id))) continue; - chip = find_chip_by_name(p->chip_label); + gc = find_chip_by_name(p->chip_label); - if (!chip) { + if (!gc) { /* * As the lookup table indicates a chip with * p->chip_label should exist, assume it may @@ -4635,15 +4636,15 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, return ERR_PTR(-EPROBE_DEFER); } - if (chip->ngpio <= p->chip_hwnum) { + if (gc->ngpio <= p->chip_hwnum) { dev_err(dev, "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", - idx, p->chip_hwnum, chip->ngpio - 1, - chip->label); + idx, p->chip_hwnum, gc->ngpio - 1, + gc->label); return ERR_PTR(-EINVAL); } - desc = gpiochip_get_desc(chip, p->chip_hwnum); + desc = gpiochip_get_desc(gc, p->chip_hwnum); *flags = p->flags; return desc; @@ -5038,20 +5039,20 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional); int gpiod_hog(struct gpio_desc *desc, const char *name, unsigned long lflags, enum gpiod_flags dflags) { - struct gpio_chip *chip; + struct gpio_chip *gc; struct gpio_desc *local_desc; int hwnum; int ret; - chip = gpiod_to_chip(desc); + gc = gpiod_to_chip(desc); hwnum = gpio_chip_hwgpio(desc); - local_desc = gpiochip_request_own_desc(chip, hwnum, name, + local_desc = gpiochip_request_own_desc(gc, hwnum, name, lflags, dflags); if (IS_ERR(local_desc)) { ret = PTR_ERR(local_desc); pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n", - name, chip->label, hwnum, ret); + name, gc->label, hwnum, ret); return ret; } @@ -5069,15 +5070,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, /** * gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog - * @chip: gpio chip to act on + * @gc: gpio chip to act on */ -static void gpiochip_free_hogs(struct gpio_chip *chip) +static void gpiochip_free_hogs(struct gpio_chip *gc) { int id; - for (id = 0; id < chip->ngpio; id++) { - if (test_bit(FLAG_IS_HOGGED, &chip->gpiodev->descs[id].flags)) - gpiochip_free_own_desc(&chip->gpiodev->descs[id]); + for (id = 0; id < gc->ngpio; id++) { + if (test_bit(FLAG_IS_HOGGED, 
&gc->gpiodev->descs[id].flags)) + gpiochip_free_own_desc(&gc->gpiodev->descs[id]); } } @@ -5100,7 +5101,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, struct gpio_desc *desc; struct gpio_descs *descs; struct gpio_array *array_info = NULL; - struct gpio_chip *chip; + struct gpio_chip *gc; int count, bitmap_size; count = gpiod_count(dev, con_id); @@ -5120,7 +5121,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, descs->desc[descs->ndescs] = desc; - chip = gpiod_to_chip(desc); + gc = gpiod_to_chip(desc); /* * If pin hardware number of array member 0 is also 0, select * its chip as a candidate for fast bitmap processing path. @@ -5128,8 +5129,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) { struct gpio_descs *array; - bitmap_size = BITS_TO_LONGS(chip->ngpio > count ? - chip->ngpio : count); + bitmap_size = BITS_TO_LONGS(gc->ngpio > count ? + gc->ngpio : count); array = kzalloc(struct_size(descs, desc, count) + struct_size(array_info, invert_mask, @@ -5152,7 +5153,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, array_info->desc = descs->desc; array_info->size = count; - array_info->chip = chip; + array_info->chip = gc; bitmap_set(array_info->get_mask, descs->ndescs, count - descs->ndescs); bitmap_set(array_info->set_mask, descs->ndescs, @@ -5160,7 +5161,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, descs->info = array_info; } /* Unmark array members which don't belong to the 'fast' chip */ - if (array_info && array_info->chip != chip) { + if (array_info && array_info->chip != gc) { __clear_bit(descs->ndescs, array_info->get_mask); __clear_bit(descs->ndescs, array_info->set_mask); } @@ -5185,8 +5186,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, } } else if (array_info) { /* Exclude open drain or open source from fast output */ - if (gpiochip_line_is_open_drain(chip, descs->ndescs) || - gpiochip_line_is_open_source(chip, descs->ndescs)) + if (gpiochip_line_is_open_drain(gc, descs->ndescs) || + gpiochip_line_is_open_source(gc, descs->ndescs)) __clear_bit(descs->ndescs, array_info->set_mask); /* Identify 'fast' pins which require invertion */ @@ -5292,7 +5293,7 @@ core_initcall(gpiolib_dev_init); static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) { unsigned i; - struct gpio_chip *chip = gdev->chip; + struct gpio_chip *gc = gdev->chip; unsigned gpio = gdev->base; struct gpio_desc *gdesc = &gdev->descs[0]; bool is_out; @@ -5315,7 +5316,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s", gpio, gdesc->name ? gdesc->name : "", gdesc->label, is_out ? "out" : "in ", - chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ", + gc->get ? (gc->get(gc, i) ? "hi" : "lo") : "? ", is_irq ? "IRQ " : "", active_low ? 
"ACTIVE LOW" : ""); seq_printf(s, "\n"); @@ -5367,10 +5368,10 @@ static void gpiolib_seq_stop(struct seq_file *s, void *v) static int gpiolib_seq_show(struct seq_file *s, void *v) { struct gpio_device *gdev = v; - struct gpio_chip *chip = gdev->chip; + struct gpio_chip *gc = gdev->chip; struct device *parent; - if (!chip) { + if (!gc) { seq_printf(s, "%s%s: (dangling chip)", (char *)s->private, dev_name(&gdev->dev)); return 0; @@ -5379,19 +5380,19 @@ static int gpiolib_seq_show(struct seq_file *s, void *v) seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private, dev_name(&gdev->dev), gdev->base, gdev->base + gdev->ngpio - 1); - parent = chip->parent; + parent = gc->parent; if (parent) seq_printf(s, ", parent: %s/%s", parent->bus ? parent->bus->name : "no-bus", dev_name(parent)); - if (chip->label) - seq_printf(s, ", %s", chip->label); - if (chip->can_sleep) + if (gc->label) + seq_printf(s, ", %s", gc->label); + if (gc->can_sleep) seq_printf(s, ", can sleep"); seq_printf(s, ":\n"); - if (chip->dbg_show) - chip->dbg_show(s, chip); + if (gc->dbg_show) + gc->dbg_show(s, gc); else gpiolib_dbg_show(s, gdev); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index ed65e00ee977..b8fc92c177eb 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -87,7 +87,7 @@ struct gpio_irq_chip { * @need_valid_mask to make these GPIO lines unavailable for * translation. */ - int (*child_to_parent_hwirq)(struct gpio_chip *chip, + int (*child_to_parent_hwirq)(struct gpio_chip *gc, unsigned int child_hwirq, unsigned int child_type, unsigned int *parent_hwirq, @@ -102,7 +102,7 @@ struct gpio_irq_chip { * variant named &gpiochip_populate_parent_fwspec_fourcell is also * available. */ - void *(*populate_parent_alloc_arg)(struct gpio_chip *chip, + void *(*populate_parent_alloc_arg)(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type); @@ -114,7 +114,7 @@ struct gpio_irq_chip { * callback. If this is not specified, then a default callback will be * provided that returns the line offset. */ - unsigned int (*child_offset_to_irq)(struct gpio_chip *chip, + unsigned int (*child_offset_to_irq)(struct gpio_chip *gc, unsigned int pin); /** @@ -209,7 +209,7 @@ struct gpio_irq_chip { * a particular driver wants to clear IRQ related registers * in order to avoid undesired events. */ - int (*init_hw)(struct gpio_chip *chip); + int (*init_hw)(struct gpio_chip *gc); /** * @init_valid_mask: optional routine to initialize @valid_mask, to be @@ -220,7 +220,7 @@ struct gpio_irq_chip { * then directly set some bits to "0" if they cannot be used for * interrupts. 
*/ - void (*init_valid_mask)(struct gpio_chip *chip, + void (*init_valid_mask)(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios); @@ -348,40 +348,40 @@ struct gpio_chip { struct device *parent; struct module *owner; - int (*request)(struct gpio_chip *chip, + int (*request)(struct gpio_chip *gc, unsigned offset); - void (*free)(struct gpio_chip *chip, + void (*free)(struct gpio_chip *gc, unsigned offset); - int (*get_direction)(struct gpio_chip *chip, + int (*get_direction)(struct gpio_chip *gc, unsigned offset); - int (*direction_input)(struct gpio_chip *chip, + int (*direction_input)(struct gpio_chip *gc, unsigned offset); - int (*direction_output)(struct gpio_chip *chip, + int (*direction_output)(struct gpio_chip *gc, unsigned offset, int value); - int (*get)(struct gpio_chip *chip, + int (*get)(struct gpio_chip *gc, unsigned offset); - int (*get_multiple)(struct gpio_chip *chip, + int (*get_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); - void (*set)(struct gpio_chip *chip, + void (*set)(struct gpio_chip *gc, unsigned offset, int value); - void (*set_multiple)(struct gpio_chip *chip, + void (*set_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); - int (*set_config)(struct gpio_chip *chip, + int (*set_config)(struct gpio_chip *gc, unsigned offset, unsigned long config); - int (*to_irq)(struct gpio_chip *chip, + int (*to_irq)(struct gpio_chip *gc, unsigned offset); void (*dbg_show)(struct seq_file *s, - struct gpio_chip *chip); + struct gpio_chip *gc); - int (*init_valid_mask)(struct gpio_chip *chip, + int (*init_valid_mask)(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios); - int (*add_pin_ranges)(struct gpio_chip *chip); + int (*add_pin_ranges)(struct gpio_chip *gc); int base; u16 ngpio; @@ -458,11 +458,11 @@ struct gpio_chip { #endif /* CONFIG_OF_GPIO */ }; -extern const char *gpiochip_is_requested(struct gpio_chip *chip, +extern const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset); /* add/remove chips */ -extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, +extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key); @@ -490,43 +490,43 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, * Otherwise it returns zero as a success code. 
*/ #ifdef CONFIG_LOCKDEP -#define gpiochip_add_data(chip, data) ({ \ +#define gpiochip_add_data(gc, data) ({ \ static struct lock_class_key lock_key; \ static struct lock_class_key request_key; \ - gpiochip_add_data_with_key(chip, data, &lock_key, \ + gpiochip_add_data_with_key(gc, data, &lock_key, \ &request_key); \ }) #else -#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) +#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL) #endif /* CONFIG_LOCKDEP */ -static inline int gpiochip_add(struct gpio_chip *chip) +static inline int gpiochip_add(struct gpio_chip *gc) { - return gpiochip_add_data(chip, NULL); + return gpiochip_add_data(gc, NULL); } -extern void gpiochip_remove(struct gpio_chip *chip); -extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, +extern void gpiochip_remove(struct gpio_chip *gc); +extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, void *data); extern struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *chip, void *data)); + int (*match)(struct gpio_chip *gc, void *data)); -bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); -int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset); +int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset); /* Line status inquiry for drivers */ -bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); -bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset); +bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset); /* Sleep persistence inquiry for drivers */ -bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset); -bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset); +bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset); /* get driver data */ -void *gpiochip_get_data(struct gpio_chip *chip); +void *gpiochip_get_data(struct gpio_chip *gc); struct bgpio_pdata { const char *label; @@ -536,23 +536,23 @@ struct bgpio_pdata { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type); #else -static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, +static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { return NULL; } -static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, +static 
inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { @@ -583,11 +583,11 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain, void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data); -void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, +void gpiochip_set_nested_irqchip(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int parent_irq); -int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, +int gpiochip_irqchip_add_key(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -596,7 +596,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, struct lock_class_key *lock_key, struct lock_class_key *request_key); -bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); #ifdef CONFIG_LOCKDEP @@ -607,7 +607,7 @@ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, * boilerplate static inlines provides such a key for each * unique instance. */ -static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -616,12 +616,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, static struct lock_class_key lock_key; static struct lock_class_key request_key; - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, false, &lock_key, &request_key); } -static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -631,35 +631,35 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, static struct lock_class_key lock_key; static struct lock_class_key request_key; - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, true, &lock_key, &request_key); } #else /* ! 
CONFIG_LOCKDEP */ -static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, false, NULL, NULL); } -static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, true, NULL, NULL); } #endif /* CONFIG_LOCKDEP */ -int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); -void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); -int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, +int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset); +void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset); +int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset, unsigned long config); /** @@ -676,25 +676,25 @@ struct gpio_pin_range { #ifdef CONFIG_PINCTRL -int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, +int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); -int gpiochip_add_pingroup_range(struct gpio_chip *chip, +int gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group); -void gpiochip_remove_pin_ranges(struct gpio_chip *chip); +void gpiochip_remove_pin_ranges(struct gpio_chip *gc); #else /* ! 
CONFIG_PINCTRL */ static inline int -gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, +gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { return 0; } static inline int -gpiochip_add_pingroup_range(struct gpio_chip *chip, +gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { @@ -702,27 +702,27 @@ gpiochip_add_pingroup_range(struct gpio_chip *chip, } static inline void -gpiochip_remove_pin_ranges(struct gpio_chip *chip) +gpiochip_remove_pin_ranges(struct gpio_chip *gc) { } #endif /* CONFIG_PINCTRL */ -struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, unsigned int hwnum, const char *label, enum gpio_lookup_flags lflags, enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); -void devprop_gpiochip_set_names(struct gpio_chip *chip, +void devprop_gpiochip_set_names(struct gpio_chip *gc, const struct fwnode_handle *fwnode); #ifdef CONFIG_GPIOLIB /* lock/unlock as IRQ */ -int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset); struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); @@ -736,14 +736,14 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) return ERR_PTR(-ENODEV); } -static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, +static inline int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { WARN_ON(1); return -EINVAL; } -static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, +static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset) { WARN_ON(1); -- cgit v1.2.3-58-ga151 From e45ee71ae101bd271c3cd951cf66341dc8f504a0 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Mon, 30 Mar 2020 11:58:01 +0200 Subject: pinctrl: Define of_pinctrl_get() dummy for !PINCTRL Currently, the of_pinctrl_get() dummy is only defined for !OF, which can still cause build failures on configurations with OF enabled but PINCTRL disabled. Make sure to define the dummy if either OF or PINCTRL are not enabled. Reported-by: Stephen Rothwell Signed-off-by: Thierry Reding Link: https://lore.kernel.org/r/20200330095801.2421589-1-thierry.reding@gmail.com Signed-off-by: Linus Walleij --- include/linux/pinctrl/pinctrl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 7ce23450a1cb..2aef59df93d7 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -186,7 +186,7 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group, const unsigned **pins, unsigned *num_pins); -#ifdef CONFIG_OF +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL) extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np); #else static inline -- cgit v1.2.3-58-ga151 From a9d68cbd4f8834d126ebdd3097a1dee1c5973fdf Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 1 Apr 2020 08:03:28 +0200 Subject: Revert "amba: Initialize dma_parms for amba devices" This reverts commit 5caf6102e32ead7ed5d21b5309c1a4a7d70e6a9f. 
It still needs some more work and that will happen for the next release cycle, not this one. Cc: Cc: Russell King Cc: Christoph Hellwig Cc: Ludovic Barre Cc: Linus Walleij Cc: Arnd Bergmann Cc: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/amba/bus.c | 2 -- include/linux/amba/bus.h | 1 - 2 files changed, 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 5e61783ce92d..fe1523664816 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -374,8 +374,6 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) WARN_ON(dev->irq[0] == (unsigned int)-1); WARN_ON(dev->irq[1] == (unsigned int)-1); - dev->dev.dma_parms = &dev->dma_parms; - ret = request_resource(parent, &dev->res); if (ret) goto err_out; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 0bbfd647f5c6..26f0ecf401ea 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -65,7 +65,6 @@ struct amba_device { struct device dev; struct resource res; struct clk *pclk; - struct device_dma_parameters dma_parms; unsigned int periphid; unsigned int cid; struct amba_cs_uci_id uci; -- cgit v1.2.3-58-ga151 From 885a64715fd81e6af6d94a038556e0b2e6deb19c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 1 Apr 2020 08:06:54 +0200 Subject: Revert "driver core: platform: Initialize dma_parms for platform devices" This reverts commit 7c8978c0837d40c302f5e90d24c298d9ca9fc097, a new version will come in the next release cycle. Cc: Cc: Russell King Cc: Christoph Hellwig Cc: Ludovic Barre Cc: Linus Walleij Cc: Arnd Bergmann Cc: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/base/platform.c | 1 - include/linux/platform_device.h | 1 - 2 files changed, 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 46abbfb52655..b5ce7b085795 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -512,7 +512,6 @@ int platform_device_add(struct platform_device *pdev) pdev->dev.parent = &platform_bus; pdev->dev.bus = &platform_bus_type; - pdev->dev.dma_parms = &pdev->dma_parms; switch (pdev->id) { default: diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 81900b3cbe37..041bfa412aa0 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -25,7 +25,6 @@ struct platform_device { bool id_auto; struct device dev; u64 platform_dma_mask; - struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; -- cgit v1.2.3-58-ga151 From 73d20564e0dcae003e0d79977f044d5e57496304 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 31 Mar 2020 22:18:49 +0200 Subject: hrtimer: Don't dereference the hrtimer pointer after the callback A hrtimer can be released in its callback, but lockdep_hrtimer_exit() dereferences the pointer after the callback returns, i.e. a potential use after free. Retrieve the context in which the hrtimer expires before the callback is invoked and use it in lockdep_hrtimer_exit(). 
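The pattern, in a simplified illustrative fragment mirroring the hunks below (not the literal kernel source):

	bool expires_in_hardirq;
	enum hrtimer_restart restart;

	/* Before: the exit hook dereferences the timer after fn() ran */
	lockdep_hrtimer_enter(timer);		/* reads timer->is_hard */
	restart = fn(timer);			/* callback may free 'timer' */
	lockdep_hrtimer_exit(timer);		/* potential use after free */

	/* After: capture the expiry context up front, pass it by value */
	expires_in_hardirq = lockdep_hrtimer_enter(timer);
	restart = fn(timer);
	lockdep_hrtimer_exit(expires_in_hardirq);	/* no dereference */
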
Fixes: 40db173965c0 ("lockdep: Add hrtimer context tracing bits")
Reported-by: syzbot+62c155c276e580cfb606@syzkaller.appspotmail.com
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20200331201849.fkp2siy3vcdqvqlz@linutronix.de
---
 include/linux/irqflags.h | 27 ++++++++++++++++-----------
 kernel/time/hrtimer.c | 5 +++--
 2 files changed, 19 insertions(+), 13 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index ceca42de4438..61a9ced3aa50 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -58,16 +58,21 @@ do { \
 } while (0)
 # define lockdep_hrtimer_enter(__hrtimer) \
-	do { \
-		if (!__hrtimer->is_hard) \
-			current->irq_config = 1; \
-	} while (0)
-
-# define lockdep_hrtimer_exit(__hrtimer) \
-	do { \
-		if (!__hrtimer->is_hard) \
+({ \
+	bool __expires_hardirq = true; \
+ \
+	if (!__hrtimer->is_hard) { \
+		current->irq_config = 1; \
+		__expires_hardirq = false; \
+	} \
+	__expires_hardirq; \
+})
+
+# define lockdep_hrtimer_exit(__expires_hardirq) \
+	do { \
+		if (!__expires_hardirq) \
 			current->irq_config = 0; \
-	} while (0)
+	} while (0)
 # define lockdep_posixtimer_enter() \
	do { \
@@ -102,8 +107,8 @@ do { \
 # define lockdep_hardirq_exit() do { } while (0)
 # define lockdep_softirq_enter() do { } while (0)
 # define lockdep_softirq_exit() do { } while (0)
-# define lockdep_hrtimer_enter(__hrtimer) do { } while (0)
-# define lockdep_hrtimer_exit(__hrtimer) do { } while (0)
+# define lockdep_hrtimer_enter(__hrtimer) false
+# define lockdep_hrtimer_exit(__context) do { } while (0)
 # define lockdep_posixtimer_enter() do { } while (0)
 # define lockdep_posixtimer_exit() do { } while (0)
 # define lockdep_irq_work_enter(__work) do { } while (0)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d0a5ba37aff4..d89da1c7e005 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1480,6 +1480,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 			  unsigned long flags) __must_hold(&cpu_base->lock)
 {
 	enum hrtimer_restart (*fn)(struct hrtimer *);
+	bool expires_in_hardirq;
 	int restart;
 	lockdep_assert_held(&cpu_base->lock);
@@ -1514,11 +1515,11 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 	 */
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 	trace_hrtimer_expire_entry(timer, now);
-	lockdep_hrtimer_enter(timer);
+	expires_in_hardirq = lockdep_hrtimer_enter(timer);
 	restart = fn(timer);
-	lockdep_hrtimer_exit(timer);
+	lockdep_hrtimer_exit(expires_in_hardirq);
 	trace_hrtimer_expire_exit(timer);
 	raw_spin_lock_irq(&cpu_base->lock);
-- cgit v1.2.3-58-ga151


From 0bbe30668d89ec8a309f28ced6d092c90fb23e8c Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Thu, 26 Mar 2020 22:01:19 +0800
Subject: vhost: factor out IOTLB

This patch factors out the IOTLB into a dedicated module so that it can
be reused by other modules like vringh. Users may choose to enable
automatic retiring of the oldest entries by specifying the
VHOST_IOTLB_FLAG_RETIRE flag, which fits the vhost device IOTLB
implementation.

Signed-off-by: Jason Wang
Link: https://lore.kernel.org/r/20200326140125.19794-4-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin
Tsirkin --- MAINTAINERS | 1 + drivers/vhost/Kconfig | 6 ++ drivers/vhost/Makefile | 3 + drivers/vhost/iotlb.c | 177 +++++++++++++++++++++++++++++++++++ drivers/vhost/net.c | 2 +- drivers/vhost/vhost.c | 221 ++++++++++++++------------------------------ drivers/vhost/vhost.h | 39 +++----- include/linux/vhost_iotlb.h | 47 ++++++++++ 8 files changed, 315 insertions(+), 181 deletions(-) create mode 100644 drivers/vhost/iotlb.c create mode 100644 include/linux/vhost_iotlb.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index cc1d18cb5d18..19363ed5e723 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17766,6 +17766,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git S: Maintained F: drivers/vhost/ F: include/uapi/linux/vhost.h +F: include/linux/vhost_iotlb.h VIRTIO INPUT DRIVER M: Gerd Hoffmann diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index e775beddc36a..37400a1655b4 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -1,4 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only +config VHOST_IOTLB + tristate + help + Generic IOTLB implementation for vhost and vringh. + config VHOST_RING tristate help @@ -67,4 +72,5 @@ config VHOST_CROSS_ENDIAN_LEGACY adds some overhead, it is disabled by default. If unsure, say "N". + endif diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile index 6c6df24f770c..fb831002bcf0 100644 --- a/drivers/vhost/Makefile +++ b/drivers/vhost/Makefile @@ -11,3 +11,6 @@ vhost_vsock-y := vsock.o obj-$(CONFIG_VHOST_RING) += vringh.o obj-$(CONFIG_VHOST) += vhost.o + +obj-$(CONFIG_VHOST_IOTLB) += vhost_iotlb.o +vhost_iotlb-y := iotlb.o diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c new file mode 100644 index 000000000000..1f0ca6e44410 --- /dev/null +++ b/drivers/vhost/iotlb.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Red Hat, Inc. + * Author: Jason Wang + * + * IOTLB implementation for vhost. 
+ */ +#include +#include +#include + +#define MOD_VERSION "0.1" +#define MOD_DESC "VHOST IOTLB" +#define MOD_AUTHOR "Jason Wang " +#define MOD_LICENSE "GPL v2" + +#define START(map) ((map)->start) +#define LAST(map) ((map)->last) + +INTERVAL_TREE_DEFINE(struct vhost_iotlb_map, + rb, __u64, __subtree_last, + START, LAST, static inline, vhost_iotlb_itree); + +/** + * vhost_iotlb_map_free - remove a map node and free it + * @iotlb: the IOTLB + * @map: the map to be removed and freed + */ +void vhost_iotlb_map_free(struct vhost_iotlb *iotlb, + struct vhost_iotlb_map *map) +{ + vhost_iotlb_itree_remove(map, &iotlb->root); + list_del(&map->link); + kfree(map); + iotlb->nmaps--; +} +EXPORT_SYMBOL_GPL(vhost_iotlb_map_free); + +/** + * vhost_iotlb_add_range - add a new range to vhost IOTLB + * @iotlb: the IOTLB + * @start: start of the IOVA range + * @last: last of IOVA range + * @addr: the address that is mapped to @start + * @perm: access permission of this range + * + * Returns an error if last is smaller than start or if memory allocation + * fails + */ +int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, + u64 start, u64 last, + u64 addr, unsigned int perm) +{ + struct vhost_iotlb_map *map; + + if (last < start) + return -EFAULT; + + if (iotlb->limit && + iotlb->nmaps == iotlb->limit && + iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) { + map = list_first_entry(&iotlb->list, typeof(*map), link); + vhost_iotlb_map_free(iotlb, map); + } + + map = kmalloc(sizeof(*map), GFP_ATOMIC); + if (!map) + return -ENOMEM; + + map->start = start; + map->size = last - start + 1; + map->last = last; + map->addr = addr; + map->perm = perm; + + iotlb->nmaps++; + vhost_iotlb_itree_insert(map, &iotlb->root); + + INIT_LIST_HEAD(&map->link); + list_add_tail(&map->link, &iotlb->list); + + return 0; +} +EXPORT_SYMBOL_GPL(vhost_iotlb_add_range); + +/** + * vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB + * @iotlb: the IOTLB + * @start: start of the IOVA range + * @last: last of IOVA range + */ +void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last) +{ + struct vhost_iotlb_map *map; + + while ((map = vhost_iotlb_itree_iter_first(&iotlb->root, + start, last))) + vhost_iotlb_map_free(iotlb, map); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_del_range); + +/** + * vhost_iotlb_alloc - allocate a new vhost IOTLB + * @limit: maximum number of IOTLB entries + * @flags: VHOST_IOTLB_FLAG_XXX + * + * Returns NULL if memory allocation fails + */ +struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags) +{ + struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL); + + if (!iotlb) + return NULL; + + iotlb->root = RB_ROOT_CACHED; + iotlb->limit = limit; + iotlb->nmaps = 0; + iotlb->flags = flags; + INIT_LIST_HEAD(&iotlb->list); + + return iotlb; +} +EXPORT_SYMBOL_GPL(vhost_iotlb_alloc); + +/** + * vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries) + * @iotlb: the IOTLB to be reset + */ +void vhost_iotlb_reset(struct vhost_iotlb *iotlb) +{ + vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_reset); + +/** + * vhost_iotlb_free - reset and free vhost IOTLB + * @iotlb: the IOTLB to be freed + */ +void vhost_iotlb_free(struct vhost_iotlb *iotlb) +{ + if (iotlb) { + vhost_iotlb_reset(iotlb); + kfree(iotlb); + } +} +EXPORT_SYMBOL_GPL(vhost_iotlb_free); + +/** + * vhost_iotlb_itree_first - return the first overlapped range + * @iotlb: the IOTLB + * @start: start of IOVA range + * @last: last of IOVA range + */ +struct vhost_iotlb_map *
+vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last) +{ + return vhost_iotlb_itree_iter_first(&iotlb->root, start, last); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first); + +/** + * vhost_iotlb_itree_next - return the next overlapped range + * @map: the map node to continue from + * @start: start of IOVA range + * @last: last of IOVA range + */ +struct vhost_iotlb_map * +vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last) +{ + return vhost_iotlb_itree_iter_next(map, start, last); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next); + +MODULE_VERSION(MOD_VERSION); +MODULE_DESCRIPTION(MOD_DESC); +MODULE_AUTHOR(MOD_AUTHOR); +MODULE_LICENSE(MOD_LICENSE); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 7b1d2dfec7f2..87469d67ede8 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1587,7 +1587,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) struct socket *tx_sock = NULL; struct socket *rx_sock = NULL; long err; - struct vhost_umem *umem; + struct vhost_iotlb *umem; mutex_lock(&n->dev.mutex); err = vhost_dev_check_owner(&n->dev); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 8e9e2341e40a..d450e16c5c25 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -50,10 +50,6 @@ enum { #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) -INTERVAL_TREE_DEFINE(struct vhost_umem_node, - rb, __u64, __subtree_last, - START, LAST, static inline, vhost_umem_interval_tree); - #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) { @@ -584,21 +580,25 @@ err_mm: } EXPORT_SYMBOL_GPL(vhost_dev_set_owner); -struct vhost_umem *vhost_dev_reset_owner_prepare(void) +static struct vhost_iotlb *iotlb_alloc(void) +{ + return vhost_iotlb_alloc(max_iotlb_entries, + VHOST_IOTLB_FLAG_RETIRE); +} + +struct vhost_iotlb *vhost_dev_reset_owner_prepare(void) { - return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL); + return iotlb_alloc(); } EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); /* Caller should have device mutex */ -void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem) +void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem) { int i; vhost_dev_cleanup(dev); - /* Restore memory to default empty mapping. */ - INIT_LIST_HEAD(&umem->umem_list); dev->umem = umem; /* We don't need VQ locks below since vhost_dev_cleanup makes sure * VQs aren't running.
@@ -621,28 +621,6 @@ void vhost_dev_stop(struct vhost_dev *dev) } EXPORT_SYMBOL_GPL(vhost_dev_stop); -static void vhost_umem_free(struct vhost_umem *umem, - struct vhost_umem_node *node) -{ - vhost_umem_interval_tree_remove(node, &umem->umem_tree); - list_del(&node->link); - kfree(node); - umem->numem--; -} - -static void vhost_umem_clean(struct vhost_umem *umem) -{ - struct vhost_umem_node *node, *tmp; - - if (!umem) - return; - - list_for_each_entry_safe(node, tmp, &umem->umem_list, link) - vhost_umem_free(umem, node); - - kvfree(umem); -} - static void vhost_clear_msg(struct vhost_dev *dev) { struct vhost_msg_node *node, *n; @@ -680,9 +658,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev) eventfd_ctx_put(dev->log_ctx); dev->log_ctx = NULL; /* No one will access memory at this point */ - vhost_umem_clean(dev->umem); + vhost_iotlb_free(dev->umem); dev->umem = NULL; - vhost_umem_clean(dev->iotlb); + vhost_iotlb_free(dev->iotlb); dev->iotlb = NULL; vhost_clear_msg(dev); wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); @@ -718,27 +696,26 @@ static bool vhost_overflow(u64 uaddr, u64 size) } /* Caller should have vq mutex and device mutex. */ -static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, +static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem, int log_all) { - struct vhost_umem_node *node; + struct vhost_iotlb_map *map; if (!umem) return false; - list_for_each_entry(node, &umem->umem_list, link) { - unsigned long a = node->userspace_addr; + list_for_each_entry(map, &umem->list, link) { + unsigned long a = map->addr; - if (vhost_overflow(node->userspace_addr, node->size)) + if (vhost_overflow(map->addr, map->size)) return false; - if (!access_ok((void __user *)a, - node->size)) + if (!access_ok((void __user *)a, map->size)) return false; else if (log_all && !log_access_ok(log_base, - node->start, - node->size)) + map->start, + map->size)) return false; } return true; @@ -748,17 +725,17 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, u64 addr, unsigned int size, int type) { - const struct vhost_umem_node *node = vq->meta_iotlb[type]; + const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; - if (!node) + if (!map) return NULL; - return (void *)(uintptr_t)(node->userspace_addr + addr - node->start); + return (void *)(uintptr_t)(map->addr + addr - map->start); } /* Can we switch to this memory table? 
*/ /* Caller should have device mutex but not vq mutex */ -static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, +static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem, int log_all) { int i; @@ -1023,47 +1000,6 @@ static inline int vhost_get_desc(struct vhost_virtqueue *vq, return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); } -static int vhost_new_umem_range(struct vhost_umem *umem, - u64 start, u64 size, u64 end, - u64 userspace_addr, int perm) -{ - struct vhost_umem_node *tmp, *node; - - if (!size) - return -EFAULT; - - node = kmalloc(sizeof(*node), GFP_ATOMIC); - if (!node) - return -ENOMEM; - - if (umem->numem == max_iotlb_entries) { - tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link); - vhost_umem_free(umem, tmp); - } - - node->start = start; - node->size = size; - node->last = end; - node->userspace_addr = userspace_addr; - node->perm = perm; - INIT_LIST_HEAD(&node->link); - list_add_tail(&node->link, &umem->umem_list); - vhost_umem_interval_tree_insert(node, &umem->umem_tree); - umem->numem++; - - return 0; -} - -static void vhost_del_umem_range(struct vhost_umem *umem, - u64 start, u64 end) -{ - struct vhost_umem_node *node; - - while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, - start, end))) - vhost_umem_free(umem, node); -} - static void vhost_iotlb_notify_vq(struct vhost_dev *d, struct vhost_iotlb_msg *msg) { @@ -1120,9 +1056,9 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, break; } vhost_vq_meta_reset(dev); - if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size, - msg->iova + msg->size - 1, - msg->uaddr, msg->perm)) { + if (vhost_iotlb_add_range(dev->iotlb, msg->iova, + msg->iova + msg->size - 1, + msg->uaddr, msg->perm)) { ret = -ENOMEM; break; } @@ -1134,8 +1070,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, break; } vhost_vq_meta_reset(dev); - vhost_del_umem_range(dev->iotlb, msg->iova, - msg->iova + msg->size - 1); + vhost_iotlb_del_range(dev->iotlb, msg->iova, + msg->iova + msg->size - 1); break; default: ret = -EINVAL; @@ -1319,44 +1255,42 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, } static void vhost_vq_meta_update(struct vhost_virtqueue *vq, - const struct vhost_umem_node *node, + const struct vhost_iotlb_map *map, int type) { int access = (type == VHOST_ADDR_USED) ? VHOST_ACCESS_WO : VHOST_ACCESS_RO; - if (likely(node->perm & access)) - vq->meta_iotlb[type] = node; + if (likely(map->perm & access)) + vq->meta_iotlb[type] = map; } static bool iotlb_access_ok(struct vhost_virtqueue *vq, int access, u64 addr, u64 len, int type) { - const struct vhost_umem_node *node; - struct vhost_umem *umem = vq->iotlb; + const struct vhost_iotlb_map *map; + struct vhost_iotlb *umem = vq->iotlb; u64 s = 0, size, orig_addr = addr, last = addr + len - 1; if (vhost_vq_meta_fetch(vq, addr, len, type)) return true; while (len > s) { - node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, - addr, - last); - if (node == NULL || node->start > addr) { + map = vhost_iotlb_itree_first(umem, addr, last); + if (map == NULL || map->start > addr) { vhost_iotlb_miss(vq, addr, access); return false; - } else if (!(node->perm & access)) { + } else if (!(map->perm & access)) { /* Report the possible access violation by * request another translation from userspace. 
*/ return false; } - size = node->size - addr + node->start; + size = map->size - addr + map->start; if (orig_addr == addr && size >= len) - vhost_vq_meta_update(vq, node, type); + vhost_vq_meta_update(vq, map, type); s += size; addr += size; @@ -1372,12 +1306,12 @@ int vq_meta_prefetch(struct vhost_virtqueue *vq) if (!vq->iotlb) return 1; - return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, + return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && - iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, + iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, vhost_get_avail_size(vq, num), VHOST_ADDR_AVAIL) && - iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, + iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, vhost_get_used_size(vq, num), VHOST_ADDR_USED); } EXPORT_SYMBOL_GPL(vq_meta_prefetch); @@ -1416,25 +1350,11 @@ bool vhost_vq_access_ok(struct vhost_virtqueue *vq) } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); -static struct vhost_umem *vhost_umem_alloc(void) -{ - struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL); - - if (!umem) - return NULL; - - umem->umem_tree = RB_ROOT_CACHED; - umem->numem = 0; - INIT_LIST_HEAD(&umem->umem_list); - - return umem; -} - static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem; struct vhost_memory_region *region; - struct vhost_umem *newumem, *oldumem; + struct vhost_iotlb *newumem, *oldumem; unsigned long size = offsetof(struct vhost_memory, regions); int i; @@ -1456,7 +1376,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -EFAULT; } - newumem = vhost_umem_alloc(); + newumem = iotlb_alloc(); if (!newumem) { kvfree(newmem); return -ENOMEM; @@ -1465,13 +1385,12 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) for (region = newmem->regions; region < newmem->regions + mem.nregions; region++) { - if (vhost_new_umem_range(newumem, - region->guest_phys_addr, - region->memory_size, - region->guest_phys_addr + - region->memory_size - 1, - region->userspace_addr, - VHOST_ACCESS_RW)) + if (vhost_iotlb_add_range(newumem, + region->guest_phys_addr, + region->guest_phys_addr + + region->memory_size - 1, + region->userspace_addr, + VHOST_MAP_RW)) goto err; } @@ -1489,11 +1408,11 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) } kvfree(newmem); - vhost_umem_clean(oldumem); + vhost_iotlb_free(oldumem); return 0; err: - vhost_umem_clean(newumem); + vhost_iotlb_free(newumem); kvfree(newmem); return -EFAULT; } @@ -1734,10 +1653,10 @@ EXPORT_SYMBOL_GPL(vhost_vring_ioctl); int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) { - struct vhost_umem *niotlb, *oiotlb; + struct vhost_iotlb *niotlb, *oiotlb; int i; - niotlb = vhost_umem_alloc(); + niotlb = iotlb_alloc(); if (!niotlb) return -ENOMEM; @@ -1753,7 +1672,7 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) mutex_unlock(&vq->mutex); } - vhost_umem_clean(oiotlb); + vhost_iotlb_free(oiotlb); return 0; } @@ -1883,8 +1802,8 @@ static int log_write(void __user *log_base, static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) { - struct vhost_umem *umem = vq->umem; - struct vhost_umem_node *u; + struct vhost_iotlb *umem = vq->umem; + struct vhost_iotlb_map *u; u64 start, end, l, min; int r; bool hit = false; @@ -1894,16 +1813,15 @@ static int log_write_hva(struct vhost_virtqueue *vq, 
u64 hva, u64 len) /* More than one GPAs can be mapped into a single HVA. So * iterate all possible umems here to be safe. */ - list_for_each_entry(u, &umem->umem_list, link) { - if (u->userspace_addr > hva - 1 + len || - u->userspace_addr - 1 + u->size < hva) + list_for_each_entry(u, &umem->list, link) { + if (u->addr > hva - 1 + len || + u->addr - 1 + u->size < hva) continue; - start = max(u->userspace_addr, hva); - end = min(u->userspace_addr - 1 + u->size, - hva - 1 + len); + start = max(u->addr, hva); + end = min(u->addr - 1 + u->size, hva - 1 + len); l = end - start + 1; r = log_write(vq->log_base, - u->start + start - u->userspace_addr, + u->start + start - u->addr, l); if (r < 0) return r; @@ -2054,9 +1972,9 @@ EXPORT_SYMBOL_GPL(vhost_vq_init_access); static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct iovec iov[], int iov_size, int access) { - const struct vhost_umem_node *node; + const struct vhost_iotlb_map *map; struct vhost_dev *dev = vq->dev; - struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem; + struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem; struct iovec *_iov; u64 s = 0; int ret = 0; @@ -2068,25 +1986,24 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, break; } - node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, - addr, addr + len - 1); - if (node == NULL || node->start > addr) { + map = vhost_iotlb_itree_first(umem, addr, addr + len - 1); + if (map == NULL || map->start > addr) { if (umem != dev->iotlb) { ret = -EFAULT; break; } ret = -EAGAIN; break; - } else if (!(node->perm & access)) { + } else if (!(map->perm & access)) { ret = -EPERM; break; } _iov = iov + ret; - size = node->size - addr + node->start; + size = map->size - addr + map->start; _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) - (node->userspace_addr + addr - node->start); + (map->addr + addr - map->start); s += size; addr += size; ++ret; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index f9d1a03dd153..181382185bbc 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -12,6 +12,7 @@ #include #include #include +#include struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *work); @@ -52,27 +53,6 @@ struct vhost_log { u64 len; }; -#define START(node) ((node)->start) -#define LAST(node) ((node)->last) - -struct vhost_umem_node { - struct rb_node rb; - struct list_head link; - __u64 start; - __u64 last; - __u64 size; - __u64 userspace_addr; - __u32 perm; - __u32 flags_padding; - __u64 __subtree_last; -}; - -struct vhost_umem { - struct rb_root_cached umem_tree; - struct list_head umem_list; - int numem; -}; - enum vhost_uaddr_type { VHOST_ADDR_DESC = 0, VHOST_ADDR_AVAIL = 1, @@ -90,7 +70,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; - const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; + const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct eventfd_ctx *call_ctx; struct eventfd_ctx *error_ctx; @@ -128,8 +108,8 @@ struct vhost_virtqueue { struct iovec *indirect; struct vring_used_elem *heads; /* Protected by virtqueue mutex. 
*/ - struct vhost_umem *umem; - struct vhost_umem *iotlb; + struct vhost_iotlb *umem; + struct vhost_iotlb *iotlb; void *private_data; u64 acked_features; u64 acked_backend_features; @@ -164,8 +144,8 @@ struct vhost_dev { struct eventfd_ctx *log_ctx; struct llist_head work_list; struct task_struct *worker; - struct vhost_umem *umem; - struct vhost_umem *iotlb; + struct vhost_iotlb *umem; + struct vhost_iotlb *iotlb; spinlock_t iotlb_lock; struct list_head read_list; struct list_head pending_list; @@ -186,8 +166,8 @@ void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); -struct vhost_umem *vhost_dev_reset_owner_prepare(void); -void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *); +struct vhost_iotlb *vhost_dev_reset_owner_prepare(void); +void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb); void vhost_dev_cleanup(struct vhost_dev *); void vhost_dev_stop(struct vhost_dev *); long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp); @@ -233,6 +213,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, struct iov_iter *from); int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled); +void vhost_iotlb_map_free(struct vhost_iotlb *iotlb, + struct vhost_iotlb_map *map); + #define vq_err(vq, fmt, ...) do { \ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ if ((vq)->error_ctx) \ diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h new file mode 100644 index 000000000000..6b09b786a762 --- /dev/null +++ b/include/linux/vhost_iotlb.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VHOST_IOTLB_H +#define _LINUX_VHOST_IOTLB_H + +#include + +struct vhost_iotlb_map { + struct rb_node rb; + struct list_head link; + u64 start; + u64 last; + u64 size; + u64 addr; +#define VHOST_MAP_RO 0x1 +#define VHOST_MAP_WO 0x2 +#define VHOST_MAP_RW 0x3 + u32 perm; + u32 flags_padding; + u64 __subtree_last; +}; + +#define VHOST_IOTLB_FLAG_RETIRE 0x1 + +struct vhost_iotlb { + struct rb_root_cached root; + struct list_head list; + unsigned int limit; + unsigned int nmaps; + unsigned int flags; +}; + +int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last, + u64 addr, unsigned int perm); +void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last); + +struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags); +void vhost_iotlb_free(struct vhost_iotlb *iotlb); +void vhost_iotlb_reset(struct vhost_iotlb *iotlb); + +struct vhost_iotlb_map * +vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last); +struct vhost_iotlb_map * +vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last); + +void vhost_iotlb_map_free(struct vhost_iotlb *iotlb, + struct vhost_iotlb_map *map); +#endif -- cgit v1.2.3-58-ga151 From 9ad9c49cfe970b053bb0ef323b682dd1b4d4f8a0 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 26 Mar 2020 22:01:20 +0800 Subject: vringh: IOTLB support This patch implements the third memory accessor for vringh besides current kernel and userspace accessors. This idea is to allow vringh to do the address translation through an IOTLB which is implemented via vhost_map interval tree. Users should setup and IOVA to PA mapping in this IOTLB. 
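As a rough sketch of that setup (illustrative only: the IOVA window, the physical address pa and the ring vrh are hypothetical, and error handling is omitted):

	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);

	/* map IOVA [0x100000, 0x10ffff] read/write to physical address pa */
	vhost_iotlb_add_range(iotlb, 0x100000, 0x10ffff, pa, VHOST_MAP_RW);

	/* subsequent vringh accesses translate through this IOTLB */
	vringh_set_iotlb(&vrh, iotlb);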
This allows us to: - Use vringh to access virtqueues with vIOMMU - Use vringh to implement software virtqueues for vDPA devices Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200326140125.19794-5-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin --- drivers/vhost/Kconfig | 1 + drivers/vhost/vringh.c | 421 ++++++++++++++++++++++++++++++++++++++++++++++--- include/linux/vringh.h | 36 +++++ 3 files changed, 435 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 37400a1655b4..128238488078 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -6,6 +6,7 @@ config VHOST_IOTLB config VHOST_RING tristate + select VHOST_IOTLB help This option is selected by any driver which needs to access the host side of a virtio ring. diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index a0a2d74967ef..ee0491f579ac 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -13,6 +13,9 @@ #include #include #include +#include +#include +#include #include static __printf(1,2) __cold void vringh_bad(const char *fmt, ...) @@ -71,9 +74,11 @@ static inline int __vringh_get_head(const struct vringh *vrh, } /* Copy some bytes to/from the iovec. Returns num copied. */ -static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, +static inline ssize_t vringh_iov_xfer(struct vringh *vrh, + struct vringh_kiov *iov, void *ptr, size_t len, - int (*xfer)(void *addr, void *ptr, + int (*xfer)(const struct vringh *vrh, + void *addr, void *ptr, size_t len)) { int err, done = 0; @@ -82,7 +87,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, size_t partlen; partlen = min(iov->iov[iov->i].iov_len, len); - err = xfer(iov->iov[iov->i].iov_base, ptr, partlen); + err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen); if (err) return err; done += partlen; @@ -96,6 +101,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, /* Fix up old iov element then increment. 
*/ iov->iov[iov->i].iov_len = iov->consumed; iov->iov[iov->i].iov_base -= iov->consumed; + iov->consumed = 0; iov->i++; @@ -227,7 +233,8 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src, u64 addr, struct vringh_range *r), struct vringh_range *range, - int (*copy)(void *dst, const void *src, size_t len)) + int (*copy)(const struct vringh *vrh, + void *dst, const void *src, size_t len)) { size_t part, len = sizeof(struct vring_desc); @@ -241,7 +248,7 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src, if (!rcheck(vrh, addr, &part, range, getrange)) return -EINVAL; - err = copy(dst, src, part); + err = copy(vrh, dst, src, part); if (err) return err; @@ -262,7 +269,8 @@ __vringh_iov(struct vringh *vrh, u16 i, struct vringh_range *)), bool (*getrange)(struct vringh *, u64, struct vringh_range *), gfp_t gfp, - int (*copy)(void *dst, const void *src, size_t len)) + int (*copy)(const struct vringh *vrh, + void *dst, const void *src, size_t len)) { int err, count = 0, up_next, desc_max; struct vring_desc desc, *descs; @@ -291,7 +299,7 @@ __vringh_iov(struct vringh *vrh, u16 i, err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange, &slowrange, copy); else - err = copy(&desc, &descs[i], sizeof(desc)); + err = copy(vrh, &desc, &descs[i], sizeof(desc)); if (unlikely(err)) goto fail; @@ -404,7 +412,8 @@ static inline int __vringh_complete(struct vringh *vrh, unsigned int num_used, int (*putu16)(const struct vringh *vrh, __virtio16 *p, u16 val), - int (*putused)(struct vring_used_elem *dst, + int (*putused)(const struct vringh *vrh, + struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned num)) { @@ -420,12 +429,12 @@ static inline int __vringh_complete(struct vringh *vrh, /* Compiler knows num_used == 1 sometimes, hence extra check */ if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) { u16 part = vrh->vring.num - off; - err = putused(&used_ring->ring[off], used, part); + err = putused(vrh, &used_ring->ring[off], used, part); if (!err) - err = putused(&used_ring->ring[0], used + part, + err = putused(vrh, &used_ring->ring[0], used + part, num_used - part); } else - err = putused(&used_ring->ring[off], used, num_used); + err = putused(vrh, &used_ring->ring[off], used, num_used); if (err) { vringh_bad("Failed to write %u used entries %u at %p", @@ -564,13 +573,15 @@ static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val) return put_user(v, (__force __virtio16 __user *)p); } -static inline int copydesc_user(void *dst, const void *src, size_t len) +static inline int copydesc_user(const struct vringh *vrh, + void *dst, const void *src, size_t len) { return copy_from_user(dst, (__force void __user *)src, len) ? -EFAULT : 0; } -static inline int putused_user(struct vring_used_elem *dst, +static inline int putused_user(const struct vringh *vrh, + struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { @@ -578,13 +589,15 @@ static inline int putused_user(struct vring_used_elem *dst, sizeof(*dst) * num) ? -EFAULT : 0; } -static inline int xfer_from_user(void *src, void *dst, size_t len) +static inline int xfer_from_user(const struct vringh *vrh, void *src, + void *dst, size_t len) { return copy_from_user(dst, (__force void __user *)src, len) ? -EFAULT : 0; } -static inline int xfer_to_user(void *dst, void *src, size_t len) +static inline int xfer_to_user(const struct vringh *vrh, + void *dst, void *src, size_t len) { return copy_to_user((__force void __user *)dst, src, len) ? 
-EFAULT : 0; @@ -706,7 +719,7 @@ EXPORT_SYMBOL(vringh_getdesc_user); */ ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len) { - return vringh_iov_xfer((struct vringh_kiov *)riov, + return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov, dst, len, xfer_from_user); } EXPORT_SYMBOL(vringh_iov_pull_user); @@ -722,7 +735,7 @@ EXPORT_SYMBOL(vringh_iov_pull_user); ssize_t vringh_iov_push_user(struct vringh_iov *wiov, const void *src, size_t len) { - return vringh_iov_xfer((struct vringh_kiov *)wiov, + return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov, (void *)src, len, xfer_to_user); } EXPORT_SYMBOL(vringh_iov_push_user); @@ -832,13 +845,15 @@ static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) return 0; } -static inline int copydesc_kern(void *dst, const void *src, size_t len) +static inline int copydesc_kern(const struct vringh *vrh, + void *dst, const void *src, size_t len) { memcpy(dst, src, len); return 0; } -static inline int putused_kern(struct vring_used_elem *dst, +static inline int putused_kern(const struct vringh *vrh, + struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { @@ -846,13 +861,15 @@ static inline int putused_kern(struct vring_used_elem *dst, return 0; } -static inline int xfer_kern(void *src, void *dst, size_t len) +static inline int xfer_kern(const struct vringh *vrh, void *src, + void *dst, size_t len) { memcpy(dst, src, len); return 0; } -static inline int kern_xfer(void *dst, void *src, size_t len) +static inline int kern_xfer(const struct vringh *vrh, void *dst, + void *src, size_t len) { memcpy(dst, src, len); return 0; @@ -949,7 +966,7 @@ EXPORT_SYMBOL(vringh_getdesc_kern); */ ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len) { - return vringh_iov_xfer(riov, dst, len, xfer_kern); + return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern); } EXPORT_SYMBOL(vringh_iov_pull_kern); @@ -964,7 +981,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern); ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, const void *src, size_t len) { - return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer); + return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer); } EXPORT_SYMBOL(vringh_iov_push_kern); @@ -1042,4 +1059,362 @@ int vringh_need_notify_kern(struct vringh *vrh) } EXPORT_SYMBOL(vringh_need_notify_kern); +static int iotlb_translate(const struct vringh *vrh, + u64 addr, u64 len, struct bio_vec iov[], + int iov_size, u32 perm) +{ + struct vhost_iotlb_map *map; + struct vhost_iotlb *iotlb = vrh->iotlb; + int ret = 0; + u64 s = 0; + + while (len > s) { + u64 size, pa, pfn; + + if (unlikely(ret >= iov_size)) { + ret = -ENOBUFS; + break; + } + + map = vhost_iotlb_itree_first(iotlb, addr, + addr + len - 1); + if (!map || map->start > addr) { + ret = -EINVAL; + break; + } else if (!(map->perm & perm)) { + ret = -EPERM; + break; + } + + size = map->size - addr + map->start; + pa = map->addr + addr - map->start; + pfn = pa >> PAGE_SHIFT; + iov[ret].bv_page = pfn_to_page(pfn); + iov[ret].bv_len = min(len - s, size); + iov[ret].bv_offset = pa & (PAGE_SIZE - 1); + s += size; + addr += size; + ++ret; + } + + return ret; +} + +static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, + void *src, size_t len) +{ + struct iov_iter iter; + struct bio_vec iov[16]; + int ret; + + ret = iotlb_translate(vrh, (u64)(uintptr_t)src, + len, iov, 16, VHOST_MAP_RO); + if (ret < 0) + return ret; + + iov_iter_bvec(&iter, READ, iov, ret, len); + + ret = copy_from_iter(dst, len, 
&iter); + + return ret; +} + +static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, + void *src, size_t len) +{ + struct iov_iter iter; + struct bio_vec iov[16]; + int ret; + + ret = iotlb_translate(vrh, (u64)(uintptr_t)dst, + len, iov, 16, VHOST_MAP_WO); + if (ret < 0) + return ret; + + iov_iter_bvec(&iter, WRITE, iov, ret, len); + + return copy_to_iter(src, len, &iter); +} + +static inline int getu16_iotlb(const struct vringh *vrh, + u16 *val, const __virtio16 *p) +{ + struct bio_vec iov; + void *kaddr, *from; + int ret; + + /* Atomic read is needed for getu16 */ + ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), + &iov, 1, VHOST_MAP_RO); + if (ret < 0) + return ret; + + kaddr = kmap_atomic(iov.bv_page); + from = kaddr + iov.bv_offset; + *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from)); + kunmap_atomic(kaddr); + + return 0; +} + +static inline int putu16_iotlb(const struct vringh *vrh, + __virtio16 *p, u16 val) +{ + struct bio_vec iov; + void *kaddr, *to; + int ret; + + /* Atomic write is needed for putu16 */ + ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), + &iov, 1, VHOST_MAP_WO); + if (ret < 0) + return ret; + + kaddr = kmap_atomic(iov.bv_page); + to = kaddr + iov.bv_offset; + WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val)); + kunmap_atomic(kaddr); + + return 0; +} + +static inline int copydesc_iotlb(const struct vringh *vrh, + void *dst, const void *src, size_t len) +{ + int ret; + + ret = copy_from_iotlb(vrh, dst, (void *)src, len); + if (ret != len) + return -EFAULT; + + return 0; +} + +static inline int xfer_from_iotlb(const struct vringh *vrh, void *src, + void *dst, size_t len) +{ + int ret; + + ret = copy_from_iotlb(vrh, dst, src, len); + if (ret != len) + return -EFAULT; + + return 0; +} + +static inline int xfer_to_iotlb(const struct vringh *vrh, + void *dst, void *src, size_t len) +{ + int ret; + + ret = copy_to_iotlb(vrh, dst, src, len); + if (ret != len) + return -EFAULT; + + return 0; +} + +static inline int putused_iotlb(const struct vringh *vrh, + struct vring_used_elem *dst, + const struct vring_used_elem *src, + unsigned int num) +{ + int size = num * sizeof(*dst); + int ret; + + ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst)); + if (ret != size) + return -EFAULT; + + return 0; +} + +/** + * vringh_init_iotlb - initialize a vringh for a ring with IOTLB. + * @vrh: the vringh to initialize. + * @features: the feature bits for this ring. + * @num: the number of elements. + * @weak_barriers: true if we only need memory barriers, not I/O. + * @desc: the userpace descriptor pointer. + * @avail: the userpace avail pointer. + * @used: the userpace used pointer. + * + * Returns an error if num is invalid. + */ +int vringh_init_iotlb(struct vringh *vrh, u64 features, + unsigned int num, bool weak_barriers, + struct vring_desc *desc, + struct vring_avail *avail, + struct vring_used *used) +{ + return vringh_init_kern(vrh, features, num, weak_barriers, + desc, avail, used); +} +EXPORT_SYMBOL(vringh_init_iotlb); + +/** + * vringh_set_iotlb - initialize a vringh for a ring with IOTLB. + * @vrh: the vring + * @iotlb: iotlb associated with this vring + */ +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb) +{ + vrh->iotlb = iotlb; +} +EXPORT_SYMBOL(vringh_set_iotlb); + +/** + * vringh_getdesc_iotlb - get next available descriptor from ring with + * IOTLB. + * @vrh: the kernelspace vring. 
+ * @riov: where to put the readable descriptors (or NULL) + * @wiov: where to put the writable descriptors (or NULL) + * @head: head index we received, for passing to vringh_complete_iotlb(). + * @gfp: flags for allocating larger riov/wiov. + * + * Returns 0 if there was no descriptor, 1 if there was, or -errno. + * + * Note that on error return, you can tell the difference between an + * invalid ring and a single invalid descriptor: in the former case, + * *head will be vrh->vring.num. You may be able to ignore an invalid + * descriptor, but there's not much you can do with an invalid ring. + * + * Note that you may need to clean up riov and wiov, even on error! + */ +int vringh_getdesc_iotlb(struct vringh *vrh, + struct vringh_kiov *riov, + struct vringh_kiov *wiov, + u16 *head, + gfp_t gfp) +{ + int err; + + err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx); + if (err < 0) + return err; + + /* Empty... */ + if (err == vrh->vring.num) + return 0; + + *head = err; + err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL, + gfp, copydesc_iotlb); + if (err) + return err; + + return 1; +} +EXPORT_SYMBOL(vringh_getdesc_iotlb); + +/** + * vringh_iov_pull_iotlb - copy bytes from vring_iov. + * @vrh: the vring. + * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume) + * @dst: the place to copy. + * @len: the maximum length to copy. + * + * Returns the bytes copied <= len or a negative errno. + */ +ssize_t vringh_iov_pull_iotlb(struct vringh *vrh, + struct vringh_kiov *riov, + void *dst, size_t len) +{ + return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb); +} +EXPORT_SYMBOL(vringh_iov_pull_iotlb); + +/** + * vringh_iov_push_iotlb - copy bytes into vring_iov. + * @vrh: the vring. + * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume) + * @dst: the place to copy. + * @len: the maximum length to copy. + * + * Returns the bytes copied <= len or a negative errno. + */ +ssize_t vringh_iov_push_iotlb(struct vringh *vrh, + struct vringh_kiov *wiov, + const void *src, size_t len) +{ + return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb); +} +EXPORT_SYMBOL(vringh_iov_push_iotlb); + +/** + * vringh_abandon_iotlb - we've decided not to handle the descriptor(s). + * @vrh: the vring. + * @num: the number of descriptors to put back (ie. num + * vringh_get_iotlb() to undo). + * + * The next vringh_get_iotlb() will return the old descriptor(s) again. + */ +void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num) +{ + /* We only update vring_avail_event(vr) when we want to be notified, + * so we haven't changed that yet. + */ + vrh->last_avail_idx -= num; +} +EXPORT_SYMBOL(vringh_abandon_iotlb); + +/** + * vringh_complete_iotlb - we've finished with descriptor, publish it. + * @vrh: the vring. + * @head: the head as filled in by vringh_getdesc_iotlb. + * @len: the length of data we have written. + * + * You should check vringh_need_notify_iotlb() after one or more calls + * to this function. + */ +int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len) +{ + struct vring_used_elem used; + + used.id = cpu_to_vringh32(vrh, head); + used.len = cpu_to_vringh32(vrh, len); + + return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb); +} +EXPORT_SYMBOL(vringh_complete_iotlb); + +/** + * vringh_notify_enable_iotlb - we want to know if something changes. + * @vrh: the vring. + * + * This always enables notifications, but returns false if there are + * now more buffers available in the vring. 
+ */ +bool vringh_notify_enable_iotlb(struct vringh *vrh) +{ + return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb); +} +EXPORT_SYMBOL(vringh_notify_enable_iotlb); + +/** + * vringh_notify_disable_iotlb - don't tell us if something changes. + * @vrh: the vring. + * + * This is our normal running state: we disable and then only enable when + * we're going to sleep. + */ +void vringh_notify_disable_iotlb(struct vringh *vrh) +{ + __vringh_notify_disable(vrh, putu16_iotlb); +} +EXPORT_SYMBOL(vringh_notify_disable_iotlb); + +/** + * vringh_need_notify_iotlb - must we tell the other side about used buffers? + * @vrh: the vring we've called vringh_complete_iotlb() on. + * + * Returns -errno or 0 if we don't need to tell the other side, 1 if we do. + */ +int vringh_need_notify_iotlb(struct vringh *vrh) +{ + return __vringh_need_notify(vrh, getu16_iotlb); +} +EXPORT_SYMBOL(vringh_need_notify_iotlb); + + MODULE_LICENSE("GPL"); diff --git a/include/linux/vringh.h b/include/linux/vringh.h index d237087eb257..bd0503ca6f8f 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include /* virtio_ring with information needed for host access. */ @@ -39,6 +41,9 @@ struct vringh { /* The vring (note: it may contain user pointers!) */ struct vring vring; + /* IOTLB for this vring */ + struct vhost_iotlb *iotlb; + /* The function to call to notify the guest about added buffers */ void (*notify)(struct vringh *); }; @@ -248,4 +253,35 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) { return __cpu_to_virtio64(vringh_is_little_endian(vrh), val); } + +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); + +int vringh_init_iotlb(struct vringh *vrh, u64 features, + unsigned int num, bool weak_barriers, + struct vring_desc *desc, + struct vring_avail *avail, + struct vring_used *used); + +int vringh_getdesc_iotlb(struct vringh *vrh, + struct vringh_kiov *riov, + struct vringh_kiov *wiov, + u16 *head, + gfp_t gfp); + +ssize_t vringh_iov_pull_iotlb(struct vringh *vrh, + struct vringh_kiov *riov, + void *dst, size_t len); +ssize_t vringh_iov_push_iotlb(struct vringh *vrh, + struct vringh_kiov *wiov, + const void *src, size_t len); + +void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num); + +int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len); + +bool vringh_notify_enable_iotlb(struct vringh *vrh); +void vringh_notify_disable_iotlb(struct vringh *vrh); + +int vringh_need_notify_iotlb(struct vringh *vrh); + #endif /* _LINUX_VRINGH_H */ -- cgit v1.2.3-58-ga151 From 961e9c84077f6c8579d7a628cbe94a675cb67ae4 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 26 Mar 2020 22:01:21 +0800 Subject: vDPA: introduce vDPA bus vDPA device is a device that uses a datapath which complies with the virtio specifications with vendor specific control path. vDPA devices can be both physically located on the hardware or emulated by software. vDPA hardware devices are usually implemented through PCIE with the following types: - PF (Physical Function) - A single Physical Function - VF (Virtual Function) - Device that supports single root I/O virtualization (SR-IOV). Its Virtual Function (VF) represents a virtualized instance of the device that can be assigned to different partitions - ADI (Assignable Device Interface) and its equivalents - With technologies such as Intel Scalable IOV, a virtual device (VDEV) composed by host OS utilizing one or more ADIs. 
Or its equivalent like SF (Sub function) from Mellanox. From a driver's perspective, depending on how and where the DMA translation is done, vDPA devices are split into two types: - Platform specific DMA translation - From the driver's perspective, the device can be used on a platform where device access to data in memory is limited and/or translated. An example is a PCIE vDPA whose DMA requests are tagged in a bus (e.g PCIE) specific way. DMA translation and protection are done at the PCIE bus IOMMU level. - Device specific DMA translation - The device implements DMA isolation and protection through its own logic. An example is a vDPA device which uses an on-chip IOMMU. To hide the differences and complexity of the above device/IOMMU options, and in order to present a generic virtio device to the upper layer, a device agnostic framework is required. This patch introduces a software vDPA bus which abstracts the common attributes of vDPA devices, vDPA bus drivers and the communication method (vdpa_config_ops) between the vDPA device abstraction and the vDPA bus driver. This allows multiple types of drivers, like the virtio_vdpa and vhost_vdpa drivers, to operate on the bus, and allows a vDPA device to be used by either a kernel virtio driver or userspace vhost drivers as:

   virtio drivers      vhost drivers
         |                  |
    [virtio bus]       [vhost uAPI]
         |                  |
   virtio device       vhost device
   virtio_vdpa drv     vhost_vdpa drv
          \                /
             [vDPA bus]
                  |
             vDPA device
             hardware drv
                  |
            [hardware bus]
                  |
             vDPA hardware

With the abstraction of the vDPA bus and vDPA bus operations, the differences and complexity of the underlying hardware are hidden from the upper layer. The vDPA bus drivers on top can use a unified vdpa_config_ops to control different types of vDPA devices. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200326140125.19794-6-jasowang@redhat.com Signed-off-by: Michael S. Tsirkin --- MAINTAINERS | 1 + drivers/virtio/Kconfig | 2 + drivers/virtio/Makefile | 1 + drivers/virtio/vdpa/Kconfig | 7 ++ drivers/virtio/vdpa/Makefile | 2 + drivers/virtio/vdpa/vdpa.c | 180 ++++++++++++++++++++++++++++++ include/linux/vdpa.h | 253 +++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 446 insertions(+) create mode 100644 drivers/virtio/vdpa/Kconfig create mode 100644 drivers/virtio/vdpa/Makefile create mode 100644 drivers/virtio/vdpa/vdpa.c create mode 100644 include/linux/vdpa.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index 19363ed5e723..70c47bc55343 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17699,6 +17699,7 @@ F: tools/virtio/ F: drivers/net/virtio_net.c F: drivers/block/virtio_blk.c F: include/linux/virtio*.h +F: include/linux/vdpa.h F: include/uapi/linux/virtio_*.h F: drivers/crypto/virtio/ F: mm/balloon_compaction.c diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 078615cf2afc..9c4fdb64d9ac 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -96,3 +96,5 @@ config VIRTIO_MMIO_CMDLINE_DEVICES If unsure, say 'N'.
endif # VIRTIO_MENU + +source "drivers/virtio/vdpa/Kconfig" diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 3a2b5c5dcf46..fdf5eacd0d0a 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o +obj-$(CONFIG_VDPA) += vdpa/ diff --git a/drivers/virtio/vdpa/Kconfig b/drivers/virtio/vdpa/Kconfig new file mode 100644 index 000000000000..351617723d12 --- /dev/null +++ b/drivers/virtio/vdpa/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VDPA + tristate + help + Enable this module to support vDPA devices that use a + datapath which complies with the virtio specifications with + a vendor specific control path. diff --git a/drivers/virtio/vdpa/Makefile b/drivers/virtio/vdpa/Makefile new file mode 100644 index 000000000000..ee6a35e8a4fb --- /dev/null +++ b/drivers/virtio/vdpa/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VDPA) += vdpa.o diff --git a/drivers/virtio/vdpa/vdpa.c b/drivers/virtio/vdpa/vdpa.c new file mode 100644 index 000000000000..e9ed6a2b635b --- /dev/null +++ b/drivers/virtio/vdpa/vdpa.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * vDPA bus. + * + * Copyright (c) 2020, Red Hat. All rights reserved. + * Author: Jason Wang + * + */ + +#include +#include +#include +#include + +static DEFINE_IDA(vdpa_index_ida); + +static int vdpa_dev_probe(struct device *d) +{ + struct vdpa_device *vdev = dev_to_vdpa(d); + struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver); + int ret = 0; + + if (drv && drv->probe) + ret = drv->probe(vdev); + + return ret; +} + +static int vdpa_dev_remove(struct device *d) +{ + struct vdpa_device *vdev = dev_to_vdpa(d); + struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver); + + if (drv && drv->remove) + drv->remove(vdev); + + return 0; +} + +static struct bus_type vdpa_bus = { + .name = "vdpa", + .probe = vdpa_dev_probe, + .remove = vdpa_dev_remove, +}; + +static void vdpa_release_dev(struct device *d) +{ + struct vdpa_device *vdev = dev_to_vdpa(d); + const struct vdpa_config_ops *ops = vdev->config; + + if (ops->free) + ops->free(vdev); + + ida_simple_remove(&vdpa_index_ida, vdev->index); + kfree(vdev); +} + +/** + * __vdpa_alloc_device - allocate and initialize a vDPA device + * This allows the driver to do some preparation after the device is + * initialized but before it is registered. + * @parent: the parent device + * @config: the bus operations that are supported by this device + * @size: size of the parent structure that contains private data + * + * Drivers should use the vdpa_alloc_device() wrapper macro instead of + * using this directly. + * + * Returns an error when parent/config/dma_dev is not set or when ida + * allocation fails.
+ */ +struct vdpa_device *__vdpa_alloc_device(struct device *parent, + const struct vdpa_config_ops *config, + size_t size) +{ + struct vdpa_device *vdev; + int err = -EINVAL; + + if (!config) + goto err; + + if (!!config->dma_map != !!config->dma_unmap) + goto err; + + err = -ENOMEM; + vdev = kzalloc(size, GFP_KERNEL); + if (!vdev) + goto err; + + err = ida_simple_get(&vdpa_index_ida, 0, 0, GFP_KERNEL); + if (err < 0) + goto err_ida; + + vdev->dev.bus = &vdpa_bus; + vdev->dev.parent = parent; + vdev->dev.release = vdpa_release_dev; + vdev->index = err; + vdev->config = config; + + err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); + if (err) + goto err_name; + + device_initialize(&vdev->dev); + + return vdev; + +err_name: + ida_simple_remove(&vdpa_index_ida, vdev->index); +err_ida: + kfree(vdev); +err: + return ERR_PTR(err); } +EXPORT_SYMBOL_GPL(__vdpa_alloc_device); + +/** + * vdpa_register_device - register a vDPA device + * Callers must have made a successful call to vdpa_init_device() first. + * @vdev: the vdpa device to be registered to vDPA bus + * + * Returns an error when the device fails to be added to the vDPA bus + */ +int vdpa_register_device(struct vdpa_device *vdev) +{ + return device_add(&vdev->dev); +} +EXPORT_SYMBOL_GPL(vdpa_register_device); + +/** + * vdpa_unregister_device - unregister a vDPA device + * @vdev: the vdpa device to be unregistered from the vDPA bus + */ +void vdpa_unregister_device(struct vdpa_device *vdev) +{ + device_unregister(&vdev->dev); +} +EXPORT_SYMBOL_GPL(vdpa_unregister_device); + +/** + * __vdpa_register_driver - register a vDPA device driver + * @drv: the vdpa device driver to be registered + * @owner: module owner of the driver + * + * Returns an error when the registration fails + */ +int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner) +{ + drv->driver.bus = &vdpa_bus; + drv->driver.owner = owner; + + return driver_register(&drv->driver); } +EXPORT_SYMBOL_GPL(__vdpa_register_driver); + +/** + * vdpa_unregister_driver - unregister a vDPA device driver + * @drv: the vdpa device driver to be unregistered + */ +void vdpa_unregister_driver(struct vdpa_driver *drv) +{ + driver_unregister(&drv->driver); } +EXPORT_SYMBOL_GPL(vdpa_unregister_driver); + +static int vdpa_init(void) +{ + return bus_register(&vdpa_bus); } + +static void __exit vdpa_exit(void) +{ + bus_unregister(&vdpa_bus); + ida_destroy(&vdpa_index_ida); } +core_initcall(vdpa_init); +module_exit(vdpa_exit); + +MODULE_AUTHOR("Jason Wang "); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h new file mode 100644 index 000000000000..733acfb7ef84 --- /dev/null +++ b/include/linux/vdpa.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VDPA_H +#define _LINUX_VDPA_H + +#include +#include +#include +#include + +/** + * vDPA callback definition. + * @callback: interrupt callback function + * @private: the data passed to the callback function + */ +struct vdpa_callback { + irqreturn_t (*callback)(void *data); + void *private; +}; + +/** + * vDPA device - representation of a vDPA device + * @dev: underlying device + * @dma_dev: the actual device that is performing DMA + * @config: the configuration ops for this device. + * @index: device index + */ +struct vdpa_device { + struct device dev; + struct device *dma_dev; + const struct vdpa_config_ops *config; + unsigned int index; +}; + +/** + * vdpa_config_ops - operations for configuring a vDPA device.
+ * Note: vDPA device drivers are required to implement all of the + * operations unless it is mentioned to be optional in the following + * list. + * + * @set_vq_address: Set the address of virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * @desc_area: address of desc area + * @driver_area: address of driver area + * @device_area: address of device area + * Returns integer: success (0) or error (< 0) + * @set_vq_num: Set the size of virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * @num: the size of virtqueue + * @kick_vq: Kick the virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * @set_vq_cb: Set the interrupt callback function for + * a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * @cb: virtio-vdev interrupt callback structure + * @set_vq_ready: Set ready status for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * @ready: ready (true) not ready(false) + * @get_vq_ready: Get ready status for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * Returns boolean: ready (true) or not (false) + * @set_vq_state: Set the state for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * @state: virtqueue state (last_avail_idx) + * Returns integer: success (0) or error (< 0) + * @get_vq_state: Get the state for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * Returns virtqueue state (last_avail_idx) + * @get_vq_align: Get the virtqueue align requirement + * for the device + * @vdev: vdpa device + * Returns virtqueue algin requirement + * @get_features: Get virtio features supported by the device + * @vdev: vdpa device + * Returns the virtio features support by the + * device + * @set_features: Set virtio features supported by the driver + * @vdev: vdpa device + * @features: feature support by the driver + * Returns integer: success (0) or error (< 0) + * @set_config_cb: Set the config interrupt callback + * @vdev: vdpa device + * @cb: virtio-vdev interrupt callback structure + * @get_vq_num_max: Get the max size of virtqueue + * @vdev: vdpa device + * Returns u16: max size of virtqueue + * @get_device_id: Get virtio device id + * @vdev: vdpa device + * Returns u32: virtio device id + * @get_vendor_id: Get id for the vendor that provides this device + * @vdev: vdpa device + * Returns u32: virtio vendor id + * @get_status: Get the device status + * @vdev: vdpa device + * Returns u8: virtio device status + * @set_status: Set the device status + * @vdev: vdpa device + * @status: virtio device status + * @get_config: Read from device specific configuration space + * @vdev: vdpa device + * @offset: offset from the beginning of + * configuration space + * @buf: buffer used to read to + * @len: the length to read from + * configuration space + * @set_config: Write to device specific configuration space + * @vdev: vdpa device + * @offset: offset from the beginning of + * configuration space + * @buf: buffer used to write from + * @len: the length to write to + * configuration space + * @get_generation: Get device config generation (optional) + * @vdev: vdpa device + * Returns u32: device generation + * @set_map: Set device memory mapping (optional) + * Needed for device that using device + * specific DMA translation (on-chip IOMMU) + * @vdev: vdpa device + * @iotlb: vhost memory mapping to be + * used by the vDPA + * Returns integer: success (0) or error (< 0) + * @dma_map: Map an area of PA to IOVA (optional) + * Needed for device that using device + * specific DMA translation (on-chip IOMMU) + * 
and preferring incremental map. + * @vdev: vdpa device + * @iova: iova to be mapped + * @size: size of the area + * @pa: physical address for the map + * @perm: device access permission (VHOST_MAP_XX) + * Returns integer: success (0) or error (< 0) + * @dma_unmap: Unmap an area of IOVA (optional but + * must be implemented with dma_map) + * Needed for device that using device + * specific DMA translation (on-chip IOMMU) + * and preferring incremental unmap. + * @vdev: vdpa device + * @iova: iova to be unmapped + * @size: size of the area + * Returns integer: success (0) or error (< 0) + * @free: Free resources that belongs to vDPA (optional) + * @vdev: vdpa device + */ +struct vdpa_config_ops { + /* Virtqueue ops */ + int (*set_vq_address)(struct vdpa_device *vdev, + u16 idx, u64 desc_area, u64 driver_area, + u64 device_area); + void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num); + void (*kick_vq)(struct vdpa_device *vdev, u16 idx); + void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx, + struct vdpa_callback *cb); + void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); + bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); + int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); + u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + + /* Device ops */ + u16 (*get_vq_align)(struct vdpa_device *vdev); + u64 (*get_features)(struct vdpa_device *vdev); + int (*set_features)(struct vdpa_device *vdev, u64 features); + void (*set_config_cb)(struct vdpa_device *vdev, + struct vdpa_callback *cb); + u16 (*get_vq_num_max)(struct vdpa_device *vdev); + u32 (*get_device_id)(struct vdpa_device *vdev); + u32 (*get_vendor_id)(struct vdpa_device *vdev); + u8 (*get_status)(struct vdpa_device *vdev); + void (*set_status)(struct vdpa_device *vdev, u8 status); + void (*get_config)(struct vdpa_device *vdev, unsigned int offset, + void *buf, unsigned int len); + void (*set_config)(struct vdpa_device *vdev, unsigned int offset, + const void *buf, unsigned int len); + u32 (*get_generation)(struct vdpa_device *vdev); + + /* DMA ops */ + int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb); + int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size, + u64 pa, u32 perm); + int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size); + + /* Free device resources */ + void (*free)(struct vdpa_device *vdev); +}; + +struct vdpa_device *__vdpa_alloc_device(struct device *parent, + const struct vdpa_config_ops *config, + size_t size); + +#define vdpa_alloc_device(dev_struct, member, parent, config) \ + container_of(__vdpa_alloc_device( \ + parent, config, \ + sizeof(dev_struct) + \ + BUILD_BUG_ON_ZERO(offsetof( \ + dev_struct, member))), \ + dev_struct, member) + +int vdpa_register_device(struct vdpa_device *vdev); +void vdpa_unregister_device(struct vdpa_device *vdev); + +/** + * vdpa_driver - operations for a vDPA driver + * @driver: underlying device driver + * @probe: the function to call when a device is found. Returns 0 or -errno. + * @remove: the function to call when a device is removed. 
+ */
+struct vdpa_driver {
+	struct device_driver driver;
+	int (*probe)(struct vdpa_device *vdev);
+	void (*remove)(struct vdpa_device *vdev);
+};
+
+#define vdpa_register_driver(drv) \
+	__vdpa_register_driver(drv, THIS_MODULE)
+int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
+void vdpa_unregister_driver(struct vdpa_driver *drv);
+
+#define module_vdpa_driver(__vdpa_driver) \
+	module_driver(__vdpa_driver, vdpa_register_driver,	\
+		      vdpa_unregister_driver)
+
+static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
+{
+	return container_of(driver, struct vdpa_driver, driver);
+}
+
+static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
+{
+	return container_of(_dev, struct vdpa_device, dev);
+}
+
+static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
+{
+	return dev_get_drvdata(&vdev->dev);
+}
+
+static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
+{
+	dev_set_drvdata(&vdev->dev, data);
+}
+
+static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
+{
+	return vdev->dma_dev;
+}
+#endif /* _LINUX_VDPA_H */
--
cgit v1.2.3-58-ga151


From 08ca8b21f760c0ed5034a5c122092eec22ccf8f4 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Wed, 1 Apr 2020 13:04:49 -0400
Subject: NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests()

When a subrequest is being detached from the subgroup, we want to ensure that it is not holding the group lock, or in the process of waiting for the group lock.

Fixes: 5b2b5187fa85 ("NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases")
Signed-off-by: Trond Myklebust
---
 fs/nfs/pagelist.c        | 67 ++++++++++++++++++++++++++++++----------------
 fs/nfs/write.c           | 10 ++++++--
 include/linux/nfs_page.h |  2 ++
 3 files changed, 55 insertions(+), 24 deletions(-)

(limited to 'include/linux')

diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index be5e209399ea..0e3f0f241d83 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -133,47 +133,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
 /*
- * nfs_page_group_lock - lock the head of the page group
- * @req - request in group that is to be locked
+ * nfs_page_set_headlock - set the request PG_HEADLOCK
+ * @req: request that is to be locked
 *
- * this lock must be held when traversing or modifying the page
- * group list
+ * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
 int
-nfs_page_group_lock(struct nfs_page *req)
+nfs_page_set_headlock(struct nfs_page *req)
 {
-	struct nfs_page *head = req->wb_head;
-
-	WARN_ON_ONCE(head != head->wb_head);
-
-	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
 		return 0;
-	set_bit(PG_CONTENDED1, &head->wb_flags);
+	set_bit(PG_CONTENDED1, &req->wb_flags);
 	smp_mb__after_atomic();
-	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
 				TASK_UNINTERRUPTIBLE);
 }
 
 /*
- * nfs_page_group_unlock - unlock the head of the page group
- * @req - request in group that is to be unlocked
+ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
+ * @req: request that is to be unlocked
 */
 void
-nfs_page_group_unlock(struct nfs_page *req)
+nfs_page_clear_headlock(struct nfs_page *req)
 {
-	struct nfs_page *head = req->wb_head;
-
-	WARN_ON_ONCE(head != head->wb_head);
-
 	smp_mb__before_atomic();
-	clear_bit(PG_HEADLOCK,
&head->wb_flags); + clear_bit(PG_HEADLOCK, &req->wb_flags); smp_mb__after_atomic(); - if (!test_bit(PG_CONTENDED1, &head->wb_flags)) + if (!test_bit(PG_CONTENDED1, &req->wb_flags)) return; - wake_up_bit(&head->wb_flags, PG_HEADLOCK); + wake_up_bit(&req->wb_flags, PG_HEADLOCK); +} + +/* + * nfs_page_group_lock - lock the head of the page group + * @req: request in group that is to be locked + * + * this lock must be held when traversing or modifying the page + * group list + * + * return 0 on success, < 0 on error + */ +int +nfs_page_group_lock(struct nfs_page *req) +{ + int ret; + + ret = nfs_page_set_headlock(req); + if (ret || req->wb_head == req) + return ret; + return nfs_page_set_headlock(req->wb_head); +} + +/* + * nfs_page_group_unlock - unlock the head of the page group + * @req: request in group that is to be unlocked + */ +void +nfs_page_group_unlock(struct nfs_page *req) +{ + if (req != req->wb_head) + nfs_page_clear_headlock(req->wb_head); + nfs_page_clear_headlock(req); } /* diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 626e99cbb50e..a6d7926b0653 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -428,22 +428,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, destroy_list = (subreq->wb_this_page == old_head) ? NULL : subreq->wb_this_page; + /* Note: lock subreq in order to change subreq->wb_head */ + nfs_page_set_headlock(subreq); WARN_ON_ONCE(old_head != subreq->wb_head); /* make sure old group is not used */ subreq->wb_this_page = subreq; + subreq->wb_head = subreq; clear_bit(PG_REMOVE, &subreq->wb_flags); /* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { /* Check if we raced with nfs_page_group_destroy() */ - if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { + nfs_page_clear_headlock(subreq); nfs_free_request(subreq); + } else + nfs_page_clear_headlock(subreq); continue; } + nfs_page_clear_headlock(subreq); - subreq->wb_head = subreq; nfs_release_request(old_head); if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 0bbd587fac6a..7e9419d74b86 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* -- cgit v1.2.3-58-ga151 From a62f8e3bd836bf1abde1648a45e14afd050dbd23 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 30 Mar 2020 11:12:16 -0400 Subject: NFS: Clean up nfs_lock_and_join_requests() Clean up nfs_lock_and_join_requests() to simplify the calculation of the range covered by the page group, taking into account the presence of mirrors. 
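For illustration, the new range calculation can be read as the following standalone sketch (simplified, hypothetical names; not part of the patch): each subrequest is folded into the head request's range, growing it downwards and upwards as needed.

	/* Sketch of the merge arithmetic introduced below (hypothetical types). */
	struct range { unsigned int pgbase, off, bytes; };

	static void fold_subrequest(struct range *head, unsigned int sub_pgbase,
				    unsigned int sub_bytes)
	{
		/* grow the head range downwards if the subrequest starts earlier */
		if (head->pgbase > sub_pgbase) {
			head->off -= head->pgbase - sub_pgbase;
			head->bytes += head->pgbase - sub_pgbase;
			head->pgbase = sub_pgbase;
		}
		/* grow the head range upwards if the subrequest ends later */
		if (sub_pgbase + sub_bytes > head->pgbase + head->bytes)
			head->bytes = sub_pgbase + sub_bytes - head->pgbase;
	}

Because mirrored writes may repeat the same range, the merge takes the union of the subrequest ranges rather than summing their lengths.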
Signed-off-by: Trond Myklebust
---
 fs/nfs/pagelist.c        | 74 +++++++++++++++++++++++++++++++++++++++
 fs/nfs/write.c           | 91 +++++++++++-------------------------------------
 include/linux/nfs_page.h |  1 +
 3 files changed, 95 insertions(+), 71 deletions(-)

(limited to 'include/linux')

diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index f535a92403bf..261236157e33 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -130,6 +130,80 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 }
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
+/*
+ * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
+ * @head: head request of page group, must be holding head lock
+ * @req: request that couldn't lock and needs to wait on the req bit lock
+ *
+ * This is a helper function for nfs_lock_and_join_requests.
+ */
+static void
+nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
+{
+	struct nfs_page *tmp;
+
+	/* relinquish all the locks successfully grabbed this run */
+	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
+		if (!kref_read(&tmp->wb_kref))
+			continue;
+		nfs_unlock_and_release_request(tmp);
+	}
+}
+
+/*
+ * nfs_page_group_lock_subreq - try to lock a subrequest
+ * @head: head request of page group
+ * @subreq: request to lock
+ *
+ * This is a helper function for nfs_lock_and_join_requests which
+ * must be called with the head request and page group both locked.
+ * On error, it returns with the page group unlocked.
+ */
+static int
+nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
+{
+	int ret;
+
+	if (!kref_get_unless_zero(&subreq->wb_kref))
+		return 0;
+	while (!nfs_lock_request(subreq)) {
+		nfs_page_group_unlock(head);
+		ret = nfs_wait_on_request(subreq);
+		if (!ret)
+			ret = nfs_page_group_lock(head);
+		if (ret < 0) {
+			nfs_unroll_locks(head, subreq);
+			nfs_release_request(subreq);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/*
+ * nfs_page_group_lock_subrequests - try to lock the subrequests
+ * @head: head request of page group
+ *
+ * This is a helper function for nfs_lock_and_join_requests which
+ * must be called with the head request and page group both locked.
+ * On error, it returns with the page group unlocked.
+ */
+int nfs_page_group_lock_subrequests(struct nfs_page *head)
+{
+	struct nfs_page *subreq;
+	int ret;
+
+	/* lock each request in the page group */
+	for (subreq = head->wb_this_page; subreq != head;
+			subreq = subreq->wb_this_page) {
+		ret = nfs_page_group_lock_subreq(head, subreq);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
 /*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index a6d7926b0653..832cf57ea442 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -379,34 +379,6 @@ static void nfs_end_page_writeback(struct nfs_page *req)
 		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
 }
 
-/*
- * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
- *
- * this is a helper function for nfs_lock_and_join_requests
- *
- * @inode - inode associated with request page group, must be holding inode lock
- * @head - head request of page group, must be holding head lock
- * @req - request that couldn't lock and needs to wait on the req bit lock
- *
- * NOTE: this must be called holding page_group bit lock
- * which will be released before returning.
- *
- * returns 0 on success, < 0 on error.
- */ -static void -nfs_unroll_locks(struct inode *inode, struct nfs_page *head, - struct nfs_page *req) -{ - struct nfs_page *tmp; - - /* relinquish all the locks successfully grabbed this run */ - for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { - if (!kref_read(&tmp->wb_kref)) - continue; - nfs_unlock_and_release_request(tmp); - } -} - /* * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests * @@ -487,7 +459,7 @@ nfs_lock_and_join_requests(struct page *page) struct inode *inode = page_file_mapping(page)->host; struct nfs_page *head, *subreq; struct nfs_page *destroy_list = NULL; - unsigned int total_bytes; + unsigned int pgbase, off, bytes; int ret; try_again: @@ -520,49 +492,30 @@ try_again: goto release_request; /* lock each request in the page group */ - total_bytes = head->wb_bytes; + ret = nfs_page_group_lock_subrequests(head); + if (ret < 0) + goto release_request; + + pgbase = head->wb_pgbase; + bytes = head->wb_bytes; + off = head->wb_offset; for (subreq = head->wb_this_page; subreq != head; subreq = subreq->wb_this_page) { - - if (!kref_get_unless_zero(&subreq->wb_kref)) { - if (subreq->wb_offset == head->wb_offset + total_bytes) - total_bytes += subreq->wb_bytes; - continue; - } - - while (!nfs_lock_request(subreq)) { - /* - * Unlock page to allow nfs_page_group_sync_on_bit() - * to succeed - */ - nfs_page_group_unlock(head); - ret = nfs_wait_on_request(subreq); - if (!ret) - ret = nfs_page_group_lock(head); - if (ret < 0) { - nfs_unroll_locks(inode, head, subreq); - nfs_release_request(subreq); - goto release_request; - } - } - /* - * Subrequests are always contiguous, non overlapping - * and in order - but may be repeated (mirrored writes). - */ - if (subreq->wb_offset == (head->wb_offset + total_bytes)) { - /* keep track of how many bytes this group covers */ - total_bytes += subreq->wb_bytes; - } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || - ((subreq->wb_offset + subreq->wb_bytes) > - (head->wb_offset + total_bytes)))) { - nfs_page_group_unlock(head); - nfs_unroll_locks(inode, head, subreq); - nfs_unlock_and_release_request(subreq); - ret = -EIO; - goto release_request; + /* Subrequests should always form a contiguous range */ + if (pgbase > subreq->wb_pgbase) { + off -= pgbase - subreq->wb_pgbase; + bytes += pgbase - subreq->wb_pgbase; + pgbase = subreq->wb_pgbase; } + bytes = max(subreq->wb_pgbase + subreq->wb_bytes + - pgbase, bytes); } + /* Set the head request's range to cover the former page group */ + head->wb_pgbase = pgbase; + head->wb_bytes = bytes; + head->wb_offset = off; + /* Now that all requests are locked, make sure they aren't on any list. 
* Commit list removal accounting is done after locks are dropped */
 	subreq = head;
@@ -576,10 +529,6 @@ try_again:
 		/* destroy list will be terminated by head */
 		destroy_list = head->wb_this_page;
 		head->wb_this_page = head;
-
-		/* change head request to cover whole range that
-		 * the former page group covered */
-		head->wb_bytes = total_bytes;
 	}
 
 	/* Postpone destruction of this request */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 7e9419d74b86..dd205bc6bc58 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -139,6 +139,7 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
+extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
 extern int nfs_page_group_lock(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
--
cgit v1.2.3-58-ga151


From e00ed89d7bd59c4ae49d6aeeee567187b1357a4b Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Mon, 30 Mar 2020 12:40:47 -0400
Subject: NFS: Refactor nfs_lock_and_join_requests()

Refactor nfs_lock_and_join_requests() in order to separate out the subrequest merging into its own function, nfs_join_page_group(), that can be used by O_DIRECT.

Signed-off-by: Trond Myklebust
---
 fs/nfs/pagelist.c        |  26 +++++++-
 fs/nfs/write.c           | 164 ++++++++++++++++++++++++++++-------------------
 include/linux/nfs_page.h |   1 +
 3 files changed, 123 insertions(+), 68 deletions(-)

(limited to 'include/linux')

diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b9805d1dac75..f61f96603df7 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -130,6 +130,25 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 }
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
+/*
+ * nfs_page_group_lock_head - page lock the head of the page group
+ * @req: any member of the page group
+ */
+struct nfs_page *
+nfs_page_group_lock_head(struct nfs_page *req)
+{
+	struct nfs_page *head = req->wb_head;
+
+	while (!nfs_lock_request(head)) {
+		int ret = nfs_wait_on_request(head);
+		if (ret < 0)
+			return ERR_PTR(ret);
+	}
+	if (head != req)
+		kref_get(&head->wb_kref);
+	return head;
+}
+
 /*
 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
@@ -186,14 +205,16 @@ nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
 * @head: head request of page group
 *
 * This is a helper function for nfs_lock_and_join_requests which
- * must be called with the head request and page group both locked.
- * On error, it returns with the page group unlocked.
+ * must be called with the head request locked.
*/ int nfs_page_group_lock_subrequests(struct nfs_page *head) { struct nfs_page *subreq; int ret; + ret = nfs_page_group_lock(head); + if (ret < 0) + return ret; /* lock each request in the page group */ for (subreq = head->wb_this_page; subreq != head; subreq = subreq->wb_this_page) { @@ -201,6 +222,7 @@ int nfs_page_group_lock_subrequests(struct nfs_page *head) if (ret < 0) return ret; } + nfs_page_group_unlock(head); return 0; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 832cf57ea442..63b64333c3ea 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -149,6 +149,31 @@ static void nfs_io_completion_put(struct nfs_io_completion *ioc) kref_put(&ioc->refcount, nfs_io_completion_release); } +static void +nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) +{ + if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { + kref_get(&req->wb_kref); + atomic_long_inc(&NFS_I(inode)->nrequests); + } +} + +static int +nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) +{ + int ret; + + if (!test_bit(PG_REMOVE, &req->wb_flags)) + return 0; + ret = nfs_page_group_lock(req); + if (ret) + return ret; + if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) + nfs_page_set_inode_ref(req, inode); + nfs_page_group_unlock(req); + return 0; +} + static struct nfs_page * nfs_page_private_request(struct page *page) { @@ -218,6 +243,36 @@ static struct nfs_page *nfs_page_find_head_request(struct page *page) return req; } +static struct nfs_page *nfs_find_and_lock_page_request(struct page *page) +{ + struct inode *inode = page_file_mapping(page)->host; + struct nfs_page *req, *head; + int ret; + + for (;;) { + req = nfs_page_find_head_request(page); + if (!req) + return req; + head = nfs_page_group_lock_head(req); + if (head != req) + nfs_release_request(req); + if (IS_ERR(head)) + return head; + ret = nfs_cancel_remove_inode(head, inode); + if (ret < 0) { + nfs_unlock_and_release_request(head); + return ERR_PTR(ret); + } + /* Ensure that nobody removed the request before we locked it */ + if (head == nfs_page_private_request(page)) + break; + if (PageSwapCache(page)) + break; + nfs_unlock_and_release_request(head); + } + return head; +} + /* Adjust the file length if we're writing beyond the end */ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) { @@ -436,65 +491,22 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, } /* - * nfs_lock_and_join_requests - join all subreqs to the head req and return - * a locked reference, cancelling any pending - * operations for this page. - * - * @page - the page used to lookup the "page group" of nfs_page structures + * nfs_join_page_group - destroy subrequests of the head req + * @head: the page used to lookup the "page group" of nfs_page structures + * @inode: Inode to which the request belongs. * * This function joins all sub requests to the head request by first * locking all requests in the group, cancelling any pending operations * and finally updating the head request to cover the whole range covered by * the (former) group. All subrequests are removed from any write or commit * lists, unlinked from the group and destroyed. - * - * Returns a locked, referenced pointer to the head request - which after - * this call is guaranteed to be the only request associated with the page. - * Returns NULL if no requests are found for @page, or a ERR_PTR if an - * error was encountered. 
*/ -static struct nfs_page * -nfs_lock_and_join_requests(struct page *page) +static void +nfs_join_page_group(struct nfs_page *head, struct inode *inode) { - struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *head, *subreq; + struct nfs_page *subreq; struct nfs_page *destroy_list = NULL; unsigned int pgbase, off, bytes; - int ret; - -try_again: - /* - * A reference is taken only on the head request which acts as a - * reference to the whole page group - the group will not be destroyed - * until the head reference is released. - */ - head = nfs_page_find_head_request(page); - if (!head) - return NULL; - - /* lock the page head first in order to avoid an ABBA inefficiency */ - if (!nfs_lock_request(head)) { - ret = nfs_wait_on_request(head); - nfs_release_request(head); - if (ret < 0) - return ERR_PTR(ret); - goto try_again; - } - - /* Ensure that nobody removed the request before we locked it */ - if (head != nfs_page_private_request(page) && !PageSwapCache(page)) { - nfs_unlock_and_release_request(head); - goto try_again; - } - - ret = nfs_page_group_lock(head); - if (ret < 0) - goto release_request; - - /* lock each request in the page group */ - ret = nfs_page_group_lock_subrequests(head); - if (ret < 0) - goto release_request; pgbase = head->wb_pgbase; bytes = head->wb_bytes; @@ -531,30 +543,50 @@ try_again: head->wb_this_page = head; } - /* Postpone destruction of this request */ - if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { - set_bit(PG_INODE_REF, &head->wb_flags); - kref_get(&head->wb_kref); - atomic_long_inc(&NFS_I(inode)->nrequests); - } + nfs_destroy_unlinked_subrequests(destroy_list, head, inode); +} - nfs_page_group_unlock(head); +/* + * nfs_lock_and_join_requests - join all subreqs to the head req + * @page: the page used to lookup the "page group" of nfs_page structures + * + * This function joins all sub requests to the head request by first + * locking all requests in the group, cancelling any pending operations + * and finally updating the head request to cover the whole range covered by + * the (former) group. All subrequests are removed from any write or commit + * lists, unlinked from the group and destroyed. + * + * Returns a locked, referenced pointer to the head request - which after + * this call is guaranteed to be the only request associated with the page. + * Returns NULL if no requests are found for @page, or a ERR_PTR if an + * error was encountered. + */ +static struct nfs_page * +nfs_lock_and_join_requests(struct page *page) +{ + struct inode *inode = page_file_mapping(page)->host; + struct nfs_page *head; + int ret; - nfs_destroy_unlinked_subrequests(destroy_list, head, inode); + /* + * A reference is taken only on the head request which acts as a + * reference to the whole page group - the group will not be destroyed + * until the head reference is released. + */ + head = nfs_find_and_lock_page_request(page); + if (IS_ERR_OR_NULL(head)) + return head; - /* Did we lose a race with nfs_inode_remove_request()? 
*/ - if (!(PagePrivate(page) || PageSwapCache(page))) { + /* lock each request in the page group */ + ret = nfs_page_group_lock_subrequests(head); + if (ret < 0) { nfs_unlock_and_release_request(head); - return NULL; + return ERR_PTR(ret); } - /* still holds ref on head from nfs_page_find_head_request - * and still has lock on head from lock loop */ - return head; + nfs_join_page_group(head, inode); -release_request: - nfs_unlock_and_release_request(head); - return ERR_PTR(ret); + return head; } static void nfs_write_error(struct nfs_page *req, int error) diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index dd205bc6bc58..99198c039bd6 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -139,6 +139,7 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); +extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); extern int nfs_page_group_lock_subrequests(struct nfs_page *head); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); -- cgit v1.2.3-58-ga151 From ed5d588fe47feef290f271022820e255d8371561 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 30 Mar 2020 20:57:49 -0400 Subject: NFS: Try to join page groups before an O_DIRECT retransmission If we have to retransmit requests, try to join their page groups first. Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 20 ++++++++++++++++++++ fs/nfs/write.c | 2 +- include/linux/nfs_page.h | 1 + 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 8074304fd5b4..a57e7c72c7f4 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -505,6 +505,24 @@ out: return result; } +static void +nfs_direct_join_group(struct list_head *list, struct inode *inode) +{ + struct nfs_page *req, *next; + + list_for_each_entry(req, list, wb_list) { + if (req->wb_head != req || req->wb_this_page == req) + continue; + for (next = req->wb_this_page; + next != req->wb_head; + next = next->wb_this_page) { + nfs_list_remove_request(next); + nfs_release_request(next); + } + nfs_join_page_group(req, inode); + } +} + static void nfs_direct_write_scan_commit_list(struct inode *inode, struct list_head *list, @@ -527,6 +545,8 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); + nfs_direct_join_group(&reqs, dreq->inode); + dreq->count = 0; dreq->max_count = 0; list_for_each_entry(req, &reqs, wb_list) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 63b64333c3ea..df4b87c30ac9 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -501,7 +501,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, * the (former) group. All subrequests are removed from any write or commit * lists, unlinked from the group and destroyed. 
*/
-static void
+void
 nfs_join_page_group(struct nfs_page *head, struct inode *inode)
 {
 	struct nfs_page *subreq;
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 99198c039bd6..c32c15216da3 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -141,6 +141,7 @@ extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
 extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
+extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
 extern int nfs_page_group_lock(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
--
cgit v1.2.3-58-ga151


From d866dbf6178713e37d2fec2870af00b345684e1a Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 24 Jul 2019 10:37:22 -0700
Subject: blkcg: rename blkcg->cgwb_refcnt to ->online_pin and always use it

blkcg->cgwb_refcnt is used to delay blkcg offlining so that blkgs don't get offlined while there are active cgwbs on them. However, it ends up making offlining unordered, sometimes causing parents to be offlined before children.

To fix it, we want child blkcgs to pin the parents' online states, turning the refcnt into a more generic online pinning mechanism.

In preparation,

* blkcg->cgwb_refcnt -> blkcg->online_pin
* blkcg_cgwb_get/put() -> blkcg_pin/unpin_online()
* Take them out of CONFIG_CGROUP_WRITEBACK

Signed-off-by: Tejun Heo
Signed-off-by: Jens Axboe
---
 block/blk-cgroup.c         |  6 +++---
 include/linux/blk-cgroup.h | 39 +++++++++++++--------------------------
 mm/backing-dev.c           |  6 +++---
 3 files changed, 19 insertions(+), 32 deletions(-)

(limited to 'include/linux')

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c15a26096038..2acef6a64954 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -883,8 +883,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
 	/* this prevents anyone from attaching or migrating to this blkcg */
 	wb_blkcg_offline(blkcg);
 
-	/* put the base cgwb reference allowing step 2 to be triggered */
-	blkcg_cgwb_put(blkcg);
+	/* put the base online pin allowing step 2 to be triggered */
+	blkcg_unpin_online(blkcg);
 }
 
 /**
@@ -983,11 +983,11 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 	}
 
 	spin_lock_init(&blkcg->lock);
+	refcount_set(&blkcg->online_pin, 1);
 	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
-	refcount_set(&blkcg->cgwb_refcnt, 1);
 #endif
 	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e4a6949fd171..7fb7caa55a3d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -46,6 +46,7 @@ struct blkcg_gq;
 struct blkcg {
 	struct cgroup_subsys_state	css;
 	spinlock_t			lock;
+	refcount_t			online_pin;
 
 	struct radix_tree_root		blkg_tree;
 	struct blkcg_gq	__rcu		*blkg_hint;
@@ -56,7 +57,6 @@ struct blkcg {
 	struct list_head		all_blkcgs_node;
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct list_head		cgwb_list;
-	refcount_t			cgwb_refcnt;
 #endif
 };
 
@@ -412,47 +412,34 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
 
 extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
 
-#ifdef CONFIG_CGROUP_WRITEBACK
-
 /**
- * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
+ * blkcg_pin_online - pin online state
 *
@blkcg: blkcg of interest * - * This is used to track the number of active wb's related to a blkcg. + * While pinned, a blkcg is kept online. This is primarily used to + * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline + * while an associated cgwb is still active. */ -static inline void blkcg_cgwb_get(struct blkcg *blkcg) +static inline void blkcg_pin_online(struct blkcg *blkcg) { - refcount_inc(&blkcg->cgwb_refcnt); + refcount_inc(&blkcg->online_pin); } /** - * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list + * blkcg_unpin_online - unpin online state * @blkcg: blkcg of interest * - * This is used to track the number of active wb's related to a blkcg. - * When this count goes to zero, all active wb has finished so the + * This is primarily used to impedance-match blkg and cgwb lifetimes so + * that blkg doesn't go offline while an associated cgwb is still active. + * When this count goes to zero, all active cgwbs have finished so the * blkcg can continue destruction by calling blkcg_destroy_blkgs(). - * This work may occur in cgwb_release_workfn() on the cgwb_release - * workqueue. */ -static inline void blkcg_cgwb_put(struct blkcg *blkcg) +static inline void blkcg_unpin_online(struct blkcg *blkcg) { - if (refcount_dec_and_test(&blkcg->cgwb_refcnt)) + if (refcount_dec_and_test(&blkcg->online_pin)) blkcg_destroy_blkgs(blkcg); } -#else - -static inline void blkcg_cgwb_get(struct blkcg *blkcg) { } - -static inline void blkcg_cgwb_put(struct blkcg *blkcg) -{ - /* wb isn't being accounted, so trigger destruction right away */ - blkcg_destroy_blkgs(blkcg); -} - -#endif - /** * blkg_path - format cgroup path of blkg * @blkg: blkg of interest diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 62f05f605fb5..c81b4f3a7268 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -491,8 +491,8 @@ static void cgwb_release_workfn(struct work_struct *work) css_put(wb->blkcg_css); mutex_unlock(&wb->bdi->cgwb_release_mutex); - /* triggers blkg destruction if cgwb_refcnt becomes zero */ - blkcg_cgwb_put(blkcg); + /* triggers blkg destruction if no online users left */ + blkcg_unpin_online(blkcg); fprop_local_destroy_percpu(&wb->memcg_completions); percpu_ref_exit(&wb->refcnt); @@ -592,7 +592,7 @@ static int cgwb_create(struct backing_dev_info *bdi, list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); - blkcg_cgwb_get(blkcg); + blkcg_pin_online(blkcg); css_get(memcg_css); css_get(blkcg_css); } -- cgit v1.2.3-58-ga151 From 4308a434e5e08c78676aa66bc626ef78cbef0883 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jul 2019 10:37:55 -0700 Subject: blkcg: don't offline parent blkcg first blkcg->cgwb_refcnt is used to delay blkcg offlining so that blkgs don't get offlined while there are active cgwbs on them. However, it ends up making offlining unordered sometimes causing parents to be offlined before children. Let's fix this by making child blkcgs pin the parents' online states. Note that pin/unpin names are chosen over get/put intentionally because css uses get/put online for something different. 
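The resulting lifetime rule can be summarized with a short annotated sketch (illustrative only; it mirrors the blkcg_unpin_online() change in the diff below): each child holds one online pin on its parent, so destruction cascades towards the root only as the last pin of each level is dropped.

	/* Annotated restatement of the cascade below (not new code). */
	static void example_unpin_online(struct blkcg *blkcg)
	{
		do {
			if (!refcount_dec_and_test(&blkcg->online_pin))
				break;			/* still pinned elsewhere */
			blkcg_destroy_blkgs(blkcg);	/* last pin gone: offline */
			blkcg = blkcg_parent(blkcg);	/* now drop our pin on the parent */
		} while (blkcg);
	}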
Signed-off-by: Tejun Heo
Signed-off-by: Jens Axboe
---
 block/blk-cgroup.c         | 16 ++++++++++++++++
 include/linux/blk-cgroup.h |  6 +++++-
 2 files changed, 21 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2acef6a64954..c5dc833212e1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1006,6 +1006,21 @@ unlock:
 	return ret;
 }
 
+static int blkcg_css_online(struct cgroup_subsys_state *css)
+{
+	struct blkcg *blkcg = css_to_blkcg(css);
+	struct blkcg *parent = blkcg_parent(blkcg);
+
+	/*
+	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
+	 * don't go offline while cgwbs are still active on them. Pin the
+	 * parent so that offline always happens towards the root.
+	 */
+	if (parent)
+		blkcg_pin_online(parent);
+	return 0;
+}
+
 /**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
@@ -1199,6 +1214,7 @@ static void blkcg_exit(struct task_struct *tsk)
 
 struct cgroup_subsys io_cgrp_subsys = {
 	.css_alloc = blkcg_css_alloc,
+	.css_online = blkcg_css_online,
 	.css_offline = blkcg_css_offline,
 	.css_free = blkcg_css_free,
 	.can_attach = blkcg_can_attach,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 7fb7caa55a3d..35f8ffe92b70 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -436,8 +436,12 @@ static inline void blkcg_pin_online(struct blkcg *blkcg)
 */
 static inline void blkcg_unpin_online(struct blkcg *blkcg)
 {
-	if (refcount_dec_and_test(&blkcg->online_pin))
+	do {
+		if (!refcount_dec_and_test(&blkcg->online_pin))
+			break;
 		blkcg_destroy_blkgs(blkcg);
+		blkcg = blkcg_parent(blkcg);
+	} while (blkcg);
 }
 
 /**
--
cgit v1.2.3-58-ga151


From 6487a8019b3e9f9d79e5f6ad3ea49f9379209b7e Mon Sep 17 00:00:00 2001
From: Alexandre Belloni
Date: Mon, 30 Mar 2020 22:15:08 +0200
Subject: rtc: remove rtc_time_to_tm and rtc_tm_to_time

There are no callers of the 32-bit versions of the rtc_time conversion functions, so drop them.

Link: https://lore.kernel.org/r/20200330201510.861217-1-alexandre.belloni@bootlin.com
Signed-off-by: Alexandre Belloni
---
 include/linux/rtc.h | 12 ------------
 1 file changed, 12 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 23990bd29040..bba3db3f7efa 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -34,18 +34,6 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
 	return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
 }
 
-static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
-{
-	rtc_time64_to_tm(time, tm);
-}
-
-static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
-{
-	*time = rtc_tm_to_time64(tm);
-
-	return 0;
-}
-
 #include
 #include
 #include
--
cgit v1.2.3-58-ga151


From 83153d9f36e24978c6211d246cb6f532bf54e5dc Mon Sep 17 00:00:00 2001
From: Kishon Vijay Abraham I
Date: Tue, 25 Feb 2020 13:47:01 +0530
Subject: PCI: endpoint: Fix ->set_msix() to take BIR and offset as arguments

commit 8963106eabdc ("PCI: endpoint: Add MSI-X interfaces"), while adding support to raise MSI-X interrupts from the endpoint, didn't include the BAR Indicator register (BIR) configuration and the MSI-X table offset as arguments in pci_epc_set_msix(). As a result, the endpoint controller would report a random BAR Indicator register, whose backing memory might not have been allocated by the endpoint function driver.
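For context, the host locates the MSI-X table through the capability's Table Offset/BIR register, so a stale BIR points it at the wrong BAR. A hedged sketch of the host-side lookup (illustrative only; example_map_msix_table is a made-up helper, error handling omitted):

	#include <linux/pci.h>

	static void __iomem *example_map_msix_table(struct pci_dev *pdev, u8 msix_cap)
	{
		u32 table;

		pci_read_config_dword(pdev, msix_cap + PCI_MSIX_TABLE, &table);
		/* the low 3 bits select the BAR, the rest is the table offset */
		return pci_iomap(pdev, table & PCI_MSIX_TABLE_BIR, 0) +
		       (table & PCI_MSIX_TABLE_OFFSET);
	}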
Add BAR indicator register and MSI-X table offset as arguments in pci_epc_set_msix() and allocate space for MSI-X table and pending bit array (PBA) in pci-epf-test endpoint function driver. Fixes: 8963106eabdc ("PCI: endpoint: Add MSI-X interfaces") Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi --- drivers/pci/controller/dwc/pcie-designware-ep.c | 15 ++++++++++-- drivers/pci/endpoint/functions/pci-epf-test.c | 32 ++++++++++++++++++++----- drivers/pci/endpoint/pci-epc-core.c | 7 ++++-- include/linux/pci-epc.h | 6 +++-- 4 files changed, 48 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 8e9f31144b69..b48c70e8c7c9 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -278,7 +278,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) return val; } -static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno bir, u32 offset) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -287,12 +288,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) if (!ep->msix_cap) return -EINVAL; + dw_pcie_dbi_ro_wr_en(pci); + reg = ep->msix_cap + PCI_MSIX_FLAGS; val = dw_pcie_readw_dbi(pci, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; - dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, reg, val); + + reg = ep->msix_cap + PCI_MSIX_TABLE; + val = offset | bir; + dw_pcie_writel_dbi(pci, reg, val); + + reg = ep->msix_cap + PCI_MSIX_PBA; + val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_dbi_ro_wr_dis(pci); return 0; diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 0a5019ce7540..60330f3e3751 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -50,6 +50,7 @@ struct pci_epf_test { void *reg[PCI_STD_NUM_BARS]; struct pci_epf *epf; enum pci_barno test_reg_bar; + size_t msix_table_offset; struct delayed_work cmd_handler; struct dma_chan *dma_chan; struct completion transfer_complete; @@ -659,6 +660,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) static int pci_epf_test_core_init(struct pci_epf *epf) { + struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct pci_epf_header *header = epf->header; const struct pci_epc_features *epc_features; struct pci_epc *epc = epf->epc; @@ -692,7 +694,9 @@ static int pci_epf_test_core_init(struct pci_epf *epf) } if (msix_capable) { - ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts); + ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts, + epf_test->test_reg_bar, + epf_test->msix_table_offset); if (ret) { dev_err(dev, "MSI-X configuration failed\n"); return ret; @@ -734,6 +738,10 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct device *dev = &epf->dev; struct pci_epf_bar *epf_bar; + size_t msix_table_size = 0; + size_t test_reg_bar_size; + size_t pba_size = 0; + bool msix_capable; void *base; int bar, add; enum pci_barno test_reg_bar = epf_test->test_reg_bar; @@ -742,13 +750,25 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) epc_features = 
epf_test->epc_features; - if (epc_features->bar_fixed_size[test_reg_bar]) + test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); + + msix_capable = epc_features->msix_capable; + if (msix_capable) { + msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; + epf_test->msix_table_offset = test_reg_bar_size; + /* Align to QWORD or 8 Bytes */ + pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); + } + test_reg_size = test_reg_bar_size + msix_table_size + pba_size; + + if (epc_features->bar_fixed_size[test_reg_bar]) { + if (test_reg_size > bar_size[test_reg_bar]) + return -ENOMEM; test_reg_size = bar_size[test_reg_bar]; - else - test_reg_size = sizeof(struct pci_epf_test_reg); + } - base = pci_epf_alloc_space(epf, test_reg_size, - test_reg_bar, epc_features->align); + base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, + epc_features->align); if (!base) { dev_err(dev, "Failed to allocated register space\n"); return -ENOMEM; diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 0d22a377a0cf..82ba0dc7f2f5 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -297,10 +297,13 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix); * @epc: the EPC device on which MSI-X has to be configured * @func_no: the endpoint function number in the EPC device * @interrupts: number of MSI-X interrupts required by the EPF + * @bir: BAR where the MSI-X table resides + * @offset: Offset pointing to the start of MSI-X table * * Invoke to set the required number of MSI-X interrupts. */ -int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno bir, u32 offset) { int ret; @@ -312,7 +315,7 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) return 0; mutex_lock(&epc->lock); - ret = epc->ops->set_msix(epc, func_no, interrupts - 1); + ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset); mutex_unlock(&epc->lock); return ret; diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 0d7e91bad91e..e0ed9d01f6e5 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -53,7 +53,8 @@ struct pci_epc_ops { phys_addr_t addr); int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); int (*get_msi)(struct pci_epc *epc, u8 func_no); - int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts); + int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno, u32 offset); int (*get_msix)(struct pci_epc *epc, u8 func_no); int (*raise_irq)(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); @@ -180,7 +181,8 @@ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr); int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); -int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts); +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno, u32 offset); int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); -- cgit v1.2.3-58-ga151 From 6f5e193bfb55963ce5f4f68cc927f371ddb0913b Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Tue, 25 Feb 2020 13:47:02 +0530 Subject: PCI: dwc: Fix dw_pcie_ep_raise_msix_irq() to get correct MSI-X table address commit beb4641a787d ("PCI: dwc: Add MSI-X 
callbacks handler"), in order to raise MSI-X interrupt, obtained MSIX table address from Base Address Register (BAR). However BAR only holds PCI address programmed by the host whereas the MSI-X table should be in the local memory. Store the MSI-X table address (virtual address) as part of ->set_bar() callback and use that to get the message address and message data here. Fixes: beb4641a787d ("PCI: dwc: Add MSI-X callbacks handler") Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi --- drivers/pci/controller/dwc/pcie-designware-ep.c | 46 +++++++++---------------- drivers/pci/controller/dwc/pcie-designware.h | 1 + drivers/pci/endpoint/pci-epf-core.c | 2 ++ include/linux/pci-epf.h | 15 ++++++++ 4 files changed, 35 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index b48c70e8c7c9..1cdcbd102ce8 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -134,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); clear_bit(atu_index, ep->ib_window_map); + ep->epf_bar[bar] = NULL; } static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, @@ -167,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, dw_pcie_writel_dbi(pci, reg + 4, 0); } + ep->epf_bar[bar] = epf_bar; dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -429,55 +431,41 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epf_msix_tbl *msix_tbl; struct pci_epc *epc = ep->epc; - u16 tbl_offset, bir; - u32 bar_addr_upper, bar_addr_lower; - u32 msg_addr_upper, msg_addr_lower; + struct pci_epf_bar *epf_bar; u32 reg, msg_data, vec_ctrl; - u64 tbl_addr, msg_addr, reg_u64; - void __iomem *msix_tbl; + unsigned int aligned_offset; + u32 tbl_offset; + u64 msg_addr; int ret; + u8 bir; reg = ep->msix_cap + PCI_MSIX_TABLE; tbl_offset = dw_pcie_readl_dbi(pci, reg); bir = (tbl_offset & PCI_MSIX_TABLE_BIR); tbl_offset &= PCI_MSIX_TABLE_OFFSET; - reg = PCI_BASE_ADDRESS_0 + (4 * bir); - bar_addr_upper = 0; - bar_addr_lower = dw_pcie_readl_dbi(pci, reg); - reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK); - if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64) - bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4); + epf_bar = ep->epf_bar[bir]; + msix_tbl = epf_bar->addr; + msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset); - tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower; - tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); - tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; - - msix_tbl = ioremap(ep->phys_base + tbl_addr, - PCI_MSIX_ENTRY_SIZE); - if (!msix_tbl) - return -EINVAL; - - msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR); - msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR); - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; - msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA); - vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL); - - iounmap(msix_tbl); + msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; + msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl; if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); return -EPERM; } - ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, 
+	aligned_offset = msg_addr & (epc->mem->page_size - 1);
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
 				  epc->mem->page_size);
 	if (ret)
 		return ret;
 
-	writel(msg_data, ep->msi_mem);
+	writel(msg_data, ep->msi_mem + aligned_offset);
 
 	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index aa98fbd50807..d6e1f397e6b0 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -233,6 +233,7 @@ struct dw_pcie_ep {
 	phys_addr_t		msi_mem_phys;
 	u8			msi_cap;	/* MSI capability offset */
 	u8			msix_cap;	/* MSI-X capability offset */
+	struct pci_epf_bar	*epf_bar[PCI_STD_NUM_BARS];
 };
 
 struct dw_pcie_ops {
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 6e0648991b5c..244e00f48c5c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -87,6 +87,7 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
 			  epf->bar[bar].phys_addr);
 
 	epf->bar[bar].phys_addr = 0;
+	epf->bar[bar].addr = NULL;
 	epf->bar[bar].size = 0;
 	epf->bar[bar].barno = 0;
 	epf->bar[bar].flags = 0;
@@ -123,6 +124,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
 	}
 
 	epf->bar[bar].phys_addr = phys_addr;
+	epf->bar[bar].addr = space;
 	epf->bar[bar].size = size;
 	epf->bar[bar].barno = bar;
 	epf->bar[bar].flags |= upper_32_bits(size) ?
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 0c628e30c582..6644ff3b0702 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -94,10 +94,12 @@ struct pci_epf_driver {
 /**
 * struct pci_epf_bar - represents the BAR of EPF device
 * @phys_addr: physical address that should be mapped to the BAR
+ * @addr: virtual address corresponding to the @phys_addr
 * @size: the size of the address space present in BAR
 */
 struct pci_epf_bar {
 	dma_addr_t	phys_addr;
+	void		*addr;
 	size_t		size;
 	enum pci_barno	barno;
 	int		flags;
@@ -134,6 +136,19 @@ struct pci_epf {
 	struct mutex		lock;
 };
 
+/**
+ * struct pci_epf_msix_tbl - represents the MSI-X table entry structure
+ * @msg_addr: Writes to this address will trigger an MSI-X interrupt in the host
+ * @msg_data: Data that should be written to @msg_addr to trigger an MSI-X interrupt
+ * @vector_ctrl: Identifies if the function is prohibited from sending a message
+ * using this MSI-X table entry
+ */
+struct pci_epf_msix_tbl {
+	u64 msg_addr;
+	u32 msg_data;
+	u32 vector_ctrl;
+};
+
 #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
 
 #define pci_epf_register_driver(driver)    \
--
cgit v1.2.3-58-ga151


From f605a263e0690177ecc180417eacf2b5507dd177 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Fri, 28 Feb 2020 11:34:52 -0500
Subject: dax, pmem: Add a dax operation zero_page_range

Add a dax operation zero_page_range, to zero a page. This will also clear any known poison in the page being zeroed.

As of now, zeroing of one page is allowed in a single call. There are no callers which are trying to zero more than a page in a single call. Once we grow callers which zero more than a page in a single call, we can add that support. The primary reason for not doing that yet is that it would add some complexity to the dm implementation, where a range might span multiple underlying targets; the range would then have to be split into multiple sub-ranges, with zero_page_range() called on each individual target.
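A hedged caller-side sketch of the new operation (not part of the patch; example_zero_one_page is hypothetical):

	#include <linux/dax.h>

	static int example_zero_one_page(struct dax_device *dax_dev, pgoff_t pgoff)
	{
		int id, rc;

		id = dax_read_lock();		/* keeps the dax device alive */
		rc = dax_zero_page_range(dax_dev, pgoff, 1);	/* one page for now */
		dax_read_unlock(id);
		return rc;
	}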
Suggested-by: Christoph Hellwig Signed-off-by: Vivek Goyal Reviewed-by: Pankaj Gupta Link: https://lore.kernel.org/r/20200228163456.1587-3-vgoyal@redhat.com Signed-off-by: Dan Williams --- drivers/dax/super.c | 20 ++++++++++++++++++++ drivers/nvdimm/pmem.c | 11 +++++++++++ include/linux/dax.h | 4 ++++ 3 files changed, 35 insertions(+) (limited to 'include/linux') diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 0aa4b6bc5101..e498daf3c0d7 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -344,6 +344,26 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, } EXPORT_SYMBOL_GPL(dax_copy_to_iter); +int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages) +{ + if (!dax_alive(dax_dev)) + return -ENXIO; + + if (!dax_dev->ops->zero_page_range) + return -EOPNOTSUPP; + /* + * There are no callers that want to zero more than one page as of now. + * Once users are there, this check can be removed after the + * device mapper code has been updated to split ranges across targets. + */ + if (nr_pages != 1) + return -EIO; + + return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages); +} +EXPORT_SYMBOL_GPL(dax_zero_page_range); + #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 075b11682192..5b774ddd0efb 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -282,6 +282,16 @@ static const struct block_device_operations pmem_fops = { .revalidate_disk = nvdimm_revalidate_disk, }; +static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages) +{ + struct pmem_device *pmem = dax_get_private(dax_dev); + + return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0, + PFN_PHYS(pgoff) >> SECTOR_SHIFT, + PAGE_SIZE)); +} + static long pmem_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { @@ -313,6 +323,7 @@ static const struct dax_operations pmem_dax_ops = { .dax_supported = generic_fsdax_supported, .copy_from_iter = pmem_copy_from_iter, .copy_to_iter = pmem_copy_to_iter, + .zero_page_range = pmem_dax_zero_page_range, }; static const struct attribute_group *pmem_attribute_groups[] = { diff --git a/include/linux/dax.h b/include/linux/dax.h index 328c2dbb4409..71735c430c05 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -34,6 +34,8 @@ struct dax_operations { /* copy_to_iter: required operation for fs-dax direct-i/o */ size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t, struct iov_iter *); + /* zero_page_range: required operation. 
Zero page range */ + int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); }; extern struct attribute_group dax_attribute_group; @@ -199,6 +201,8 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); +int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, -- cgit v1.2.3-58-ga151 From cdf6cdcd3b99a99ea9ecc1b05d1d040d5a69a134 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Fri, 28 Feb 2020 11:34:54 -0500 Subject: dm,dax: Add dax zero_page_range operation This patch adds support for dax zero_page_range operation to dm targets. Signed-off-by: Vivek Goyal Acked-by: Mike Snitzer Link: https://lore.kernel.org/r/20200228163456.1587-5-vgoyal@redhat.com Signed-off-by: Dan Williams --- drivers/md/dm-linear.c | 18 ++++++++++++++++++ drivers/md/dm-log-writes.c | 17 +++++++++++++++++ drivers/md/dm-stripe.c | 23 +++++++++++++++++++++++ drivers/md/dm.c | 30 ++++++++++++++++++++++++++++++ include/linux/device-mapper.h | 3 +++ 5 files changed, 91 insertions(+) (limited to 'include/linux') diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 8d07fdf63a47..e1db43446327 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -201,10 +201,27 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i); } +static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, + size_t nr_pages) +{ + int ret; + struct linear_c *lc = ti->private; + struct block_device *bdev = lc->dev->bdev; + struct dax_device *dax_dev = lc->dev->dax_dev; + sector_t dev_sector, sector = pgoff * PAGE_SECTORS; + + dev_sector = linear_map_sector(ti, sector); + ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff); + if (ret) + return ret; + return dax_zero_page_range(dax_dev, pgoff, nr_pages); +} + #else #define linear_dax_direct_access NULL #define linear_dax_copy_from_iter NULL #define linear_dax_copy_to_iter NULL +#define linear_dax_zero_page_range NULL #endif static struct target_type linear_target = { @@ -226,6 +243,7 @@ static struct target_type linear_target = { .direct_access = linear_dax_direct_access, .dax_copy_from_iter = linear_dax_copy_from_iter, .dax_copy_to_iter = linear_dax_copy_to_iter, + .dax_zero_page_range = linear_dax_zero_page_range, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 99721c76225d..8ea20b56b4d6 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -994,10 +994,26 @@ static size_t log_writes_dax_copy_to_iter(struct dm_target *ti, return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i); } +static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, + size_t nr_pages) +{ + int ret; + struct log_writes_c *lc = ti->private; + sector_t sector = pgoff * PAGE_SECTORS; + + ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT, + &pgoff); + if (ret) + return ret; + return dax_zero_page_range(lc->dev->dax_dev, pgoff, + nr_pages << PAGE_SHIFT); +} + #else #define log_writes_dax_direct_access NULL #define log_writes_dax_copy_from_iter NULL #define log_writes_dax_copy_to_iter NULL +#define log_writes_dax_zero_page_range NULL 
#endif
 
 static struct target_type log_writes_target = {
@@ -1016,6 +1032,7 @@ static struct target_type log_writes_target = {
 	.direct_access = log_writes_dax_direct_access,
 	.dax_copy_from_iter = log_writes_dax_copy_from_iter,
 	.dax_copy_to_iter = log_writes_dax_copy_to_iter,
+	.dax_zero_page_range = log_writes_dax_zero_page_range,
 };
 
 static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 63bbcc20f49a..fa813c0f993d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -360,10 +360,32 @@ static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
+static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+				      size_t nr_pages)
+{
+	int ret;
+	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+	struct stripe_c *sc = ti->private;
+	struct dax_device *dax_dev;
+	struct block_device *bdev;
+	uint32_t stripe;
+
+	stripe_map_sector(sc, sector, &stripe, &dev_sector);
+	dev_sector += sc->stripe[stripe].physical_start;
+	dax_dev = sc->stripe[stripe].dev->dax_dev;
+	bdev = sc->stripe[stripe].dev->bdev;
+
+	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+	if (ret)
+		return ret;
+	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
 #else
 #define stripe_dax_direct_access NULL
 #define stripe_dax_copy_from_iter NULL
 #define stripe_dax_copy_to_iter NULL
+#define stripe_dax_zero_page_range NULL
 #endif
 
 /*
@@ -486,6 +508,7 @@ static struct target_type stripe_target = {
 	.direct_access = stripe_dax_direct_access,
 	.dax_copy_from_iter = stripe_dax_copy_from_iter,
 	.dax_copy_to_iter = stripe_dax_copy_to_iter,
+	.dax_zero_page_range = stripe_dax_zero_page_range,
 };
 
 int __init dm_stripe_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b89f07ee2eff..aa72d9e757c1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1198,6 +1198,35 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
+static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+				  size_t nr_pages)
+{
+	struct mapped_device *md = dax_get_private(dax_dev);
+	sector_t sector = pgoff * PAGE_SECTORS;
+	struct dm_target *ti;
+	int ret = -EIO;
+	int srcu_idx;
+
+	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+	if (!ti)
+		goto out;
+	if (WARN_ON(!ti->type->dax_zero_page_range)) {
+		/*
+		 * ->zero_page_range() is a mandatory dax operation. If we are
+		 * here, something is wrong.
+		 */
+		dm_put_live_table(md, srcu_idx);
+		goto out;
+	}
+	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
+
+ out:
+	dm_put_live_table(md, srcu_idx);
+
+	return ret;
+}
+
 /*
 * A target may call dm_accept_partial_bio only from the map routine. It is
It is * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET, @@ -3199,6 +3228,7 @@ static const struct dax_operations dm_dax_ops = { .dax_supported = dm_dax_supported, .copy_from_iter = dm_dax_copy_from_iter, .copy_to_iter = dm_dax_copy_to_iter, + .zero_page_range = dm_dax_zero_page_range, }; /* diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 475668c69dbc..af48d9da3916 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -141,6 +141,8 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); +typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, + size_t nr_pages); #define PAGE_SECTORS (PAGE_SIZE / 512) void dm_error(const char *message); @@ -195,6 +197,7 @@ struct target_type { dm_dax_direct_access_fn direct_access; dm_dax_copy_iter_fn dax_copy_from_iter; dm_dax_copy_iter_fn dax_copy_to_iter; + dm_dax_zero_page_range_fn dax_zero_page_range; /* For internal device-mapper use. */ struct list_head list; -- cgit v1.2.3-58-ga151 From 4f3b4f161d7a070d2181dbcf7fbd97c7631d5c24 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Fri, 28 Feb 2020 11:34:56 -0500 Subject: dax,iomap: Add helper dax_iomap_zero() to zero a range Add a helper dax_iomap_zero() to zero a range. This patch basically merges __dax_zero_page_range() and iomap_dax_zero(). Suggested-by: Christoph Hellwig Signed-off-by: Vivek Goyal Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20200228163456.1587-7-vgoyal@redhat.com Signed-off-by: Dan Williams --- fs/dax.c | 16 ++++++++-------- fs/iomap/buffered-io.c | 9 +-------- include/linux/dax.h | 17 +++-------------- 3 files changed, 12 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/fs/dax.c b/fs/dax.c index 98ba3756163a..11b16729b86f 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1038,10 +1038,10 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, return ret; } -int __dax_zero_page_range(struct block_device *bdev, - struct dax_device *dax_dev, sector_t sector, - unsigned int offset, unsigned int size) +int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size, + struct iomap *iomap) { + sector_t sector = iomap_sector(iomap, pos & PAGE_MASK); pgoff_t pgoff; long rc, id; void *kaddr; @@ -1052,16 +1052,17 @@ int __dax_zero_page_range(struct block_device *bdev, IS_ALIGNED(size, PAGE_SIZE)) page_aligned = true; - rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); + rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff); if (rc) return rc; id = dax_read_lock(); if (page_aligned) - rc = dax_zero_page_range(dax_dev, pgoff, size >> PAGE_SHIFT); + rc = dax_zero_page_range(iomap->dax_dev, pgoff, + size >> PAGE_SHIFT); else - rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL); + rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; @@ -1069,12 +1070,11 @@ int __dax_zero_page_range(struct block_device *bdev, if (!page_aligned) { memset(kaddr + offset, 0, size); - dax_flush(dax_dev, kaddr + offset, size); + dax_flush(iomap->dax_dev, kaddr + offset, size); } dax_read_unlock(id); return 0; } -EXPORT_SYMBOL_GPL(__dax_zero_page_range); static loff_t dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 7c84c4c027c4..6f750da545e5
100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -974,13 +974,6 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap); } -static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, - struct iomap *iomap) -{ - return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, - iomap_sector(iomap, pos & PAGE_MASK), offset, bytes); -} - static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, void *data, struct iomap *iomap, struct iomap *srcmap) @@ -1000,7 +993,7 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, bytes = min_t(loff_t, PAGE_SIZE - offset, count); if (IS_DAX(inode)) - status = iomap_dax_zero(pos, offset, bytes, iomap); + status = dax_iomap_zero(pos, offset, bytes, iomap); else status = iomap_zero(inode, pos, offset, bytes, iomap, srcmap); diff --git a/include/linux/dax.h b/include/linux/dax.h index 71735c430c05..d7af5d243f24 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -13,6 +13,7 @@ typedef unsigned long dax_entry_t; struct iomap_ops; +struct iomap; struct dax_device; struct dax_operations { /* @@ -214,20 +215,8 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); - -#ifdef CONFIG_FS_DAX -int __dax_zero_page_range(struct block_device *bdev, - struct dax_device *dax_dev, sector_t sector, - unsigned int offset, unsigned int length); -#else -static inline int __dax_zero_page_range(struct block_device *bdev, - struct dax_device *dax_dev, sector_t sector, - unsigned int offset, unsigned int length) -{ - return -ENXIO; -} -#endif - +int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size, + struct iomap *iomap); static inline bool dax_mapping(struct address_space *mapping) { return mapping->host && IS_DAX(mapping->host); -- cgit v1.2.3-58-ga151 From 2b729fe7f3e9478a21a336231daf35768e7cf37b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 3 Apr 2020 11:32:13 -0400 Subject: Revert "cpuset: Make cpuset hotplug synchronous" This reverts commit a49e4629b5ed ("cpuset: Make cpuset hotplug synchronous") as it may deadlock with the cpu hotplug path.
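The revert restores the usual bounce-to-workqueue pattern: the hotplug callback only schedules deferred work, and any path that must observe the result flushes that work item, as the diff below shows. A minimal sketch of the pattern with hypothetical names (not the cpuset code itself):

#include <linux/workqueue.h>

/* Hypothetical stand-in for the expensive rebuild step. */
static void rebuild_state(void)
{
}

static void hotplug_workfn(struct work_struct *work)
{
	/*
	 * Runs in process context without cpu_hotplug_lock held, so it
	 * can take the locks the rebuild needs in the normal order.
	 */
	rebuild_state();
}
static DECLARE_WORK(hotplug_work, hotplug_workfn);

/* Called from the hotplug path, which already holds cpu_hotplug_lock. */
void update_active_cpus(void)
{
	schedule_work(&hotplug_work);
}

/* Callers that must observe the completed update wait for the work. */
void wait_for_hotplug(void)
{
	flush_work(&hotplug_work);
}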
Link: http://lkml.kernel.org/r/F0388D99-84D7-453B-9B6B-EEFF0E7BE4CC@lca.pw Signed-off-by: Tejun Heo Reported-by: Qian Cai Cc: Prateek Sood --- include/linux/cpuset.h | 3 +++ kernel/cgroup/cpuset.c | 31 ++++++++++++------------------- kernel/power/process.c | 2 ++ 3 files changed, 17 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index cede4cb98b78..04c20de66afc 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -54,6 +54,7 @@ extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); +extern void cpuset_wait_for_hotplug(void); extern void cpuset_read_lock(void); extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); @@ -175,6 +176,8 @@ static inline void cpuset_update_active_cpus(void) partition_sched_domains(1, NULL, NULL); } +static inline void cpuset_wait_for_hotplug(void) { } + static inline void cpuset_read_lock(void) { } static inline void cpuset_read_unlock(void) { } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index cafd4d2ff882..58f5073acff7 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -3101,7 +3101,7 @@ update_tasks: } /** - * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset + * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset * * This function is called after either CPU or memory configuration has * changed and updates cpuset accordingly. The top_cpuset is always @@ -3116,7 +3116,7 @@ update_tasks: * Note that CPU offlining during suspend is ignored. We don't modify * cpusets across suspend/resume cycles at all. */ -static void cpuset_hotplug(bool use_cpu_hp_lock) +static void cpuset_hotplug_workfn(struct work_struct *work) { static cpumask_t new_cpus; static nodemask_t new_mems; @@ -3201,32 +3201,25 @@ static void cpuset_hotplug(bool use_cpu_hp_lock) /* rebuild sched domains if cpus_allowed has changed */ if (cpus_updated || force_rebuild) { force_rebuild = false; - if (use_cpu_hp_lock) - rebuild_sched_domains(); - else { - /* Acquiring cpu_hotplug_lock is not required. - * When cpuset_hotplug() is called in hotplug path, - * cpu_hotplug_lock is held by the hotplug context - * which is waiting for cpuhp_thread_fun to indicate - * completion of callback. - */ - percpu_down_write(&cpuset_rwsem); - rebuild_sched_domains_locked(); - percpu_up_write(&cpuset_rwsem); - } + rebuild_sched_domains(); } free_cpumasks(NULL, ptmp); } -static void cpuset_hotplug_workfn(struct work_struct *work) +void cpuset_update_active_cpus(void) { - cpuset_hotplug(true); + /* + * We're inside cpu hotplug critical region which usually nests + * inside cgroup synchronization. Bounce actual hotplug processing + * to a work item to avoid reverse locking order. 
+ */ + schedule_work(&cpuset_hotplug_work); } -void cpuset_update_active_cpus(void) +void cpuset_wait_for_hotplug(void) { - cpuset_hotplug(false); + flush_work(&cpuset_hotplug_work); } /* diff --git a/kernel/power/process.c b/kernel/power/process.c index 08f7019357ee..4b6a54da7e65 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -204,6 +204,8 @@ void thaw_processes(void) __usermodehelper_set_disable_depth(UMH_FREEZING); thaw_workqueues(); + cpuset_wait_for_hotplug(); + read_lock(&tasklist_lock); for_each_process_thread(g, p) { /* No other threads should have PF_SUSPEND_TASK set */ -- cgit v1.2.3-58-ga151 From 8c5c660529209a0e324c1c1a35ce3f83d67a2aa5 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 3 Apr 2020 07:33:20 -0700 Subject: nvme-fc: Revert "add module to ops template to allow module references" The original patch was meant to prevent the lldd from being unloaded while it was being used to talk to the system's boot device. However, the end result of the original patch is that any driver unload is now prohibited while an nvme controller is live via the lldd. Given the module reference, the module teardown routine can't be called, so there is no way, other than manual action, to terminate the controllers. Fixes: 863fbae929c7 ("nvme_fc: add module to ops template to allow module references") Cc: # v5.4+ Signed-off-by: James Smart Reviewed-by: Himanshu Madhani Signed-off-by: Christoph Hellwig --- drivers/nvme/host/fc.c | 14 ++------------ drivers/nvme/target/fcloop.c | 1 - drivers/scsi/lpfc/lpfc_nvme.c | 2 -- drivers/scsi/qla2xxx/qla_nvme.c | 1 - include/linux/nvme-fc-driver.h | 4 ---- 5 files changed, 2 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index a8bf2fb1287b..7dfc4a2ecf1e 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || !template->max_sgl_segments || - !template->max_dif_sgl_segments || !template->dma_boundary || - !template->module) { + !template->max_dif_sgl_segments || !template->dma_boundary) { ret = -EINVAL; goto out_reghost_failed; } @@ -2016,7 +2015,6 @@ nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); - struct nvme_fc_lport *lport = ctrl->lport; unsigned long flags; if (ctrl->ctrl.tagset) { @@ -2043,7 +2041,6 @@ nvme_fc_ctrl_free(struct kref *ref) if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); - module_put(lport->ops->module); } static void @@ -3074,15 +3071,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto out_fail; } - if (!try_module_get(lport->ops->module)) { - ret = -EUNATCH; - goto out_free_ctrl; - } - idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; - goto out_mod_put; + goto out_free_ctrl; } ctrl->ctrl.opts = opts; @@ -3232,8 +3224,6 @@ out_free_queues: out_free_ida: put_device(ctrl->dev); ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); -out_mod_put: - module_put(lport->ops->module); out_free_ctrl: kfree(ctrl); out_fail: diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 9861fcea39f6..f69ce66e2d44 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -875,7 +875,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define
FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { - .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index f6c8963c915d..db4a04a207ec 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1985,8 +1985,6 @@ out_unlock: /* Declare and initialization an instance of the FC NVME template. */ static struct nvme_fc_port_template lpfc_nvme_template = { - .module = THIS_MODULE, - /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index bfcd02fdf2b8..941aa53363f5 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -610,7 +610,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) } static struct nvme_fc_port_template qla_nvme_fc_transport = { - .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 6d0d70f3219c..10f81629b9ce 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -270,8 +270,6 @@ struct nvme_fc_remote_port { * * Host/Initiator Transport Entrypoints/Parameters: * - * @module: The LLDD module using the interface - * * @localport_delete: The LLDD initiates deletion of a localport via * nvme_fc_deregister_localport(). However, the teardown is * asynchronous. This routine is called upon the completion of the @@ -385,8 +383,6 @@ struct nvme_fc_remote_port { * Value is Mandatory. Allowed to be zero. */ struct nvme_fc_port_template { - struct module *module; - /* initiator-based functions */ void (*localport_delete)(struct nvme_fc_local_port *); void (*remoteport_delete)(struct nvme_fc_remote_port *); -- cgit v1.2.3-58-ga151 From ddfd9dcf270ce23ed1985b66fcfa163920e2e1b8 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 3 Apr 2020 17:48:33 +0200 Subject: ACPI: PM: Add acpi_[un]register_wakeup_handler() Since commit fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system"), an SCI triggering without a wakeup cause recognized by the ACPI sleep code will no longer wake up the system. This works as intended, but it is a problem for devices where the SCI is shared with another device which is also a wakeup source. In the past these SCIs, spurious from the pov of the ACPI sleep code, would still cause a wakeup, so the wakeup from the device sharing the interrupt would actually wake up the system. This no longer works. This is a problem on e.g. Bay Trail-T and Cherry Trail devices, where some peripherals (typically the XHCI controller) can signal a Power Management Event (PME) to the Power Management Controller (PMC) to wake up the system; this uses the same interrupt as the SCI. These wakeups are handled through a special INT0002 ACPI device which checks the GPE0a_STS register for such events and takes care of acking the PME so that the shared interrupt stops triggering. The change to the ACPI sleep code to ignore the spurious SCI causes the system to no longer wake up on these PME events.
To make things worse, this means that the INT0002 device driver's interrupt handler will no longer run, causing the PME to not get cleared and resulting in the system hanging. Trying to wake up the system after such a PME through e.g. the power button no longer works. Add an acpi_register_wakeup_handler() function which registers a handler to be called from acpi_s2idle_wake(); when the handler returns true, acpi_s2idle_wake() returns true as well. The INT0002 driver will use this mechanism to check the GPE0a_STS register from acpi_s2idle_wake() and to tell the system to wake up if a PME is signaled in the register. Fixes: fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system") Cc: 5.4+ # 5.4+ Signed-off-by: Hans de Goede Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 4 +++ drivers/acpi/sleep.h | 1 + drivers/acpi/wakeup.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 5 ++++ 4 files changed, 91 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index bb1ae400ec1f..4edc8a3ce40f 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void) if (acpi_any_fixed_event_status_set()) return true; + /* Check wakeups from drivers sharing the SCI. */ + if (acpi_check_wakeup_handlers()) + return true; + /* * If the status bit is set for any enabled GPE other than the * EC one, the wakeup is regarded as a genuine one. diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 41675d24a9bc..3d90480ce1b1 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -2,6 +2,7 @@ extern void acpi_enable_wakeup_devices(u8 sleep_state); extern void acpi_disable_wakeup_devices(u8 sleep_state); +extern bool acpi_check_wakeup_handlers(void); extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index c28244df56a5..0b2e42530adf 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -12,6 +12,15 @@ #include "internal.h" #include "sleep.h" +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *context); + void *context; +}; + +static LIST_HEAD(acpi_wakeup_handler_head); +static DEFINE_MUTEX(acpi_wakeup_handler_mutex); + /* * We didn't lock acpi_device_lock in the file, because it invokes oops in * suspend/resume and isn't really required as this is called in S-state. At @@ -90,3 +99,75 @@ int __init acpi_wakeup_device_init(void) mutex_unlock(&acpi_device_lock); return 0; } + +/** + * acpi_register_wakeup_handler - Register wakeup handler + * @wake_irq: The IRQ through which the device may receive wakeups + * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup + * @context: Context to pass to the handler when calling it + * + * Drivers which may share an IRQ with the SCI can use this to register + * a handler which returns true when the device they are managing wants + * to trigger a wakeup. + */ +int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + /* + * If the device is not sharing its IRQ with the SCI, there is no + * need to register the handler.
+ */ + if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq) + return 0; + + handler = kmalloc(sizeof(*handler), GFP_KERNEL); + if (!handler) + return -ENOMEM; + + handler->wakeup = wakeup; + handler->context = context; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_add(&handler->list_node, &acpi_wakeup_handler_head); + mutex_unlock(&acpi_wakeup_handler_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler); + +/** + * acpi_unregister_wakeup_handler - Unregister wakeup handler + * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler() + * @context: Context passed to acpi_register_wakeup_handler() + */ +void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup == wakeup && handler->context == context) { + list_del(&handler->list_node); + kfree(handler); + break; + } + } + mutex_unlock(&acpi_wakeup_handler_mutex); +} +EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler); + +bool acpi_check_wakeup_handlers(void) +{ + struct acpi_wakeup_handler *handler; + + /* No need to lock, nothing else is running when we're called. */ + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup(handler->context)) + return true; + } + + return false; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0f24d701fbdc..efac0f9c01a2 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -488,6 +488,11 @@ void __init acpi_nvs_nosave_s3(void); void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ +int acpi_register_wakeup_handler( + int wake_irq, bool (*wakeup)(void *context), void *context); +void acpi_unregister_wakeup_handler( + bool (*wakeup)(void *context), void *context); + struct acpi_osc_context { char *uuid_str; /* UUID string */ int rev; -- cgit v1.2.3-58-ga151 From 70fbdfef4ba63eeef83b2c94eac9a5a9f913e442 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 5 Apr 2020 11:34:35 -0700 Subject: sysfs: remove redundant __compat_only_sysfs_link_entry_to_kobj fn Commit 9255782f7061 ("sysfs: Wrap __compat_only_sysfs_link_entry_to_kobj function to change the symlink name") made this function a wrapper around a new non-underscored function, which is a bit odd. The normal naming convention is the other way around: the underscored function is the wrappee, and the non-underscored function is the wrapper. There's only one single user (well, two call-sites in that user) of the more limited double underscore version of this function, so just remove the oddly named wrapper entirely and just add the extra NULL argument to the user. I considered just doing that in the merge, but that tends to make history really hard to read. 
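With the wrapper gone, the two remaining call-sites pass NULL as the trailing argument to keep the old behaviour, as the tpm-chip.c hunk below shows. A minimal hedged sketch of such a call, assuming (per commit 9255782f7061) that the final parameter is the optional symlink name and that NULL falls back to the target name:

#include <linux/sysfs.h>

/*
 * Link an entry of @target under @parent; the NULL symlink name keeps
 * the entry's own name, i.e. the behaviour of the removed wrapper.
 */
static int link_entry_compat(struct kobject *parent, struct kobject *target,
			     const char *name)
{
	return compat_only_sysfs_link_entry_to_kobj(parent, target, name, NULL);
}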
Link: https://lore.kernel.org/lkml/CAHk-=wgkkmNV5tMzQDmPAQuNJBuMcry--Jb+h8H1o4RA3kF7QQ@mail.gmail.com/ Cc: Sourabh Jain Cc: Michael Ellerman Signed-off-by: Linus Torvalds --- drivers/char/tpm/tpm-chip.c | 8 ++++---- fs/sysfs/group.c | 16 ---------------- include/linux/sysfs.h | 11 ----------- 3 files changed, 4 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 58073836b555..8c77e88012e9 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -514,15 +514,15 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) return 0; - rc = __compat_only_sysfs_link_entry_to_kobj( - &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); + rc = compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL); if (rc && rc != -ENOENT) return rc; /* All the names from tpm-sysfs */ for (i = chip->groups[0]->attrs; *i != NULL; ++i) { - rc = __compat_only_sysfs_link_entry_to_kobj( - &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name); + rc = compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL); if (rc) { tpm_del_legacy_sysfs(chip); return rc; diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index fbb117757c52..64e6a6698935 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -415,22 +415,6 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, } EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group); -/** - * __compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing - * to a group or an attribute - * @kobj: The kobject containing the group. - * @target_kobj: The target kobject. - * @target_name: The name of the target group or attribute. - */ -int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name) -{ - return compat_only_sysfs_link_entry_to_kobj(kobj, target_kobj, - target_name, NULL); -} -EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj); - /** * compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing * to a group or an attribute diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fd0fcb4d4f4d..80bb865b3a33 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -297,9 +297,6 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); -int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name); int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name, @@ -516,14 +513,6 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj, { } -static inline int __compat_only_sysfs_link_entry_to_kobj( - struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name) -{ - return 0; -} - static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name, -- cgit v1.2.3-58-ga151 From db1f00fb8ff793889e83f2e37e0c7bbb6fc9934e Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Sun, 5 Apr 2020 18:59:24 -0700 Subject: skbuff.h: Improve the checksum related comments Fixed the punctuation and some typos. Improved some sentences with minor changes. 
No change of semantics or code. Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Randy Dunlap Signed-off-by: Dexuan Cui Signed-off-by: David S. Miller --- include/linux/skbuff.h | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 28b1a2b4459e..3a2ac7072dbb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -47,8 +47,8 @@ * A. IP checksum related features * * Drivers advertise checksum offload capabilities in the features of a device. - * From the stack's point of view these are capabilities offered by the driver, - * a driver typically only advertises features that it is capable of offloading + * From the stack's point of view these are capabilities offered by the driver. + * A driver typically only advertises features that it is capable of offloading * to its device. * * The checksum related features are: @@ -63,7 +63,7 @@ * TCP or UDP packets over IPv4. These are specifically * unencapsulated packets of the form IPv4|TCP or * IPv4|UDP where the Protocol field in the IPv4 header - * is TCP or UDP. The IPv4 header may contain IP options + * is TCP or UDP. The IPv4 header may contain IP options. * This feature cannot be set in features for a device * with NETIF_F_HW_CSUM also set. This feature is being * DEPRECATED (see below). @@ -79,13 +79,13 @@ * DEPRECATED (see below). * * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. - * This flag is used only used to disable the RX checksum + * This flag is only used to disable the RX checksum * feature for a device. The stack will accept receive * checksum indication in packets received on a device * regardless of whether NETIF_F_RXCSUM is set. * * B. Checksumming of received packets by device. Indication of checksum - * verification is in set skb->ip_summed. Possible values are: + * verification is set in skb->ip_summed. Possible values are: * * CHECKSUM_NONE: * @@ -115,16 +115,16 @@ * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet * and a device is able to verify the checksums for UDP (possibly zero), - * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to + * GRE (checksum flag is set) and TCP, skb->csum_level would be set to * two. If the device were only able to verify the UDP checksum and not - * GRE, either because it doesn't support GRE checksum of because GRE + * GRE, either because it doesn't support GRE checksum or because GRE * checksum is bad, skb->csum_level would be set to zero (TCP checksum is * not considered in this case). * * CHECKSUM_COMPLETE: * * This is the most generic way. The device supplied checksum of the _whole_ - * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the + * packet as seen by netif_rx() and fills in skb->csum. This means the * hardware doesn't need to parse L3/L4 headers to implement this. * * Notes: @@ -153,8 +153,8 @@ * from skb->csum_start up to the end, and to record/write the checksum at * offset skb->csum_start + skb->csum_offset. 
A driver may verify that the * csum_start and csum_offset values are valid values given the length and - * offset of the packet, however they should not attempt to validate that the - * checksum refers to a legitimate transport layer checksum-- it is the + * offset of the packet, but it should not attempt to validate that the + * checksum refers to a legitimate transport layer checksum -- it is the * purview of the stack to validate that csum_start and csum_offset are set * correctly. * @@ -178,18 +178,18 @@ * * CHECKSUM_UNNECESSARY: * - * This has the same meaning on as CHECKSUM_NONE for checksum offload on + * This has the same meaning as CHECKSUM_NONE for checksum offload on * output. * * CHECKSUM_COMPLETE: * Not used in checksum output. If a driver observes a packet with this value - * set in skbuff, if should treat as CHECKSUM_NONE being set. + * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set. * * D. Non-IP checksum (CRC) offloads * * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack - * will set set csum_start and csum_offset accordingly, set ip_summed to + * will set csum_start and csum_offset accordingly, set ip_summed to * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. * A driver that supports both IP checksum offload and SCTP CRC32c offload @@ -200,10 +200,10 @@ * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset - * accordingly. Note the there is no indication in the skbuff that the - * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports + * accordingly. Note that there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports * both IP checksum offload and FCOE CRC offload must verify which offload - * is configured for a packet presumably by inspecting packet headers. + * is configured for a packet, presumably by inspecting packet headers. * * E. Checksumming on output with GSO. * @@ -211,9 +211,9 @@ * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as * part of the GSO operation is implied. If a checksum is being offloaded - * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset - * are set to refer to the outermost checksum being offload (two offloaded - * checksums are possible with UDP encapsulation). + * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and + * csum_offset are set to refer to the outermost checksum being offloaded + * (two offloaded checksums are possible with UDP encapsulation). */ /* Don't change this without changing skb_csum_unnecessary! */ -- cgit v1.2.3-58-ga151 From 93ce4af774bc3d8a72ce2271d03241c96383629d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 6 Apr 2020 13:39:29 -0400 Subject: NFS: Clean up process of marking inode stale. Instead of the various open coded calls to set the NFS_INO_STALE bit and call nfs_zap_caches(), consolidate them into a single function nfs_set_inode_stale(). 
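The consolidation follows the usual *_locked split: a helper that assumes inode->i_lock is already held, plus a public wrapper that takes the lock itself. Stripped of the tracepoint, the shape of the new helper pair in the patch below is:

/* Caller must hold inode->i_lock. */
static void nfs_set_inode_stale_locked(struct inode *inode)
{
	set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
	nfs_zap_caches_locked(inode);
}

void nfs_set_inode_stale(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs_set_inode_stale_locked(inode);
	spin_unlock(&inode->i_lock);
}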
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 5 +++-- fs/nfs/inode.c | 18 +++++++++++++----- fs/nfs/nfstrace.h | 1 + fs/nfs/read.c | 2 +- include/linux/nfs_fs.h | 1 + 5 files changed, 19 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index f14184d0ba82..d729d8311c7e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2669,9 +2669,10 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) status = NFS_PROTO(inode)->access(inode, &cache); if (status != 0) { if (status == -ESTALE) { - nfs_zap_caches(inode); if (!S_ISDIR(inode->i_mode)) - set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); + nfs_set_inode_stale(inode); + else + nfs_zap_caches(inode); } goto out; } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index a10fb87c6ac3..b9d0921cb4fe 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -62,7 +62,6 @@ /* Default is to see 64-bit inode numbers */ static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED; -static void nfs_invalidate_inode(struct inode *); static int nfs_update_inode(struct inode *, struct nfs_fattr *); static struct kmem_cache * nfs_inode_cachep; @@ -284,10 +283,18 @@ EXPORT_SYMBOL_GPL(nfs_invalidate_atime); * Invalidate, but do not unhash, the inode. * NB: must be called with inode->i_lock held! */ -static void nfs_invalidate_inode(struct inode *inode) +static void nfs_set_inode_stale_locked(struct inode *inode) { set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); nfs_zap_caches_locked(inode); + trace_nfs_set_inode_stale(inode); +} + +void nfs_set_inode_stale(struct inode *inode) +{ + spin_lock(&inode->i_lock); + nfs_set_inode_stale_locked(inode); + spin_unlock(&inode->i_lock); } struct nfs_find_desc { @@ -1163,9 +1170,10 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) status = 0; break; case -ESTALE: - nfs_zap_caches(inode); if (!S_ISDIR(inode->i_mode)) - set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); + nfs_set_inode_stale(inode); + else + nfs_zap_caches(inode); } goto err_out; } @@ -2064,7 +2072,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) * lookup validation will know that the inode is bad. * (But we fall through to invalidate the caches.) 
*/ - nfs_invalidate_inode(inode); + nfs_set_inode_stale_locked(inode); return -ESTALE; } diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index a9588d19a5ae..7e7a97ae21ed 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -181,6 +181,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done, int error \ ), \ TP_ARGS(inode, error)) +DEFINE_NFS_INODE_EVENT(nfs_set_inode_stale); DEFINE_NFS_INODE_EVENT(nfs_refresh_inode_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_refresh_inode_exit); DEFINE_NFS_INODE_EVENT(nfs_revalidate_inode_enter); diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 34bb9add2302..13b22e898116 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -250,7 +250,7 @@ static int nfs_readpage_done(struct rpc_task *task, trace_nfs_readpage_done(task, hdr); if (task->tk_status == -ESTALE) { - set_bit(NFS_INO_STALE, &NFS_I(inode)->flags); + nfs_set_inode_stale(inode); nfs_mark_for_revalidate(inode); } return 0; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 5d5b91e54f73..73eda45f1cfd 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -354,6 +354,7 @@ static inline unsigned long nfs_save_change_attribute(struct inode *dir) extern int nfs_sync_mapping(struct address_space *mapping); extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); extern void nfs_zap_caches(struct inode *); +extern void nfs_set_inode_stale(struct inode *inode); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); -- cgit v1.2.3-58-ga151 From c7e4ea68c1626cceb966323a4b572e2f8d805138 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Sat, 21 Mar 2020 17:01:53 +0100 Subject: leds: old enums are not really applicable to new code Warn about old defines that probably should not be used. Signed-off-by: Pavel Machek --- include/linux/leds.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/leds.h b/include/linux/leds.h index 75353e5f9d13..2451962d1ec5 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -25,6 +25,7 @@ struct device_node; * LED Core */ +/* This is obsolete/useless. We now support variable maximum brightness. */ enum led_brightness { LED_OFF = 0, LED_ON = 1, -- cgit v1.2.3-58-ga151 From 3f5b9959041e0db6dacbea80bb833bff5900999f Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Fri, 3 Apr 2020 22:51:33 +0200 Subject: thermal: devfreq_cooling: inline all stubs for CONFIG_DEVFREQ_THERMAL=n When CONFIG_DEVFREQ_THERMAL is disabled, all functions except of_devfreq_cooling_register_power() were already inlined. Also inline the last function to avoid compile errors when multiple drivers call of_devfreq_cooling_register_power() while CONFIG_DEVFREQ_THERMAL is not set. Compilation failed with the following message: multiple definition of `of_devfreq_cooling_register_power' (which then lists all usages of of_devfreq_cooling_register_power()) Thomas Zimmermann reported this problem [0] on a kernel config with CONFIG_DRM_LIMA={m,y}, CONFIG_DRM_PANFROST={m,y} and CONFIG_DEVFREQ_THERMAL=n after both the lima and panfrost drivers gained devfreq cooling support.
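The general rule at work: a stub defined in a header for the config-off case must be static inline; a plain definition is emitted as a global symbol by every translation unit that includes the header, and the link then fails with exactly this "multiple definition" error once two such objects meet. A minimal sketch of the idiom, with hypothetical names and an illustrative error value:

/* foo_cooling.h */
#include <linux/err.h>

#ifdef CONFIG_FOO_THERMAL
struct thermal_cooling_device *foo_cooling_register(struct device *dev);
#else
/* static inline: each includer gets a local copy, no global symbol. */
static inline struct thermal_cooling_device *
foo_cooling_register(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif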
[0] https://www.spinics.net/lists/dri-devel/msg252825.html Fixes: a76caf55e5b356 ("thermal: Add devfreq cooling") Cc: stable@vger.kernel.org Reported-by: Thomas Zimmermann Signed-off-by: Martin Blumenstingl Tested-by: Thomas Zimmermann Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200403205133.1101808-1-martin.blumenstingl@googlemail.com --- include/linux/devfreq_cooling.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 4635f95000a4..79a6e37a1d6f 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); #else /* !CONFIG_DEVFREQ_THERMAL */ -struct thermal_cooling_device * +static inline struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power) { -- cgit v1.2.3-58-ga151 From 3122e80efc0faf4a2accba7a46c7ed795edbfded Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 6 Apr 2020 20:03:47 -0700 Subject: mm/vma: make vma_is_accessible() available for general use Let's move the vma_is_accessible() helper to include/linux/mm.h, which makes it available for general use. While here, this replaces all remaining open encodings of the VMA access check with vma_is_accessible(). Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton Acked-by: Geert Uytterhoeven Acked-by: Guo Ren Acked-by: Vlastimil Babka Cc: Guo Ren Cc: Geert Uytterhoeven Cc: Ralf Baechle Cc: Paul Burton Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Yoshinori Sato Cc: Rich Felker Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Steven Rostedt Cc: Mel Gorman Cc: Alexander Viro Cc: "Aneesh Kumar K.V" Cc: Arnaldo Carvalho de Melo Cc: Arnd Bergmann Cc: Nick Piggin Cc: Paul Mackerras Cc: Will Deacon Link: http://lkml.kernel.org/r/1582520593-30704-3-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- arch/csky/mm/fault.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/mips/mm/fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/x86/mm/fault.c | 2 +- include/linux/mm.h | 6 ++++++ kernel/sched/fair.c | 2 +- mm/gup.c | 2 +- mm/memory.c | 5 ----- mm/mempolicy.c | 3 +-- mm/mmap.c | 5 ++--- 12 files changed, 17 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index d3c61b83e195..a6e8230b6fbf 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -141,7 +141,7 @@ good_area: if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { - if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + if (!vma_is_accessible(vma)) goto bad_area; } diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index f7afb9897966..0c4a21a685d5 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -125,7 +125,7 @@ good_area: case 1: /* read, present */ goto acc_err; case 0: /* read, not present */ - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) + if (!vma_is_accessible(vma)) goto acc_err; } diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 4a0eafe3d932..fb048ba2b91d 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -142,7 +142,7 @@ good_area: goto bad_area; } } else { - if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + if (!vma_is_accessible(vma)) goto bad_area; } } diff --git
a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d15f0f0ee806..84af6c8eecf7 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -314,7 +314,7 @@ static bool access_error(bool is_write, bool is_exec, return false; } - if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) + if (unlikely(!vma_is_accessible(vma))) return true; /* * We should ideally do the vma pkey access check here. But in the diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 13ee4d20e622..5f23d7907597 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -355,7 +355,7 @@ static inline int access_error(int error_code, struct vm_area_struct *vma) return 1; /* read, not present: */ - if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) + if (unlikely(!vma_is_accessible(vma))) return 1; return 0; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 859519f5b342..a51df516b87b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1222,7 +1222,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) return 1; /* read, not present: */ - if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) + if (unlikely(!vma_is_accessible(vma))) return 1; return 0; diff --git a/include/linux/mm.h b/include/linux/mm.h index 7dd5c4ccbf85..be49e371e4b5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -629,6 +629,12 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma) return false; } + +static inline bool vma_is_accessible(struct vm_area_struct *vma) +{ + return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC); +} + #ifdef CONFIG_SHMEM /* * The vma_is_shmem is not inline because it is used only by slow diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d7fb20adabeb..1ea3dddafe69 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2799,7 +2799,7 @@ static void task_numa_work(struct callback_head *work) * Skip inaccessible VMAs to avoid any confusion between * PROT_NONE and NUMA hinting ptes */ - if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) + if (!vma_is_accessible(vma)) continue; do { diff --git a/mm/gup.c b/mm/gup.c index da3e03185144..4d505c994623 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1416,7 +1416,7 @@ long populate_vma_page_range(struct vm_area_struct *vma, * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. 
*/ - if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) + if (vma_is_accessible(vma)) gup_flags |= FOLL_FORCE; /* diff --git a/mm/memory.c b/mm/memory.c index 586271f3efc6..d2a353c345ad 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3964,11 +3964,6 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) return VM_FAULT_FALLBACK; } -static inline bool vma_is_accessible(struct vm_area_struct *vma) -{ - return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); -} - static vm_fault_t create_huge_pud(struct vm_fault *vmf) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5fb427aed612..b36926ba02e2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -678,8 +678,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ - if (!is_vm_hugetlb_page(vma) && - (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && + if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && !(vma->vm_flags & VM_MIXEDMAP)) change_prot_numa(vma, start, endvma); return 1; diff --git a/mm/mmap.c b/mm/mmap.c index 94ae18398c59..aa09429d5888 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2358,8 +2358,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) gap_addr = TASK_SIZE; next = vma->vm_next; - if (next && next->vm_start < gap_addr && - (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { + if (next && next->vm_start < gap_addr && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ @@ -2440,7 +2439,7 @@ int expand_downwards(struct vm_area_struct *vma, prev = vma->vm_prev; /* Check that both stack segments have the same anon_vma? */ if (prev && !(prev->vm_flags & VM_GROWSDOWN) && - (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) { + vma_is_accessible(prev)) { if (address - prev->vm_end < stack_guard_gap) return -ENOMEM; } -- cgit v1.2.3-58-ga151 From 29fd1897070125ab49634524a20f146fb4240a51 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Mon, 6 Apr 2020 20:04:06 -0700 Subject: mm: make it clear that gfp reclaim modifiers are valid only for sleepable allocations While it might be really clear to MM developers that gfp reclaim modifiers are applicable only to sleepable allocations (those with __GFP_DIRECT_RECLAIM) it seems that actual users of the API are not always sure. Make it explicit that they are not applicable for GFP_NOWAIT or GFP_ATOMIC allocations which are the most commonly used non-sleepable allocation masks. Signed-off-by: Michal Hocko Signed-off-by: Andrew Morton Reviewed-by: Joel Fernandes (Google) Acked-by: Paul E. McKenney Acked-by: David Rientjes Cc: Neil Brown Link: http://lkml.kernel.org/r/20200403083543.11552-3-mhocko@kernel.org Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index be2754841369..4aba4c86c626 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -124,6 +124,8 @@ struct vm_area_struct; * * Reclaim modifiers * ~~~~~~~~~~~~~~~~~ + * Please note that all the following flags are only applicable to sleepable + * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them). * * %__GFP_IO can start physical IO. 
* -- cgit v1.2.3-58-ga151 From dcdf11ee144133328664d90836e712d840d047d9 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 6 Apr 2020 20:04:25 -0700 Subject: mm, shmem: add vmstat for hugepage fallback The existing thp_fault_fallback indicates when thp attempts to allocate a hugepage but fails, or if the hugepage cannot be charged to the mem cgroup hierarchy. Extend this to shmem as well. Adds a new thp_file_fallback to complement thp_file_alloc that gets incremented when a hugepage is attempted to be allocated but fails, or if it cannot be charged to the mem cgroup hierarchy. Additionally, remove the check for CONFIG_TRANSPARENT_HUGE_PAGECACHE from shmem_alloc_hugepage() since it is only called with this configuration option. Signed-off-by: David Rientjes Signed-off-by: Andrew Morton Reviewed-by: Yang Shi Acked-by: Kirill A. Shutemov Cc: Mike Rapoport Cc: Jeremy Cline Cc: Andrea Arcangeli Cc: Mike Kravetz Cc: Michal Hocko Cc: Vlastimil Babka Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2003061421240.7412@chino.kir.corp.google.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/mm/transhuge.rst | 4 ++++ include/linux/vm_event_item.h | 2 ++ mm/shmem.c | 10 ++++++---- mm/vmstat.c | 1 + 4 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index bd5714547cee..f79ebbcd6725 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -319,6 +319,10 @@ thp_file_alloc is incremented every time a file huge page is successfully allocated. +thp_file_fallback + is incremented if a file huge page is attempted to be allocated + but fails and instead falls back to using small pages. + thp_file_mapped is incremented every time a file huge page is mapped into user address space. 
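The counting pattern the hunks below implement is a single count_vm_event(THP_FILE_FALLBACK) on the failure side of the huge-page allocation, mirroring what thp_fault_fallback already does for anonymous memory. A hedged sketch of the shape (hypothetical helper, not the shmem code verbatim):

static struct page *file_hugepage_or_fallback(gfp_t gfp)
{
	struct page *page = try_alloc_hugepage(gfp);	/* hypothetical */

	if (page)
		prep_transhuge_page(page);
	else
		count_vm_event(THP_FILE_FALLBACK);	/* new vmstat event */
	/* NULL tells the caller to fall back to small pages. */
	return page;
}

After this patch the counter is visible as thp_file_fallback in /proc/vmstat.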
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 47a3441cf4c4..41a4c7568748 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -76,6 +76,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, THP_FILE_ALLOC, + THP_FILE_FALLBACK, THP_FILE_MAPPED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, @@ -115,6 +116,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) +#define THP_FILE_FALLBACK ({ BUILD_BUG(); 0; }) #define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) #endif diff --git a/mm/shmem.c b/mm/shmem.c index f47347cb30f6..8160d0762bf5 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1472,9 +1472,6 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, pgoff_t hindex; struct page *page; - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) - return NULL; - hindex = round_down(index, HPAGE_PMD_NR); if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, XA_PRESENT)) @@ -1486,6 +1483,8 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, shmem_pseudo_vma_destroy(&pvma); if (page) prep_transhuge_page(page); + else + count_vm_event(THP_FILE_FALLBACK); return page; } @@ -1871,8 +1870,11 @@ alloc_nohuge: error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, PageTransHuge(page)); - if (error) + if (error) { + if (PageTransHuge(page)) + count_vm_event(THP_FILE_FALLBACK); goto unacct; + } error = shmem_add_to_page_cache(page, mapping, hindex, NULL, gfp & GFP_RECLAIM_MASK); if (error) { diff --git a/mm/vmstat.c b/mm/vmstat.c index c9c0d71f917f..ebc1dcaa0539 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1259,6 +1259,7 @@ const char * const vmstat_text[] = { "thp_collapse_alloc", "thp_collapse_alloc_failed", "thp_file_alloc", + "thp_file_fallback", "thp_file_mapped", "thp_split_page", "thp_split_page_failed", -- cgit v1.2.3-58-ga151 From 85b9f46e8ea451633ccd60a7d8cacbfff9f34047 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 6 Apr 2020 20:04:28 -0700 Subject: mm, thp: track fallbacks due to failed memcg charges separately The thp_fault_fallback and thp_file_fallback vmstats are incremented if either the hugepage allocation fails through the page allocator or the hugepage charge fails through the mem cgroup. This patch leaves those fields untouched but adds two new fields, thp_{fault,file}_fallback_charge, which are incremented only when the mem cgroup charge fails. This distinguishes between attempted hugepage allocations that fail due to fragmentation (or low memory conditions) and those that fail due to mem cgroup limits. That can be used to determine the impact of fragmentation on the system by excluding faults that failed due to memcg usage. Signed-off-by: David Rientjes Signed-off-by: Andrew Morton Reviewed-by: Yang Shi Acked-by: Kirill A.
Shutemov Cc: Mike Rapoport Cc: Jeremy Cline Cc: Andrea Arcangeli Cc: Mike Kravetz Cc: Michal Hocko Cc: Vlastimil Babka Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2003061422070.7412@chino.kir.corp.google.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/mm/transhuge.rst | 10 ++++++++++ include/linux/vm_event_item.h | 3 +++ mm/huge_memory.c | 2 ++ mm/shmem.c | 4 +++- mm/vmstat.c | 2 ++ 5 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index f79ebbcd6725..2f31de8f7c74 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -310,6 +310,11 @@ thp_fault_fallback is incremented if a page fault fails to allocate a huge page and instead falls back to using small pages. +thp_fault_fallback_charge + is incremented if a page fault fails to charge a huge page and + instead falls back to using small pages even though the + allocation was successful. + thp_collapse_alloc_failed is incremented if khugepaged found a range of pages that should be collapsed into one huge page but failed @@ -323,6 +328,11 @@ thp_file_fallback is incremented if a file huge page is attempted to be allocated but fails and instead falls back to using small pages. +thp_file_fallback_charge + is incremented if a file huge page cannot be charged and instead + falls back to using small pages even though the allocation was + successful. + thp_file_mapped is incremented every time a file huge page is mapped into user address space. diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 41a4c7568748..ffef0f279747 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -73,10 +73,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_TRANSPARENT_HUGEPAGE THP_FAULT_ALLOC, THP_FAULT_FALLBACK, + THP_FAULT_FALLBACK_CHARGE, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, THP_FILE_ALLOC, THP_FILE_FALLBACK, + THP_FILE_FALLBACK_CHARGE, THP_FILE_MAPPED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, @@ -117,6 +119,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) #define THP_FILE_FALLBACK ({ BUILD_BUG(); 0; }) +#define THP_FILE_FALLBACK_CHARGE ({ BUILD_BUG(); 0; }) #define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) #endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0f9389f9d1f8..0080e8df18ef 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -597,6 +597,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) { put_page(page); count_vm_event(THP_FAULT_FALLBACK); + count_vm_event(THP_FAULT_FALLBACK_CHARGE); return VM_FAULT_FALLBACK; } @@ -1446,6 +1447,7 @@ alloc: put_page(page); ret |= VM_FAULT_FALLBACK; count_vm_event(THP_FAULT_FALLBACK); + count_vm_event(THP_FAULT_FALLBACK_CHARGE); goto out; } diff --git a/mm/shmem.c b/mm/shmem.c index 8160d0762bf5..b48ac3806f8f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1871,8 +1871,10 @@ alloc_nohuge: error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, PageTransHuge(page)); if (error) { - if (PageTransHuge(page)) + if (PageTransHuge(page)) { count_vm_event(THP_FILE_FALLBACK); + count_vm_event(THP_FILE_FALLBACK_CHARGE); + } goto unacct; } error = shmem_add_to_page_cache(page, mapping, hindex, diff --git a/mm/vmstat.c b/mm/vmstat.c index 
ebc1dcaa0539..96d21a792b57 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1256,10 +1256,12 @@ const char * const vmstat_text[] = { #ifdef CONFIG_TRANSPARENT_HUGEPAGE "thp_fault_alloc", "thp_fault_fallback", + "thp_fault_fallback_charge", "thp_collapse_alloc", "thp_collapse_alloc_failed", "thp_file_alloc", "thp_file_fallback", + "thp_file_fallback_charge", "thp_file_mapped", "thp_split_page", "thp_split_page_failed", -- cgit v1.2.3-58-ga151 From a0650604a707b1926cbc32feb2f006ebb74ef47b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 6 Apr 2020 20:04:31 -0700 Subject: include/linux/pagemap.h: optimise find_subpage for !THP If THP is disabled, find_subpage() can become a no-op by using hpage_nr_pages() instead of compound_nr(). hpage_nr_pages() embeds a check for PageTail, so we can drop the check here. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: Christoph Hellwig Acked-by: Kirill A. Shutemov Cc: Aneesh Kumar K.V Cc: Pankaj Gupta Link: http://lkml.kernel.org/r/20200318140253.6141-5-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index f56282491a48..a8f7bd8ea1c6 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -341,9 +341,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) if (PageHuge(head)) return head; - VM_BUG_ON_PAGE(PageTail(head), head); - - return head + (index & (compound_nr(head) - 1)); + return head + (index & (hpage_nr_pages(head) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); -- cgit v1.2.3-58-ga151 From 396bcc5299c281e9cf1737ad0efcd97be9f83845 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 6 Apr 2020 20:04:35 -0700 Subject: mm: remove CONFIG_TRANSPARENT_HUGE_PAGECACHE Commit e496cf3d7821 ("thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE") notes that it should be reverted when the PowerPC problem was fixed. The commit fixing the PowerPC problem (953c66c2b22a) did not revert it; instead, it set CONFIG_TRANSPARENT_HUGE_PAGECACHE to the same value as CONFIG_TRANSPARENT_HUGEPAGE. Checking with Kirill and Aneesh, this was an oversight, so remove the Kconfig symbol and undo the work of commit e496cf3d7821. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Acked-by: Kirill A.
Shutemov Cc: Aneesh Kumar K.V Cc: Christoph Hellwig Cc: Pankaj Gupta Link: http://lkml.kernel.org/r/20200318140253.6141-6-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/shmem_fs.h | 10 +--------- mm/Kconfig | 6 +----- mm/huge_memory.c | 2 +- mm/khugepaged.c | 12 ++++-------- mm/memory.c | 5 ++--- mm/rmap.c | 2 +- mm/shmem.c | 34 +++++++++++++++++----------------- 7 files changed, 27 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index d56fefef8905..7a35a6901221 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -78,6 +78,7 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(unsigned int type, bool frontswap, unsigned long *fs_pages_to_unuse); +extern bool shmem_huge_enabled(struct vm_area_struct *vma); extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -114,15 +115,6 @@ static inline bool shmem_file(struct file *file) extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE -extern bool shmem_huge_enabled(struct vm_area_struct *vma); -#else -static inline bool shmem_huge_enabled(struct vm_area_struct *vma) -{ - return false; -} -#endif - #ifdef CONFIG_SHMEM extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, diff --git a/mm/Kconfig b/mm/Kconfig index ab80933be65f..211a70e8d5cf 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -420,10 +420,6 @@ config THP_SWAP For selection by architectures with reasonable THP sizes. -config TRANSPARENT_HUGE_PAGECACHE - def_bool y - depends on TRANSPARENT_HUGEPAGE - # # UP and nommu archs use km based percpu allocator # @@ -714,7 +710,7 @@ config GUP_GET_PTE_LOW_HIGH config READ_ONLY_THP_FOR_FS bool "Read-only THP for filesystems (EXPERIMENTAL)" - depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM + depends on TRANSPARENT_HUGEPAGE && SHMEM help Allow khugepaged to put read-only file-backed pages in THP. diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0080e8df18ef..c1e7c71db1e6 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -326,7 +326,7 @@ static struct attribute *hugepage_attr[] = { &defrag_attr.attr, &use_zero_page_attr.attr, &hpage_pmd_size_attr.attr, -#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) +#ifdef CONFIG_SHMEM &shmem_enabled_attr.attr, #endif #ifdef CONFIG_DEBUG_VM diff --git a/mm/khugepaged.c b/mm/khugepaged.c index c659c68728bc..b1d9a8e189b8 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -414,8 +414,6 @@ static bool hugepage_vma_check(struct vm_area_struct *vma, (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file && (vm_flags & VM_DENYWRITE))) { - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) - return false; return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, HPAGE_PMD_NR); } @@ -1258,7 +1256,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) } } -#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) +#ifdef CONFIG_SHMEM /* * Notify khugepaged that given addr of the mm is pte-mapped THP. Then * khugepaged should try to collapse the page table. 
@@ -1973,6 +1971,8 @@ skip: if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); + if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) + goto skip; while (khugepaged_scan.address < hend) { int ret; @@ -1984,14 +1984,10 @@ skip: khugepaged_scan.address + HPAGE_PMD_SIZE > hend); if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { - struct file *file; + struct file *file = get_file(vma->vm_file); pgoff_t pgoff = linear_page_index(vma, khugepaged_scan.address); - if (shmem_file(vma->vm_file) - && !shmem_huge_enabled(vma)) - goto skip; - file = get_file(vma->vm_file); up_read(&mm->mmap_sem); ret = 1; khugepaged_scan_file(mm, file, pgoff, hpage); diff --git a/mm/memory.c b/mm/memory.c index d2a353c345ad..d527e0ec29c7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3373,7 +3373,7 @@ map_pte: return 0; } -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE static void deposit_prealloc_pte(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; @@ -3475,8 +3475,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, pte_t entry; vm_fault_t ret; - if (pmd_none(*vmf->pmd) && PageTransCompound(page) && - IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { + if (pmd_none(*vmf->pmd) && PageTransCompound(page)) { /* THP on COW? */ VM_BUG_ON_PAGE(memcg, page); diff --git a/mm/rmap.c b/mm/rmap.c index 68fe0472c803..374a9bfdbffa 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -933,7 +933,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, set_pte_at(vma->vm_mm, address, pte, entry); ret = 1; } else { -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE pmd_t *pmd = pvmw.pmd; pmd_t entry; diff --git a/mm/shmem.c b/mm/shmem.c index b48ac3806f8f..2c255f383608 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -410,7 +410,7 @@ static bool shmem_confirm_swap(struct address_space *mapping, #define SHMEM_HUGE_DENY (-1) #define SHMEM_HUGE_FORCE (-2) -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE /* ifdef here to avoid bloating shmem.o when not necessary */ static int shmem_huge __read_mostly; @@ -580,7 +580,7 @@ static long shmem_unused_huge_count(struct super_block *sb, struct shmem_sb_info *sbinfo = SHMEM_SB(sb); return READ_ONCE(sbinfo->shrinklist_len); } -#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */ +#else /* !CONFIG_TRANSPARENT_HUGEPAGE */ #define shmem_huge SHMEM_HUGE_DENY @@ -589,11 +589,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, { return 0; } -#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) { - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && shmem_huge != SHMEM_HUGE_DENY) return true; @@ -1059,7 +1059,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) * Part of the huge page can be beyond i_size: subject * to shrink under memory pressure. 
*/ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { spin_lock(&sbinfo->shrinklist_lock); /* * _careful to defend against unlocked access to @@ -1510,7 +1510,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp, int nr; int err = -ENOSPC; - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) huge = false; nr = huge ? HPAGE_PMD_NR : 1; @@ -2093,7 +2093,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, get_area = current->mm->get_unmapped_area; addr = get_area(file, uaddr, len, pgoff, flags); - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return addr; if (IS_ERR_VALUE(addr)) return addr; @@ -2232,7 +2232,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) file_accessed(file); vma->vm_ops = &shmem_vm_ops; - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < (vma->vm_end & HPAGE_PMD_MASK)) { khugepaged_enter(vma, vma->vm_flags); @@ -3459,7 +3459,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) case Opt_huge: ctx->huge = result.uint_32; if (ctx->huge != SHMEM_HUGE_NEVER && - !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && + !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && has_transparent_hugepage())) goto unsupported_parameter; ctx->seen |= SHMEM_SEEN_HUGE; @@ -3605,7 +3605,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ if (sbinfo->huge) seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); @@ -3850,7 +3850,7 @@ static const struct super_operations shmem_ops = { .evict_inode = shmem_evict_inode, .drop_inode = generic_delete_inode, .put_super = shmem_put_super, -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE .nr_cached_objects = shmem_unused_huge_count, .free_cached_objects = shmem_unused_huge_scan, #endif @@ -3912,7 +3912,7 @@ int __init shmem_init(void) goto out1; } -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else @@ -3928,7 +3928,7 @@ out2: return error; } -#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) static ssize_t shmem_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -3980,9 +3980,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute shmem_enabled_attr = __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); -#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE bool shmem_huge_enabled(struct vm_area_struct *vma) { struct inode *inode = file_inode(vma->vm_file); @@ -4017,7 +4017,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma) return false; } } -#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #else /* !CONFIG_SHMEM */ @@ -4186,7 
+4186,7 @@ int shmem_zero_setup(struct vm_area_struct *vma) vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < (vma->vm_end & HPAGE_PMD_MASK)) { khugepaged_enter(vma, vma->vm_flags); -- cgit v1.2.3-58-ga151 From 9de4f22a60f731943f050f4448bf2933ed3fa70b Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 6 Apr 2020 20:04:41 -0700 Subject: mm: code cleanup for MADV_FREE Some comments for MADV_FREE are revised and added to help people understand the MADV_FREE code, especially the page flag PG_swapbacked. These changes leave page_is_file_cache() inconsistent with its comments, so the function is renamed to page_is_file_lru() to make them consistent again. All these are put in one patch as one logical change. Suggested-by: David Hildenbrand Suggested-by: Johannes Weiner Suggested-by: David Rientjes Signed-off-by: "Huang, Ying" Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: David Rientjes Acked-by: Michal Hocko Acked-by: Pankaj Gupta Acked-by: Vlastimil Babka Cc: Dave Hansen Cc: Mel Gorman Cc: Minchan Kim Cc: Hugh Dickins Cc: Rik van Riel Link: http://lkml.kernel.org/r/20200317100342.2730705-1-ying.huang@intel.com Signed-off-by: Linus Torvalds
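As an editorial aside, not part of this patch: the lazy-free state these changes document is what userspace requests with madvise(MADV_FREE). A minimal sketch of that usage, assuming a kernel with MADV_FREE support (v4.5 or later); the program is illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 << 20;	/* 64 MiB of anonymous memory */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xa5, len);	/* fault the pages in */

	/*
	 * Mark the contents disposable.  The pages stay mapped, but the
	 * kernel clears PG_swapbacked on them so they can simply be
	 * dropped under memory pressure; writing to a page again cancels
	 * the lazy-free state for that page.
	 */
	if (madvise(buf, len, MADV_FREE))
		perror("madvise");

	munmap(buf, len);
	return 0;
}

Reads from a page that has been dropped return zeroes, which is why only clean, reconstructible data should be marked this way, and why such pages can be sorted onto the file LRU by the code touched below.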
--- include/linux/mm_inline.h | 15 ++++++++------- include/linux/page-flags.h | 5 +++++ include/trace/events/vmscan.h | 2 +- mm/compaction.c | 2 +- mm/gup.c | 2 +- mm/khugepaged.c | 4 ++-- mm/memory-failure.c | 2 +- mm/memory_hotplug.c | 2 +- mm/mempolicy.c | 2 +- mm/migrate.c | 16 ++++++++-------- mm/mprotect.c | 2 +- mm/swap.c | 16 ++++++++-------- mm/vmscan.c | 12 ++++++------ 13 files changed, 44 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 6f2fef7b0784..219bef41d87c 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -6,19 +6,20 @@ #include /** - * page_is_file_cache - should the page be on a file LRU or anon LRU? + * page_is_file_lru - should the page be on a file LRU or anon LRU? * @page: the page to test * - * Returns 1 if @page is page cache page backed by a regular filesystem, - * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. - * Used by functions that manipulate the LRU lists, to sort a page - * onto the right LRU list. + * Returns 1 if @page is a regular filesystem backed page cache page or a lazily + * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal + * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by + * functions that manipulate the LRU lists, to sort a page onto the right LRU + * list. * * We would like to get this info without a page flag, but the state * needs to survive until the page is last deleted from the LRU, which * could be as far down as __page_cache_release. */ -static inline int page_is_file_cache(struct page *page) +static inline int page_is_file_lru(struct page *page) { return !PageSwapBacked(page); } @@ -75,7 +76,7 @@ static __always_inline void del_page_from_lru_list(struct page *page, */ static inline enum lru_list page_lru_base_type(struct page *page) { - if (page_is_file_cache(page)) + if (page_is_file_lru(page)) return LRU_INACTIVE_FILE; return LRU_INACTIVE_ANON; } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 77de28bfefb0..acf7988fd640 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -63,6 +63,11 @@ * page_waitqueue(page) is a wait queue of all tasks waiting for the page * to become unlocked. * + * PG_swapbacked is set when a page uses swap as backing storage. These are + * usually PageAnon or shmem pages but please note that even anonymous pages + * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as + * a result of MADV_FREE). + * * PG_uptodate tells whether the page's contents is valid. When a read * completes, the page becomes uptodate, unless a disk I/O error happened. * diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index a5ab2973e8dc..74bb594ccb25 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -323,7 +323,7 @@ TRACE_EVENT(mm_vmscan_writepage, TP_fast_assign( __entry->pfn = page_to_pfn(page); __entry->reclaim_flags = trace_reclaim_flags( - page_is_file_cache(page)); + page_is_file_lru(page)); ), TP_printk("page=%p pfn=%lu flags=%s", diff --git a/mm/compaction.c b/mm/compaction.c index df3da2f76fdc..46af63eb8212 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -989,7 +989,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Successfully isolated */ del_page_from_lru_list(page, lruvec, page_lru(page)); mod_node_page_state(page_pgdat(page), - NR_ISOLATED_ANON + page_is_file_cache(page), + NR_ISOLATED_ANON + page_is_file_lru(page), hpage_nr_pages(page)); isolate_success: diff --git a/mm/gup.c b/mm/gup.c index 96af7e08db4b..b185377c38b7 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1677,7 +1677,7 @@ check_again: list_add_tail(&head->lru, &cma_page_list); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + - page_is_file_cache(head), + page_is_file_lru(head), hpage_nr_pages(head)); } } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b1d9a8e189b8..3afc1e2d7a55 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -511,7 +511,7 @@ void __khugepaged_exit(struct mm_struct *mm) static void release_pte_page(struct page *page) { - dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); + dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page)); unlock_page(page); putback_lru_page(page); } @@ -611,7 +611,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, goto out; } inc_node_page_state(page, - NR_ISOLATED_ANON + page_is_file_cache(page)); + NR_ISOLATED_ANON + page_is_file_lru(page)); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageLRU(page), page); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 1c961cd26c0b..a96364be8ab4 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1810,7 +1810,7 @@ static int __soft_offline_page(struct page *page, int flags) */ if (!__PageMovable(page)) inc_node_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); + page_is_file_lru(page)); list_add(&page->lru, &pagelist); ret = migrate_pages(&pagelist, new_page, NULL,
MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 19389cdc16a5..005eab3411e5 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1317,7 +1317,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) list_add_tail(&page->lru, &source); if (!__PageMovable(page)) inc_node_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); + page_is_file_lru(page)); } else { pr_warn("failed to isolate pfn %lx\n", pfn); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b36926ba02e2..037e5f548118 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1022,7 +1022,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist, if (!isolate_lru_page(head)) { list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), - NR_ISOLATED_ANON + page_is_file_cache(head), + NR_ISOLATED_ANON + page_is_file_lru(head), hpage_nr_pages(head)); } else if (flags & MPOL_MF_STRICT) { /* diff --git a/mm/migrate.c b/mm/migrate.c index 1a205503be3f..c1412e04975e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l) put_page(page); } else { mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_cache(page), -hpage_nr_pages(page)); + page_is_file_lru(page), -hpage_nr_pages(page)); putback_lru_page(page); } } @@ -1219,7 +1219,7 @@ out: */ if (likely(!__PageMovable(page))) mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_cache(page), -hpage_nr_pages(page)); + page_is_file_lru(page), -hpage_nr_pages(page)); } /* @@ -1592,7 +1592,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, err = 1; list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), - NR_ISOLATED_ANON + page_is_file_cache(head), + NR_ISOLATED_ANON + page_is_file_lru(head), hpage_nr_pages(head)); } out_putpage: @@ -1955,7 +1955,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) return 0; } - page_lru = page_is_file_cache(page); + page_lru = page_is_file_lru(page); mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, hpage_nr_pages(page)); @@ -1991,7 +1991,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, * Don't migrate file pages that are mapped in multiple processes * with execute permissions as they are probably shared libraries. */ - if (page_mapcount(page) != 1 && page_is_file_cache(page) && + if (page_mapcount(page) != 1 && page_is_file_lru(page) && (vma->vm_flags & VM_EXEC)) goto out; @@ -1999,7 +1999,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, * Also do not migrate dirty pages as not all filesystems can move * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 
*/ - if (page_is_file_cache(page) && PageDirty(page)) + if (page_is_file_lru(page) && PageDirty(page)) goto out; isolated = numamigrate_isolate_page(pgdat, page); @@ -2014,7 +2014,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, if (!list_empty(&migratepages)) { list_del(&page->lru); dec_node_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); + page_is_file_lru(page)); putback_lru_page(page); } isolated = 0; @@ -2044,7 +2044,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, pg_data_t *pgdat = NODE_DATA(node); int isolated = 0; struct page *new_page = NULL; - int page_lru = page_is_file_cache(page); + int page_lru = page_is_file_lru(page); unsigned long start = address & HPAGE_PMD_MASK; new_page = alloc_pages_node(node, diff --git a/mm/mprotect.c b/mm/mprotect.c index 311c0dadf71c..0fee14b39416 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -98,7 +98,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, * it cannot move them all from MIGRATE_ASYNC * context. */ - if (page_is_file_cache(page) && PageDirty(page)) + if (page_is_file_lru(page) && PageDirty(page)) continue; /* diff --git a/mm/swap.c b/mm/swap.c index a4af8c999963..18505990c3b1 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -276,7 +276,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec, void *arg) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - int file = page_is_file_cache(page); + int file = page_is_file_lru(page); int lru = page_lru_base_type(page); del_page_from_lru_list(page, lruvec, lru); @@ -394,7 +394,7 @@ void mark_page_accessed(struct page *page) else __lru_cache_activate_page(page); ClearPageReferenced(page); - if (page_is_file_cache(page)) + if (page_is_file_lru(page)) workingset_activation(page); } if (page_is_idle(page)) @@ -515,7 +515,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, return; active = PageActive(page); - file = page_is_file_cache(page); + file = page_is_file_lru(page); lru = page_lru_base_type(page); del_page_from_lru_list(page, lruvec, lru + active); @@ -548,7 +548,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, void *arg) { if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { - int file = page_is_file_cache(page); + int file = page_is_file_lru(page); int lru = page_lru_base_type(page); del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); @@ -573,9 +573,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, ClearPageActive(page); ClearPageReferenced(page); /* - * lazyfree pages are clean anonymous pages. They have - * SwapBacked flag cleared to distinguish normal anonymous - * pages + * Lazyfree pages are clean anonymous pages. 
They have + * PG_swapbacked flag cleared, to distinguish them from normal + * anonymous pages */ ClearPageSwapBacked(page); add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); @@ -962,7 +962,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, if (page_evictable(page)) { lru = page_lru(page); - update_page_reclaim_stat(lruvec, page_is_file_cache(page), + update_page_reclaim_stat(lruvec, page_is_file_lru(page), PageActive(page)); if (was_unevictable) count_vm_event(UNEVICTABLE_PGRESCUED); diff --git a/mm/vmscan.c b/mm/vmscan.c index 2e8e690d2813..b06868fc4926 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -919,7 +919,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, * exceptional entries and shadow exceptional entries in the * same address_space. */ - if (reclaimed && page_is_file_cache(page) && + if (reclaimed && page_is_file_lru(page) && !mapping_exiting(mapping) && !dax_mapping(mapping)) shadow = workingset_eviction(page, target_memcg); __delete_from_page_cache(page, shadow); @@ -1043,7 +1043,7 @@ static void page_check_dirty_writeback(struct page *page, * Anonymous pages are not handled by flushers and must be written * from reclaim context. Do not stall reclaim based on them */ - if (!page_is_file_cache(page) || + if (!page_is_file_lru(page) || (PageAnon(page) && !PageSwapBacked(page))) { *dirty = false; *writeback = false; @@ -1315,7 +1315,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * the rest of the LRU for clean pages and see * the same dirty pages again (PageReclaim). */ - if (page_is_file_cache(page) && + if (page_is_file_lru(page) && (!current_is_kswapd() || !PageReclaim(page) || !test_bit(PGDAT_DIRTY, &pgdat->flags))) { /* @@ -1459,7 +1459,7 @@ activate_locked: try_to_free_swap(page); VM_BUG_ON_PAGE(PageActive(page), page); if (!PageMlocked(page)) { - int type = page_is_file_cache(page); + int type = page_is_file_lru(page); SetPageActive(page); stat->nr_activate[type] += nr_pages; count_memcg_page_event(page, PGACTIVATE); @@ -1497,7 +1497,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, LIST_HEAD(clean_pages); list_for_each_entry_safe(page, next, page_list, lru) { - if (page_is_file_cache(page) && !PageDirty(page) && + if (page_is_file_lru(page) && !PageDirty(page) && !__PageMovable(page) && !PageUnevictable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); @@ -2053,7 +2053,7 @@ static void shrink_active_list(unsigned long nr_to_scan, * IO, plus JVM can create lots of anon VM_EXEC pages, * so we ignore them here. */ - if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { + if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { list_add(&page->lru, &l_active); continue; } -- cgit v1.2.3-58-ga151 From a2129f24798a993abde9b4bf8b3713b52d56c121 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 6 Apr 2020 20:04:45 -0700 Subject: mm: adjust shuffle code to allow for future coalescing Patch series "mm / virtio: Provide support for free page reporting", v17. This series provides an asynchronous means of reporting free guest pages to a hypervisor so that the memory associated with those pages can be dropped and reused by other processes and/or guests on the host. Using this it is possible to avoid unnecessary I/O to disk and greatly improve performance in the case of memory overcommit on the host. When enabled we will be performing a scan of free memory every 2 seconds while pages of sufficiently high order are being freed. 
In each pass at least one sixteenth of each free list will be reported. By doing this we avoid racing against other threads that may be causing a high amount of memory churn. The lowest page order currently scanned when reporting pages is pageblock_order so that this feature will not interfere with the use of Transparent Huge Pages in the case of virtualization. Currently this is only in use by virtio-balloon, however there is the hope that at some point in the future other hypervisors might be able to make use of it. In the virtio-balloon/QEMU implementation the hypervisor is currently using MADV_DONTNEED to indicate to the host kernel that the page is currently free. It will be zeroed and faulted back into the guest the next time the page is accessed. To track if a page is reported or not the Uptodate flag was repurposed and used as a Reported flag for Buddy pages. We walk through the free list isolating pages and adding them to the scatterlist until we either encounter the end of the list or have processed at least one sixteenth of the pages that were listed in nr_free prior to us starting. If we fill the scatterlist before we reach the end of the list we rotate the list so that the first unreported page we encounter is moved to the head of the list as that is where we will resume after we have freed the reported pages back into the tail of the list.
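Editorial sketch, not part of the series: the consumer side of this interface, added by "mm: introduce Reported pages" later in this log, amounts to registering a report() callback. A rough illustration under that assumption; the demo_* names are hypothetical, and a real driver such as virtio-balloon would hand the scatterlist ranges to the hypervisor rather than log them:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page_reporting.h>
#include <linux/scatterlist.h>

/* Hypothetical consumer: just log each batch the core hands us. */
static int demo_report(struct page_reporting_dev_info *prdev,
		       struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		pr_info("reported pfn %lx, order %d\n",
			page_to_pfn(sg_page(sg)),
			get_order(sg->length));

	/* Returning 0 lets the core flag these pages as Reported. */
	return 0;
}

static struct page_reporting_dev_info demo_prdev = {
	.report = demo_report,
};

static int __init demo_init(void)
{
	/* Only one reporting device may be registered at a time. */
	return page_reporting_register(&demo_prdev);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	page_reporting_unregister(&demo_prdev);
}
module_exit(demo_exit);
MODULE_LICENSE("GPL");

page_reporting_register() returns -EBUSY if another device already holds the slot, as the implementation later in this log shows.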
Below are the results from various benchmarks. I primarily focused on two tests. The first is the will-it-scale/page_fault2 test, and the other is a modified version of will-it-scale/page_fault1 that was enabled to use THP. I did this as it allows for better visibility into different parts of the memory subsystem. The guest is running with 32G of RAM on one node of an E5-2630 v3. The host has had some features such as CPU turbo disabled in the BIOS.

Test                   page_fault1 (THP)    page_fault2
Name            tasks  Process Iter  STDEV  Process Iter  STDEV
Baseline            1    1012402.50  0.14%    361855.25  0.81%
                   16    8827457.25  0.09%   3282347.00  0.34%
Patches Applied     1    1007897.00  0.23%    361887.00  0.26%
                   16    8784741.75  0.39%   3240669.25  0.48%
Patches Enabled     1    1010227.50  0.39%    359749.25  0.56%
                   16    8756219.00  0.24%   3226608.75  0.97%
Patches Enabled     1    1050982.00  4.26%    357966.25  0.14%
 page shuffle      16    8672601.25  0.49%   3223177.75  0.40%
Patches enabled     1    1003238.00  0.22%    360211.00  0.22%
 shuffle w/ RFC    16    8767010.50  0.32%   3199874.00  0.71%

The results above are for a baseline with a linux-next-20191219 kernel, that kernel with this patch set applied but page reporting disabled in virtio-balloon, the patches applied and page reporting fully enabled, the patches enabled with page shuffling enabled, and the patches applied with page shuffling enabled and an RFC patch that makes use of MADV_FREE in QEMU. These results include the deviation seen between the average value reported here versus the high and/or low value. I observed that during the test memory usage for the first three tests never dropped whereas with the patches fully enabled the VM would drop to using only a few GB of the host's memory when switching from memhog to page fault tests. Any of the overhead visible with this patch set enabled seems due to page faults caused by accessing the reported pages and the host zeroing the page before giving it back to the guest. This overhead is much more visible when using THP than with standard 4K pages. In addition page shuffling seemed to increase the amount of faults generated due to an increase in memory churn. The overhead is reduced when using MADV_FREE as we can avoid the extra zeroing of the pages when they are reintroduced to the host, as can be seen when the RFC is applied with shuffling enabled. The overall guest size is kept fairly small to only a few GB while the test is running. If the host memory were oversubscribed this patch set should result in a performance improvement as swapping memory in the host can be avoided. A brief history on the background of free page reporting can be found at: https://lore.kernel.org/lkml/29f43d5796feed0dec8e8bb98b187d9dac03b900.camel@linux.intel.com/ This patch (of 9): Move the head/tail adding logic out of the shuffle code and into the __free_one_page function since ultimately that is where it is really needed anyway. By doing this we should be able to reduce the overhead and can consolidate all of the list addition bits in one spot. Signed-off-by: Alexander Duyck Signed-off-by: Andrew Morton Reviewed-by: Dan Williams Acked-by: Mel Gorman Acked-by: David Hildenbrand Cc: Yang Zhang Cc: Pankaj Gupta Cc: Konrad Rzeszutek Wilk Cc: Nitesh Narayan Lal Cc: Rik van Riel Cc: Matthew Wilcox Cc: Luiz Capitulino Cc: Dave Hansen Cc: Wei Wang Cc: Andrea Arcangeli Cc: Paolo Bonzini Cc: Michal Hocko Cc: Vlastimil Babka Cc: Oscar Salvador Cc: Michael S. Tsirkin Cc: wei qi Link: http://lkml.kernel.org/r/20200211224602.29318.84523.stgit@localhost.localdomain Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 12 --------- mm/page_alloc.c | 71 +++++++++++++++++++++++++++++--------------------- mm/shuffle.c | 12 ++++----- mm/shuffle.h | 6 +++++ 4 files changed, 54 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e84d448988b6..c023d7968b14 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -116,18 +116,6 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *ar area->nr_free++; } -#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR -/* Used to preserve page allocation order entropy */ -void add_to_free_area_random(struct page *page, struct free_area *area, - int migratetype); -#else -static inline void add_to_free_area_random(struct page *page, - struct free_area *area, int migratetype) -{ - add_to_free_area(page, area, migratetype); -} -#endif - /* Used for pages which are on another list */ static inline void move_to_free_area(struct page *page, struct free_area *area, int migratetype) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e5f76da8cd4e..f2b8cb8f995f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -864,6 +864,36 @@ compaction_capture(struct capture_control *capc, struct page *page, } #endif /* CONFIG_COMPACTION */ +/* + * If this is not the largest possible page, check if the buddy + * of the next-highest order is free. If it is, it's possible + * that pages are being freed that will coalesce soon.
In case, + * that is happening, add the free page to the tail of the list + * so it's less likely to be used soon and more likely to be merged + * as a higher order page + */ +static inline bool +buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, + struct page *page, unsigned int order) +{ + struct page *higher_page, *higher_buddy; + unsigned long combined_pfn; + + if (order >= MAX_ORDER - 2) + return false; + + if (!pfn_valid_within(buddy_pfn)) + return false; + + combined_pfn = buddy_pfn & pfn; + higher_page = page + (combined_pfn - pfn); + buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); + higher_buddy = higher_page + (buddy_pfn - combined_pfn); + + return pfn_valid_within(buddy_pfn) && + page_is_buddy(higher_page, higher_buddy, order + 1); +} + /* * Freeing function for a buddy system allocator. * @@ -893,11 +923,13 @@ static inline void __free_one_page(struct page *page, struct zone *zone, unsigned int order, int migratetype) { - unsigned long combined_pfn; + struct capture_control *capc = task_capc(zone); unsigned long uninitialized_var(buddy_pfn); - struct page *buddy; + unsigned long combined_pfn; + struct free_area *area; unsigned int max_order; - struct capture_control *capc = task_capc(zone); + struct page *buddy; + bool to_tail; max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); @@ -966,35 +998,16 @@ continue_merging: done_merging: set_page_order(page, order); - /* - * If this is not the largest possible page, check if the buddy - * of the next-highest order is free. If it is, it's possible - * that pages are being freed that will coalesce soon. In case, - * that is happening, add the free page to the tail of the list - * so it's less likely to be used soon and more likely to be merged - * as a higher order page - */ - if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn) - && !is_shuffle_order(order)) { - struct page *higher_page, *higher_buddy; - combined_pfn = buddy_pfn & pfn; - higher_page = page + (combined_pfn - pfn); - buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); - higher_buddy = higher_page + (buddy_pfn - combined_pfn); - if (pfn_valid_within(buddy_pfn) && - page_is_buddy(higher_page, higher_buddy, order + 1)) { - add_to_free_area_tail(page, &zone->free_area[order], - migratetype); - return; - } - } - + area = &zone->free_area[order]; if (is_shuffle_order(order)) - add_to_free_area_random(page, &zone->free_area[order], - migratetype); + to_tail = shuffle_pick_tail(); else - add_to_free_area(page, &zone->free_area[order], migratetype); + to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); + if (to_tail) + add_to_free_area_tail(page, area, migratetype); + else + add_to_free_area(page, area, migratetype); } /* diff --git a/mm/shuffle.c b/mm/shuffle.c index c716059cbd3c..44406d9977c7 100644 --- a/mm/shuffle.c +++ b/mm/shuffle.c @@ -183,11 +183,11 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat) shuffle_zone(z); } -void add_to_free_area_random(struct page *page, struct free_area *area, - int migratetype) +bool shuffle_pick_tail(void) { static u64 rand; static u8 rand_bits; + bool ret; /* * The lack of locking is deliberate. 
If 2 threads race to @@ -198,10 +198,10 @@ void add_to_free_area_random(struct page *page, struct free_area *area, rand = get_random_u64(); } - if (rand & 1) - add_to_free_area(page, area, migratetype); - else - add_to_free_area_tail(page, area, migratetype); + ret = rand & 1; + rand_bits--; rand >>= 1; + + return ret; } diff --git a/mm/shuffle.h b/mm/shuffle.h index 777a257a0d2f..4d79f03b6658 100644 --- a/mm/shuffle.h +++ b/mm/shuffle.h @@ -22,6 +22,7 @@ enum mm_shuffle_ctl { DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key); extern void page_alloc_shuffle(enum mm_shuffle_ctl ctl); extern void __shuffle_free_memory(pg_data_t *pgdat); +extern bool shuffle_pick_tail(void); static inline void shuffle_free_memory(pg_data_t *pgdat) { if (!static_branch_unlikely(&page_alloc_shuffle_key)) @@ -44,6 +45,11 @@ static inline bool is_shuffle_order(int order) return order >= SHUFFLE_ORDER; } #else +static inline bool shuffle_pick_tail(void) +{ + return false; +} + static inline void shuffle_free_memory(pg_data_t *pgdat) { } -- cgit v1.2.3-58-ga151 From 6ab0136310961ebf4b5ecb565f0bf52c233dc093 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 6 Apr 2020 20:04:49 -0700 Subject: mm: use zone and order instead of free area in free_list manipulators In order to enable the use of the zone from the list manipulator functions I will need access to the zone pointer. As it turns out most of the accessors were always just being directly passed &zone->free_area[order] anyway so it would make sense to just fold that into the function itself and pass the zone and order as arguments instead of the free area. In order to be able to reference the zone we need to move the declaration of the functions down so that we have the zone defined before we define the list manipulation functions. Since the functions are only used in the file mm/page_alloc.c we can just move them there to reduce noise in the header. Signed-off-by: Alexander Duyck Signed-off-by: Andrew Morton Reviewed-by: Dan Williams Reviewed-by: David Hildenbrand Reviewed-by: Pankaj Gupta Acked-by: Mel Gorman Cc: Andrea Arcangeli Cc: Dave Hansen Cc: Konrad Rzeszutek Wilk Cc: Luiz Capitulino Cc: Matthew Wilcox Cc: Michael S. 
Tsirkin Cc: Michal Hocko Cc: Nitesh Narayan Lal Cc: Oscar Salvador Cc: Paolo Bonzini Cc: Rik van Riel Cc: Vlastimil Babka Cc: Wei Wang Cc: Yang Zhang Cc: wei qi Link: http://lkml.kernel.org/r/20200211224613.29318.43080.stgit@localhost.localdomain Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 32 ------------------------ mm/page_alloc.c | 67 ++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 49 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c023d7968b14..42b77d3b68e8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -100,29 +100,6 @@ struct free_area { unsigned long nr_free; }; -/* Used for pages not on another list */ -static inline void add_to_free_area(struct page *page, struct free_area *area, - int migratetype) -{ - list_add(&page->lru, &area->free_list[migratetype]); - area->nr_free++; -} - -/* Used for pages not on another list */ -static inline void add_to_free_area_tail(struct page *page, struct free_area *area, - int migratetype) -{ - list_add_tail(&page->lru, &area->free_list[migratetype]); - area->nr_free++; -} - -/* Used for pages which are on another list */ -static inline void move_to_free_area(struct page *page, struct free_area *area, - int migratetype) -{ - list_move(&page->lru, &area->free_list[migratetype]); -} - static inline struct page *get_page_from_free_area(struct free_area *area, int migratetype) { @@ -130,15 +107,6 @@ static inline struct page *get_page_from_free_area(struct free_area *area, struct page, lru); } -static inline void del_page_from_free_area(struct page *page, - struct free_area *area) -{ - list_del(&page->lru); - __ClearPageBuddy(page); - set_page_private(page, 0); - area->nr_free--; -} - static inline bool free_area_empty(struct free_area *area, int migratetype) { return list_empty(&area->free_list[migratetype]); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f2b8cb8f995f..14bdf3608a6b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -864,6 +864,44 @@ compaction_capture(struct capture_control *capc, struct page *page, } #endif /* CONFIG_COMPACTION */ +/* Used for pages not on another list */ +static inline void add_to_free_list(struct page *page, struct zone *zone, + unsigned int order, int migratetype) +{ + struct free_area *area = &zone->free_area[order]; + + list_add(&page->lru, &area->free_list[migratetype]); + area->nr_free++; +} + +/* Used for pages not on another list */ +static inline void add_to_free_list_tail(struct page *page, struct zone *zone, + unsigned int order, int migratetype) +{ + struct free_area *area = &zone->free_area[order]; + + list_add_tail(&page->lru, &area->free_list[migratetype]); + area->nr_free++; +} + +/* Used for pages which are on another list */ +static inline void move_to_free_list(struct page *page, struct zone *zone, + unsigned int order, int migratetype) +{ + struct free_area *area = &zone->free_area[order]; + + list_move(&page->lru, &area->free_list[migratetype]); +} + +static inline void del_page_from_free_list(struct page *page, struct zone *zone, + unsigned int order) +{ + list_del(&page->lru); + __ClearPageBuddy(page); + set_page_private(page, 0); + zone->free_area[order].nr_free--; +} + /* * If this is not the largest possible page, check if the buddy * of the next-highest order is free. 
If it is, it's possible @@ -926,7 +964,6 @@ static inline void __free_one_page(struct page *page, struct capture_control *capc = task_capc(zone); unsigned long uninitialized_var(buddy_pfn); unsigned long combined_pfn; - struct free_area *area; unsigned int max_order; struct page *buddy; bool to_tail; @@ -964,7 +1001,7 @@ continue_merging: if (page_is_guard(buddy)) clear_page_guard(zone, buddy, order, migratetype); else - del_page_from_free_area(buddy, &zone->free_area[order]); + del_page_from_free_list(buddy, zone, order); combined_pfn = buddy_pfn & pfn; page = page + (combined_pfn - pfn); pfn = combined_pfn; @@ -998,16 +1035,15 @@ continue_merging: done_merging: set_page_order(page, order); - area = &zone->free_area[order]; if (is_shuffle_order(order)) to_tail = shuffle_pick_tail(); else to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); if (to_tail) - add_to_free_area_tail(page, area, migratetype); + add_to_free_list_tail(page, zone, order, migratetype); else - add_to_free_area(page, area, migratetype); + add_to_free_list(page, zone, order, migratetype); } /* @@ -2021,13 +2057,11 @@ void __init init_cma_reserved_pageblock(struct page *page) * -- nyc */ static inline void expand(struct zone *zone, struct page *page, - int low, int high, struct free_area *area, - int migratetype) + int low, int high, int migratetype) { unsigned long size = 1 << high; while (high > low) { - area--; high--; size >>= 1; VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); @@ -2041,7 +2075,7 @@ static inline void expand(struct zone *zone, struct page *page, if (set_page_guard(zone, &page[size], high, migratetype)) continue; - add_to_free_area(&page[size], area, migratetype); + add_to_free_list(&page[size], zone, high, migratetype); set_page_order(&page[size], high); } } @@ -2199,8 +2233,8 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, page = get_page_from_free_area(area, migratetype); if (!page) continue; - del_page_from_free_area(page, area); - expand(zone, page, order, current_order, area, migratetype); + del_page_from_free_list(page, zone, current_order); + expand(zone, page, order, current_order, migratetype); set_pcppage_migratetype(page, migratetype); return page; } @@ -2274,7 +2308,7 @@ static int move_freepages(struct zone *zone, VM_BUG_ON_PAGE(page_zone(page) != zone, page); order = page_order(page); - move_to_free_area(page, &zone->free_area[order], migratetype); + move_to_free_list(page, zone, order, migratetype); page += 1 << order; pages_moved += 1 << order; } @@ -2390,7 +2424,6 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, unsigned int alloc_flags, int start_type, bool whole_block) { unsigned int current_order = page_order(page); - struct free_area *area; int free_pages, movable_pages, alike_pages; int old_block_type; @@ -2461,8 +2494,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, return; single_page: - area = &zone->free_area[current_order]; - move_to_free_area(page, area, start_type); + move_to_free_list(page, zone, current_order, start_type); } /* @@ -3133,7 +3165,6 @@ EXPORT_SYMBOL_GPL(split_page); int __isolate_free_page(struct page *page, unsigned int order) { - struct free_area *area = &page_zone(page)->free_area[order]; unsigned long watermark; struct zone *zone; int mt; @@ -3159,7 +3190,7 @@ int __isolate_free_page(struct page *page, unsigned int order) /* Remove page from free list */ - del_page_from_free_area(page, area); + del_page_from_free_list(page, zone, order); /* * Set the 
pageblock if the isolated page is at least half of a @@ -8726,7 +8757,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) BUG_ON(!PageBuddy(page)); order = page_order(page); offlined_pages += 1 << order; - del_page_from_free_area(page, &zone->free_area[order]); + del_page_from_free_list(page, zone, order); pfn += (1 << order); } spin_unlock_irqrestore(&zone->lock, flags); -- cgit v1.2.3-58-ga151 From 36e66c554b5c6a9d17a229faca7a61693527b0bd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 6 Apr 2020 20:04:56 -0700 Subject: mm: introduce Reported pages In order to pave the way for free page reporting in virtualized environments we will need a way to get pages out of the free lists and identify those pages after they have been returned. To accomplish this, this patch adds the concept of a Reported Buddy, which is essentially meant to just be the Uptodate flag used in conjunction with the Buddy page type. To prevent the reported pages from leaking outside of the buddy lists I added a check to clear the PageReported bit in the del_page_from_free_list function. As a result any reported page that is split, merged, or allocated will have the flag cleared prior to the PageBuddy value being cleared. The process for reporting pages is fairly simple. Once we free a page that meets the minimum order for page reporting we will schedule a worker thread to start 2s or more in the future. That worker thread will begin working from the lowest supported page reporting order up to MAX_ORDER - 1 pulling unreported pages from the free list and storing them in the scatterlist. When processing each individual free list it is necessary for the worker thread to release the zone lock when it needs to stop and report the full scatterlist of pages. To reduce the work of the next iteration the worker thread will rotate the free list so that the first unreported page in the free list becomes the first entry in the list. It will then call a reporting function providing information on how many entries are in the scatterlist. Once the function completes it will return the pages to the free area from which they were allocated and start over pulling more pages from the free areas until there are no longer enough pages to report on to keep the worker busy, or we have processed as many pages as were contained in the free area when we started processing the list. The worker thread will work in a round-robin fashion making its way through each zone requesting reporting, and through each reportable free list within that zone. Once all free areas within the zone have been processed it will check to see if there have been any requests for reporting while it was processing. If so it will reschedule the worker thread to start up again in roughly 2s and exit. Signed-off-by: Alexander Duyck Signed-off-by: Andrew Morton Acked-by: Mel Gorman Cc: Andrea Arcangeli Cc: Dan Williams Cc: Dave Hansen Cc: David Hildenbrand Cc: Konrad Rzeszutek Wilk Cc: Luiz Capitulino Cc: Matthew Wilcox Cc: Michael S.
Tsirkin Cc: Michal Hocko Cc: Nitesh Narayan Lal Cc: Oscar Salvador Cc: Pankaj Gupta Cc: Paolo Bonzini Cc: Rik van Riel Cc: Vlastimil Babka Cc: Wei Wang Cc: Yang Zhang Cc: wei qi Link: http://lkml.kernel.org/r/20200211224635.29318.19750.stgit@localhost.localdomain Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 11 ++ include/linux/page_reporting.h | 25 ++++ mm/Kconfig | 11 ++ mm/Makefile | 1 + mm/page_alloc.c | 17 ++- mm/page_reporting.c | 319 +++++++++++++++++++++++++++++++++++++++++ mm/page_reporting.h | 54 +++++++ 7 files changed, 434 insertions(+), 4 deletions(-) create mode 100644 include/linux/page_reporting.h create mode 100644 mm/page_reporting.c create mode 100644 mm/page_reporting.h (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index acf7988fd640..222f6f7b2bb3 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -168,6 +168,9 @@ enum pageflags { /* non-lru isolated movable page */ PG_isolated = PG_reclaim, + + /* Only valid for buddy pages. Used to track pages that are reported */ + PG_reported = PG_uptodate, }; #ifndef __GENERATING_BOUNDS_H @@ -436,6 +439,14 @@ TESTCLEARFLAG(Young, young, PF_ANY) PAGEFLAG(Idle, idle, PF_ANY) #endif +/* + * PageReported() is used to track reported free pages within the Buddy + * allocator. We can use the non-atomic version of the test and set + * operations as both should be shielded with the zone lock to prevent + * any possible races on the setting or clearing of the bit. + */ +__PAGEFLAG(Reported, reported, PF_NO_COMPOUND) + /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h new file mode 100644 index 000000000000..32355486f572 --- /dev/null +++ b/include/linux/page_reporting.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGE_REPORTING_H +#define _LINUX_PAGE_REPORTING_H + +#include +#include + +#define PAGE_REPORTING_CAPACITY 32 + +struct page_reporting_dev_info { + /* function that alters pages to make them "reported" */ + int (*report)(struct page_reporting_dev_info *prdev, + struct scatterlist *sg, unsigned int nents); + + /* work struct for processing reports */ + struct delayed_work work; + + /* Current state of page reporting */ + atomic_t state; +}; + +/* Tear-down and bring-up for page reporting devices */ +void page_reporting_unregister(struct page_reporting_dev_info *prdev); +int page_reporting_register(struct page_reporting_dev_info *prdev); +#endif /*_LINUX_PAGE_REPORTING_H */ diff --git a/mm/Kconfig b/mm/Kconfig index 211a70e8d5cf..d286dc54458c 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -236,6 +236,17 @@ config COMPACTION it and then we would be really interested to hear about that at linux-mm@kvack.org. +# +# support for free page reporting +config PAGE_REPORTING + bool "Free page reporting" + def_bool n + help + Free page reporting allows for the incremental acquisition of + free pages from the buddy allocator for the purpose of reporting + those pages to another entity, such as a hypervisor, so that the + memory can be freed within the host for other uses. 
+ # # support for page migration # diff --git a/mm/Makefile b/mm/Makefile index dbc8346d16ca..fccd3756b25f 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -111,3 +111,4 @@ obj-$(CONFIG_HMM_MIRROR) += hmm.o obj-$(CONFIG_MEMFD_CREATE) += memfd.o obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 448e439b75f2..114c56c3685d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -74,6 +74,7 @@ #include #include "internal.h" #include "shuffle.h" +#include "page_reporting.h" /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); @@ -896,6 +897,10 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, static inline void del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order) { + /* clear reported state and update reported page count */ + if (page_reported(page)) + __ClearPageReported(page); + list_del(&page->lru); __ClearPageBuddy(page); set_page_private(page, 0); @@ -959,7 +964,7 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, static inline void __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, - int migratetype) + int migratetype, bool report) { struct capture_control *capc = task_capc(zone); unsigned long uninitialized_var(buddy_pfn); @@ -1044,6 +1049,10 @@ done_merging: add_to_free_list_tail(page, zone, order, migratetype); else add_to_free_list(page, zone, order, migratetype); + + /* Notify page reporting subsystem of freed page */ + if (report) + page_reporting_notify_free(order); } /* @@ -1360,7 +1369,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, if (unlikely(isolated_pageblocks)) mt = get_pageblock_migratetype(page); - __free_one_page(page, page_to_pfn(page), zone, 0, mt); + __free_one_page(page, page_to_pfn(page), zone, 0, mt, true); trace_mm_page_pcpu_drain(page, 0, mt); } spin_unlock(&zone->lock); @@ -1376,7 +1385,7 @@ static void free_one_page(struct zone *zone, is_migrate_isolate(migratetype))) { migratetype = get_pfnblock_migratetype(page, pfn); } - __free_one_page(page, pfn, zone, order, migratetype); + __free_one_page(page, pfn, zone, order, migratetype, true); spin_unlock(&zone->lock); } @@ -3227,7 +3236,7 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt) lockdep_assert_held(&zone->lock); /* Return isolated page to tail of freelist. 
*/ - __free_one_page(page, page_to_pfn(page), zone, order, mt); + __free_one_page(page, page_to_pfn(page), zone, order, mt, false); } /* diff --git a/mm/page_reporting.c b/mm/page_reporting.c new file mode 100644 index 000000000000..1047c6872d4f --- /dev/null +++ b/mm/page_reporting.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#include "page_reporting.h" +#include "internal.h" + +#define PAGE_REPORTING_DELAY (2 * HZ) +static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly; + +enum { + PAGE_REPORTING_IDLE = 0, + PAGE_REPORTING_REQUESTED, + PAGE_REPORTING_ACTIVE +}; + +/* request page reporting */ +static void +__page_reporting_request(struct page_reporting_dev_info *prdev) +{ + unsigned int state; + + /* Check to see if we are in desired state */ + state = atomic_read(&prdev->state); + if (state == PAGE_REPORTING_REQUESTED) + return; + + /* + * If reporting is already active there is nothing we need to do. + * Test against 0 as that represents PAGE_REPORTING_IDLE. + */ + state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED); + if (state != PAGE_REPORTING_IDLE) + return; + + /* + * Delay the start of work to allow a sizable queue to build. For + * now we are limiting this to running no more than once every + * couple of seconds. + */ + schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY); +} + +/* notify prdev of free page reporting request */ +void __page_reporting_notify(void) +{ + struct page_reporting_dev_info *prdev; + + /* + * We use RCU to protect the pr_dev_info pointer. In almost all + * cases this should be present, however in the unlikely case of + * a shutdown this will be NULL and we should exit. + */ + rcu_read_lock(); + prdev = rcu_dereference(pr_dev_info); + if (likely(prdev)) + __page_reporting_request(prdev); + + rcu_read_unlock(); +} + +static void +page_reporting_drain(struct page_reporting_dev_info *prdev, + struct scatterlist *sgl, unsigned int nents, bool reported) +{ + struct scatterlist *sg = sgl; + + /* + * Drain the now reported pages back into their respective + * free lists/areas. We assume at least one page is populated. + */ + do { + struct page *page = sg_page(sg); + int mt = get_pageblock_migratetype(page); + unsigned int order = get_order(sg->length); + + __putback_isolated_page(page, order, mt); + + /* If the pages were not reported due to error skip flagging */ + if (!reported) + continue; + + /* + * If page was not comingled with another page we can + * consider the result to be "reported" since the page + * hasn't been modified, otherwise we will need to + * report on the new larger page when we make our way + * up to that higher order. + */ + if (PageBuddy(page) && page_order(page) == order) + __SetPageReported(page); + } while ((sg = sg_next(sg))); + + /* reinitialize scatterlist now that it is empty */ + sg_init_table(sgl, nents); +} + +/* + * The page reporting cycle consists of 4 stages, fill, report, drain, and + * idle. We will cycle through the first 3 stages until we cannot obtain a + * full scatterlist of pages, in that case we will switch to idle. 
+ */ +static int +page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, + unsigned int order, unsigned int mt, + struct scatterlist *sgl, unsigned int *offset) +{ + struct free_area *area = &zone->free_area[order]; + struct list_head *list = &area->free_list[mt]; + unsigned int page_len = PAGE_SIZE << order; + struct page *page, *next; + int err = 0; + + /* + * Perform early check, if free area is empty there is + * nothing to process so we can skip this free_list. + */ + if (list_empty(list)) + return err; + + spin_lock_irq(&zone->lock); + + /* loop through free list adding unreported pages to sg list */ + list_for_each_entry_safe(page, next, list, lru) { + /* We are going to skip over the reported pages. */ + if (PageReported(page)) + continue; + + /* Attempt to pull page from list */ + if (!__isolate_free_page(page, order)) + break; + + /* Add page to scatter list */ + --(*offset); + sg_set_page(&sgl[*offset], page, page_len, 0); + + /* If scatterlist isn't full grab more pages */ + if (*offset) + continue; + + /* release lock before waiting on report processing */ + spin_unlock_irq(&zone->lock); + + /* begin processing pages in local list */ + err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY); + + /* reset offset since the full list was reported */ + *offset = PAGE_REPORTING_CAPACITY; + + /* reacquire zone lock and resume processing */ + spin_lock_irq(&zone->lock); + + /* flush reported pages from the sg list */ + page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err); + + /* + * Reset next to first entry, the old next isn't valid + * since we dropped the lock to report the pages + */ + next = list_first_entry(list, struct page, lru); + + /* exit on error */ + if (err) + break; + } + + spin_unlock_irq(&zone->lock); + + return err; +} + +static int +page_reporting_process_zone(struct page_reporting_dev_info *prdev, + struct scatterlist *sgl, struct zone *zone) +{ + unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY; + unsigned long watermark; + int err = 0; + + /* Generate minimum watermark to be able to guarantee progress */ + watermark = low_wmark_pages(zone) + + (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER); + + /* + * Cancel request if insufficient free memory or if we failed + * to allocate page reporting statistics for the zone. 
+ */ + if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) + return err; + + /* Process each free list starting from lowest order/mt */ + for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) { + for (mt = 0; mt < MIGRATE_TYPES; mt++) { + /* We do not pull pages from the isolate free list */ + if (is_migrate_isolate(mt)) + continue; + + err = page_reporting_cycle(prdev, zone, order, mt, + sgl, &offset); + if (err) + return err; + } + } + + /* report the leftover pages before going idle */ + leftover = PAGE_REPORTING_CAPACITY - offset; + if (leftover) { + sgl = &sgl[offset]; + err = prdev->report(prdev, sgl, leftover); + + /* flush any remaining pages out from the last report */ + spin_lock_irq(&zone->lock); + page_reporting_drain(prdev, sgl, leftover, !err); + spin_unlock_irq(&zone->lock); + } + + return err; +} + +static void page_reporting_process(struct work_struct *work) +{ + struct delayed_work *d_work = to_delayed_work(work); + struct page_reporting_dev_info *prdev = + container_of(d_work, struct page_reporting_dev_info, work); + int err = 0, state = PAGE_REPORTING_ACTIVE; + struct scatterlist *sgl; + struct zone *zone; + + /* + * Change the state to "Active" so that we can track if there is + * anyone requests page reporting after we complete our pass. If + * the state is not altered by the end of the pass we will switch + * to idle and quit scheduling reporting runs. + */ + atomic_set(&prdev->state, state); + + /* allocate scatterlist to store pages being reported on */ + sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL); + if (!sgl) + goto err_out; + + sg_init_table(sgl, PAGE_REPORTING_CAPACITY); + + for_each_zone(zone) { + err = page_reporting_process_zone(prdev, sgl, zone); + if (err) + break; + } + + kfree(sgl); +err_out: + /* + * If the state has reverted back to requested then there may be + * additional pages to be processed. We will defer for 2s to allow + * more pages to accumulate. 
+ */ + state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE); + if (state == PAGE_REPORTING_REQUESTED) + schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY); +} + +static DEFINE_MUTEX(page_reporting_mutex); +DEFINE_STATIC_KEY_FALSE(page_reporting_enabled); + +int page_reporting_register(struct page_reporting_dev_info *prdev) +{ + int err = 0; + + mutex_lock(&page_reporting_mutex); + + /* nothing to do if already in use */ + if (rcu_access_pointer(pr_dev_info)) { + err = -EBUSY; + goto err_out; + } + + /* initialize state and work structures */ + atomic_set(&prdev->state, PAGE_REPORTING_IDLE); + INIT_DELAYED_WORK(&prdev->work, &page_reporting_process); + + /* Begin initial flush of zones */ + __page_reporting_request(prdev); + + /* Assign device to allow notifications */ + rcu_assign_pointer(pr_dev_info, prdev); + + /* enable page reporting notification */ + if (!static_key_enabled(&page_reporting_enabled)) { + static_branch_enable(&page_reporting_enabled); + pr_info("Free page reporting enabled\n"); + } +err_out: + mutex_unlock(&page_reporting_mutex); + + return err; +} +EXPORT_SYMBOL_GPL(page_reporting_register); + +void page_reporting_unregister(struct page_reporting_dev_info *prdev) +{ + mutex_lock(&page_reporting_mutex); + + if (rcu_access_pointer(pr_dev_info) == prdev) { + /* Disable page reporting notification */ + RCU_INIT_POINTER(pr_dev_info, NULL); + synchronize_rcu(); + + /* Flush any existing work, and lock it out */ + cancel_delayed_work_sync(&prdev->work); + } + + mutex_unlock(&page_reporting_mutex); +} +EXPORT_SYMBOL_GPL(page_reporting_unregister); diff --git a/mm/page_reporting.h b/mm/page_reporting.h new file mode 100644 index 000000000000..aa6d37f4dc22 --- /dev/null +++ b/mm/page_reporting.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MM_PAGE_REPORTING_H +#define _MM_PAGE_REPORTING_H + +#include +#include +#include +#include +#include +#include +#include + +#define PAGE_REPORTING_MIN_ORDER pageblock_order + +#ifdef CONFIG_PAGE_REPORTING +DECLARE_STATIC_KEY_FALSE(page_reporting_enabled); +void __page_reporting_notify(void); + +static inline bool page_reported(struct page *page) +{ + return static_branch_unlikely(&page_reporting_enabled) && + PageReported(page); +} + +/** + * page_reporting_notify_free - Free page notification to start page processing + * + * This function is meant to act as a screener for __page_reporting_notify + * which will determine if a given zone has crossed over the high-water mark + * that will justify us beginning page treatment. If we have crossed that + * threshold then it will start the process of pulling some pages and + * placing them in the batch list for treatment. + */ +static inline void page_reporting_notify_free(unsigned int order) +{ + /* Called from hot path in __free_one_page() */ + if (!static_branch_unlikely(&page_reporting_enabled)) + return; + + /* Determine if we have crossed reporting threshold */ + if (order < PAGE_REPORTING_MIN_ORDER) + return; + + /* This will add a few cycles, but should be called infrequently */ + __page_reporting_notify(); +} +#else /* CONFIG_PAGE_REPORTING */ +#define page_reported(_page) false + +static inline void page_reporting_notify_free(unsigned int order) +{ +} +#endif /* CONFIG_PAGE_REPORTING */ +#endif /*_MM_PAGE_REPORTING_H */ -- cgit v1.2.3-58-ga151
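As a hedged illustration of the consumer side, not part of the patch itself, a device that wants free pages reported to it would register a report() callback roughly like this (the demo_* names are invented here; virtio-balloon is the intended in-tree consumer):

#include <linux/page_reporting.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

/*
 * Called from the reporting worker with a scatterlist of isolated,
 * free, high-order chunks; may block (the zone lock is not held).
 */
static int demo_report(struct page_reporting_dev_info *prdev,
		       struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		pr_debug("reporting pfn %lx, len %u\n",
			 page_to_pfn(sg_page(sg)), sg->length);

	return 0;	/* success lets the pages be flagged PageReported */
}

static struct page_reporting_dev_info demo_prdev = {
	.report = demo_report,
};

/* on probe:  err = page_reporting_register(&demo_prdev); */
/* on remove: page_reporting_unregister(&demo_prdev);     */

Note that page_reporting_cycle() above drops the zone lock before invoking report(), precisely so the callback is allowed to sleep, e.g. to wait on a hypervisor request.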
From 43b76f298f023d32273c2b9c25dd83ae02711019 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 6 Apr 2020 20:05:14 -0700 Subject: mm/page_reporting: add budget limit on how many pages can be reported per pass In order to keep ourselves from reporting pages that are just going to be reused again in the case of heavy churn we can put a limit on how many total pages we will process per pass. Doing this will allow the worker thread to go into idle much more quickly so that we avoid competing with other threads that might be allocating or freeing pages. The logic added here will limit the worker thread to no more than one sixteenth of the total free pages in a given area per list. Once that limit is reached it will update the state so that at the end of the pass we will reschedule the worker to try again in 2 seconds when the memory churn has hopefully settled down. Again this optimization doesn't show much of a benefit in the standard case as the memory churn is minimal. However with page allocator shuffling enabled the gain is quite noticeable. Below are the results with a THP enabled version of the will-it-scale page_fault1 test showing the improvement in iterations for 16 processes or threads.

Without:
tasks  processes   processes_idle  threads     threads_idle
16     8283274.75  0.17            5594261.00  38.15

With:
tasks  processes   processes_idle  threads     threads_idle
16     8767010.50  0.21            5791312.75  36.98

Signed-off-by: Alexander Duyck Signed-off-by: Andrew Morton Acked-by: Mel Gorman Cc: Andrea Arcangeli Cc: Dan Williams Cc: Dave Hansen Cc: David Hildenbrand Cc: Konrad Rzeszutek Wilk Cc: Luiz Capitulino Cc: Matthew Wilcox Cc: Michael S.
Tsirkin Cc: Michal Hocko Cc: Nitesh Narayan Lal Cc: Oscar Salvador Cc: Pankaj Gupta Cc: Paolo Bonzini Cc: Rik van Riel Cc: Vlastimil Babka Cc: Wei Wang Cc: Yang Zhang Cc: wei qi Link: http://lkml.kernel.org/r/20200211224719.29318.72113.stgit@localhost.localdomain Signed-off-by: Linus Torvalds --- include/linux/page_reporting.h | 1 + mm/page_reporting.c | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h index 32355486f572..3b99e0ec24f2 100644 --- a/include/linux/page_reporting.h +++ b/include/linux/page_reporting.h @@ -5,6 +5,7 @@ #include #include +/* This value should always be a power of 2, see page_reporting_cycle() */ #define PAGE_REPORTING_CAPACITY 32 struct page_reporting_dev_info { diff --git a/mm/page_reporting.c b/mm/page_reporting.c index 6885e74c2367..3bbd471cfc81 100644 --- a/mm/page_reporting.c +++ b/mm/page_reporting.c @@ -114,6 +114,7 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, struct list_head *list = &area->free_list[mt]; unsigned int page_len = PAGE_SIZE << order; struct page *page, *next; + long budget; int err = 0; /* @@ -125,12 +126,39 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, spin_lock_irq(&zone->lock); + /* + * Limit how many calls we will be making to the page reporting + * device for this list. By doing this we avoid processing any + * given list for too long. + * + * The current value used allows us enough calls to process over a + * sixteenth of the current list plus one additional call to handle + * any pages that may have already been present from the previous + * list processed. This should result in us reporting all pages on + * an idle system in about 30 seconds. + * + * The division here should be cheap since PAGE_REPORTING_CAPACITY + * should always be a power of 2. + */ + budget = DIV_ROUND_UP(area->nr_free, PAGE_REPORTING_CAPACITY * 16); + /* loop through free list adding unreported pages to sg list */ list_for_each_entry_safe(page, next, list, lru) { /* We are going to skip over the reported pages. */ if (PageReported(page)) continue; + /* + * If we fully consumed our budget then update our + * state to indicate that we are requesting additional + * processing and exit this list. + */ + if (budget < 0) { + atomic_set(&prdev->state, PAGE_REPORTING_REQUESTED); + next = page; + break; + } + /* Attempt to pull page from list and place in scatterlist */ if (*offset) { if (!__isolate_free_page(page, order)) { @@ -146,7 +174,7 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, } /* - * Make the first non-processed page in the free list + * Make the first non-reported page in the free list * the new head of the free list before we release the * zone lock. */ @@ -162,6 +190,9 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, /* reset offset since the full list was reported */ *offset = PAGE_REPORTING_CAPACITY; + /* update budget to reflect call to report function */ + budget--; + /* reacquire zone lock and resume processing */ spin_lock_irq(&zone->lock); -- cgit v1.2.3-58-ga151 From 1df319e0b4dee11436fe2ab1a0d536d3fad7cfef Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 6 Apr 2020 20:05:25 -0700 Subject: userfaultfd: wp: add helper for writeprotect check Patch series "userfaultfd: write protection support", v6. 
Overview
========

The uffd-wp work was initiated by Shaohua Li [1], and later continued by Andrea [2]. This series is based upon Andrea's latest userfaultfd tree, and it is continued work from both Shaohua and Andrea. Many of the follow-up ideas come from Andrea too. Besides the old MISSING register mode of userfaultfd, the new uffd-wp support provides another register mode called UFFDIO_REGISTER_MODE_WP that can be used to listen not only for missing page faults but also for write protection page faults, or the two modes can be registered together. At the same time, the new feature also provides a new userfaultfd ioctl called UFFDIO_WRITEPROTECT which allows userspace to write protect a range of memory or fix up the write permission of faulted pages. Please refer to the document patch "userfaultfd: wp: UFFDIO_REGISTER_MODE_WP documentation update" for more information on the new interface and what it can do. The major workflow of an uffd-wp program should be:

1. Register a memory region with WP mode using UFFDIO_REGISTER_MODE_WP.
2. Write protect part of the whole registered region using UFFDIO_WRITEPROTECT, passing in UFFDIO_WRITEPROTECT_MODE_WP to show that we want to write protect the range.
3. Start a working thread that modifies the protected pages, meanwhile listening to UFFD messages.
4. When a write is detected upon the protected range, a page fault happens and a UFFD message is generated and reported to the page fault handling thread.
5. The page fault handler thread resolves the page fault using the new UFFDIO_WRITEPROTECT ioctl, but this time passing in !UFFDIO_WRITEPROTECT_MODE_WP instead, showing that we want to recover the write permission. Before this operation, the fault handler thread can do anything it wants, e.g., dump the page to persistent storage.
6. The worker thread will continue running with the correctly applied write permission from step 5.
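Condensed into code, steps 1, 2, and 5 above might look like the sketch below (a hypothetical fragment with error handling omitted; it relies on the UFFDIO_WRITEPROTECT uapi added later in this series, and area/size stand in for the monitored region):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

	/* inside main(), after mmap()ing `area` of `size` bytes: */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	/* step 1: register the region for write-protect tracking */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = size },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* step 2: arm write protection on the registered range */
	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)area, .len = size },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	/*
	 * steps 3-4: a fault handler thread read()s uffd_msg events
	 * carrying UFFD_PAGEFAULT_FLAG_WP, does whatever it wants with
	 * the page, then ...
	 */

	/* step 5: resolve the fault by dropping the protection */
	wp.mode = 0;
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);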
Currently there are already two projects that are based on this new userfaultfd feature. QEMU Live Snapshot: The project provides a way to allow the QEMU hypervisor to take snapshots of VMs without stopping the VM [3]. LLNL umap library: The project provides a mmap-like interface and "allow to have an application specific buffer of pages cached from a large file, i.e. out-of-core execution using memory map" [4][5]. Before posting the patchset, this series was smoke tested against QEMU live snapshot and the LLNL umap library (by doing parallel quicksort using 128 sorting threads + 80 uffd servicing threads). My sincere thanks to Marty Mcfadden and Denis Plotnikov for the help along the way.

TODO
====
- hugetlbfs/shmem support
- performance
- more architectures
- cooperate with mprotect()-allowed processes (???)
- ...

References
==========
[1] https://lwn.net/Articles/666187/
[2] https://git.kernel.org/pub/scm/linux/kernel/git/andrea/aa.git/log/?h=userfault
[3] https://github.com/denis-plotnikov/qemu/commits/background-snapshot-kvm
[4] https://github.com/LLNL/umap
[5] https://llnl-umap.readthedocs.io/en/develop/
[6] https://git.kernel.org/pub/scm/linux/kernel/git/andrea/aa.git/commit/?h=userfault&id=b245ecf6cf59156966f3da6e6b674f6695a5ffa5
[7] https://lkml.org/lkml/2018/11/21/370
[8] https://lkml.org/lkml/2018/12/30/64

This patch (of 19): Add helper for writeprotect check. Will use it later. Signed-off-by: Shaohua Li Signed-off-by: Andrea Arcangeli Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: Jerome Glisse Reviewed-by: Mike Rapoport Cc: Rik van Riel Cc: Kirill A. Shutemov Cc: Mel Gorman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Bobby Powers Cc: Brian Geffon Cc: David Hildenbrand Cc: Denis Plotnikov Cc: "Dr . David Alan Gilbert" Cc: Martin Cracauer Cc: Marty McFadden Cc: Maya Gokhale Cc: Mike Kravetz Cc: Pavel Emelyanov Link: http://lkml.kernel.org/r/20200220163112.11409-2-peterx@redhat.com Signed-off-by: Linus Torvalds --- include/linux/userfaultfd_k.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index ac9d71e24b81..5dc247af0f2e 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -52,6 +52,11 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma) return vma->vm_flags & VM_UFFD_MISSING; } +static inline bool userfaultfd_wp(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_UFFD_WP; +} + static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); @@ -96,6 +101,11 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_wp(struct vm_area_struct *vma) +{ + return false; +} + static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return false; -- cgit v1.2.3-58-ga151 From 55adf4de30346e0ec6b988cb9b885a5dddc954af Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 6 Apr 2020 20:05:37 -0700 Subject: userfaultfd: wp: userfaultfd_pte/huge_pmd_wp() helpers Implement helper methods to invoke userfaultfd wp faults more selectively: not only when a wp fault triggers on a vma with vma->vm_flags VM_UFFD_WP set, but only if the _PAGE_UFFD_WP bit is set in the pagetable too. Signed-off-by: Andrea Arcangeli Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: Jerome Glisse Reviewed-by: Mike Rapoport Cc: Bobby Powers Cc: Brian Geffon Cc: David Hildenbrand Cc: Denis Plotnikov Cc: "Dr . David Alan Gilbert" Cc: Hugh Dickins Cc: Johannes Weiner Cc: "Kirill A .
Shutemov" Cc: Martin Cracauer Cc: Marty McFadden Cc: Maya Gokhale Cc: Mel Gorman Cc: Mike Kravetz Cc: Pavel Emelyanov Cc: Rik van Riel Cc: Shaohua Li Link: http://lkml.kernel.org/r/20200220163112.11409-5-peterx@redhat.com Signed-off-by: Linus Torvalds --- include/linux/userfaultfd_k.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'include/linux') diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 5dc247af0f2e..7b91b76aac58 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -14,6 +14,8 @@ #include /* linux/include/uapi/linux/userfaultfd.h */ #include +#include +#include /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -57,6 +59,18 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return vma->vm_flags & VM_UFFD_WP; } +static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, + pte_t pte) +{ + return userfaultfd_wp(vma) && pte_uffd_wp(pte); +} + +static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, + pmd_t pmd) +{ + return userfaultfd_wp(vma) && pmd_uffd_wp(pmd); +} + static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); @@ -106,6 +120,19 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, + pte_t pte) +{ + return false; +} + +static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, + pmd_t pmd) +{ + return false; +} + + static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return false; -- cgit v1.2.3-58-ga151 From 72981e0e7b609c741d7764cc920c8fec00920bd5 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 6 Apr 2020 20:05:41 -0700 Subject: userfaultfd: wp: add UFFDIO_COPY_MODE_WP This allows UFFDIO_COPY to map pages write-protected. [peterx@redhat.com: switch to VM_WARN_ON_ONCE in mfill_atomic_pte; add brackets around "dst_vma->vm_flags & VM_WRITE"; fix wordings in comments and commit messages] Signed-off-by: Andrea Arcangeli Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: Jerome Glisse Reviewed-by: Mike Rapoport Cc: Bobby Powers Cc: Brian Geffon Cc: David Hildenbrand Cc: Denis Plotnikov Cc: "Dr . David Alan Gilbert" Cc: Hugh Dickins Cc: Johannes Weiner Cc: "Kirill A . 
Shutemov" Cc: Martin Cracauer Cc: Marty McFadden Cc: Maya Gokhale Cc: Mel Gorman Cc: Mike Kravetz Cc: Pavel Emelyanov Cc: Rik van Riel Cc: Shaohua Li Link: http://lkml.kernel.org/r/20200220163112.11409-6-peterx@redhat.com Signed-off-by: Linus Torvalds --- fs/userfaultfd.c | 5 +++-- include/linux/userfaultfd_k.h | 2 +- include/uapi/linux/userfaultfd.h | 11 ++++++----- mm/userfaultfd.c | 36 +++++++++++++++++++++++++----------- 4 files changed, 35 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 703c1c3faa6e..c49bef505775 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1724,11 +1724,12 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, ret = -EINVAL; if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) goto out; - if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE) + if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) goto out; if (mmget_not_zero(ctx->mm)) { ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, - uffdio_copy.len, &ctx->mmap_changing); + uffdio_copy.len, &ctx->mmap_changing, + uffdio_copy.mode); mmput(ctx->mm); } else { return -ESRCH; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 7b91b76aac58..dcd33172b728 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -36,7 +36,7 @@ extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool *mmap_changing); + bool *mmap_changing, __u64 mode); extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 48f1a7c2f1f0..340f23bc251d 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -203,13 +203,14 @@ struct uffdio_copy { __u64 dst; __u64 src; __u64 len; +#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0) /* - * There will be a wrprotection flag later that allows to map - * pages wrprotected on the fly. And such a flag will be - * available if the wrprotection ioctl are implemented for the - * range according to the uffdio_register.ioctls. + * UFFDIO_COPY_MODE_WP will map the page write protected on + * the fly. UFFDIO_COPY_MODE_WP is available only if the + * write protected ioctl is implemented for the range + * according to the uffdio_register.ioctls. 
*/ -#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0) +#define UFFDIO_COPY_MODE_WP ((__u64)1<<1) __u64 mode; /* diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index bd96855f3961..05dbbcafdcc0 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -53,7 +53,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, - struct page **pagep) + struct page **pagep, + bool wp_copy) { struct mem_cgroup *memcg; pte_t _dst_pte, *dst_pte; @@ -99,9 +100,9 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false)) goto out_release; - _dst_pte = mk_pte(page, dst_vma->vm_page_prot); - if (dst_vma->vm_flags & VM_WRITE) - _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); + _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot)); + if ((dst_vma->vm_flags & VM_WRITE) && !wp_copy) + _dst_pte = pte_mkwrite(_dst_pte); dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); if (dst_vma->vm_file) { @@ -415,7 +416,8 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm, unsigned long dst_addr, unsigned long src_addr, struct page **page, - bool zeropage) + bool zeropage, + bool wp_copy) { ssize_t err; @@ -432,11 +434,13 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm, if (!(dst_vma->vm_flags & VM_SHARED)) { if (!zeropage) err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, - dst_addr, src_addr, page); + dst_addr, src_addr, page, + wp_copy); else err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, dst_addr); } else { + VM_WARN_ON_ONCE(wp_copy); if (!zeropage) err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, @@ -454,7 +458,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, unsigned long src_start, unsigned long len, bool zeropage, - bool *mmap_changing) + bool *mmap_changing, + __u64 mode) { struct vm_area_struct *dst_vma; ssize_t err; @@ -462,6 +467,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, unsigned long src_addr, dst_addr; long copied; struct page *page; + bool wp_copy; /* * Sanitize the command parameters: @@ -507,6 +513,14 @@ retry: dst_vma->vm_flags & VM_SHARED)) goto out_unlock; + /* + * validate 'mode' now that we know the dst_vma: don't allow + * a wrprotect copy if the userfaultfd didn't register as WP. 
+ */ + wp_copy = mode & UFFDIO_COPY_MODE_WP; + if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP)) + goto out_unlock; + /* * If this is a HUGETLB vma, pass off to appropriate routine */ @@ -562,7 +576,7 @@ retry: BUG_ON(pmd_trans_huge(*dst_pmd)); err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, - src_addr, &page, zeropage); + src_addr, &page, zeropage, wp_copy); cond_resched(); if (unlikely(err == -ENOENT)) { @@ -609,14 +623,14 @@ out: ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool *mmap_changing) + bool *mmap_changing, __u64 mode) { return __mcopy_atomic(dst_mm, dst_start, src_start, len, false, - mmap_changing); + mmap_changing, mode); } ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool *mmap_changing) { - return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing); + return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0); } -- cgit v1.2.3-58-ga151
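To make the new mode concrete, here is a hypothetical resolver-side fragment (uffd, fault_addr, src_buf, and page_size are assumed context, not taken from the patch): it fills a missing page while keeping it write protected, so the first write to it still raises a WP event:

	struct uffdio_copy copy = {
		.dst = (unsigned long)fault_addr,  /* page-aligned fault address */
		.src = (unsigned long)src_buf,     /* local buffer with contents */
		.len = page_size,
		.mode = UFFDIO_COPY_MODE_WP,       /* map it wrprotected */
	};
	ioctl(uffd, UFFDIO_COPY, &copy);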
Shutemov" Cc: Martin Cracauer Cc: Marty McFadden Cc: Maya Gokhale Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Pavel Emelyanov Cc: Rik van Riel Cc: Shaohua Li Link: http://lkml.kernel.org/r/20200220163112.11409-7-peterx@redhat.com Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 +- include/linux/mm.h | 14 +++++++++++++- mm/huge_memory.c | 3 ++- mm/mempolicy.c | 2 +- mm/mprotect.c | 29 ++++++++++++++++------------- 5 files changed, 33 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f2df2247026a..cfbb0a87c5f0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -46,7 +46,7 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, - int prot_numa); + unsigned long cp_flags); vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, pgprot_t pgprot, bool write); diff --git a/include/linux/mm.h b/include/linux/mm.h index be49e371e4b5..c7d87ff5027b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1771,9 +1771,21 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks); + +/* + * Flags used by change_protection(). For now we make it a bitmap so + * that we can pass in multiple flags just like parameters. However + * for now all the callers are only use one of the flags at the same + * time. + */ +/* Whether we should allow dirty bit accounting */ +#define MM_CP_DIRTY_ACCT (1UL << 0) +/* Whether this protection change is for NUMA hints */ +#define MM_CP_PROT_NUMA (1UL << 1) + extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa); + unsigned long cp_flags); extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c1e7c71db1e6..dc12249af6df 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1979,13 +1979,14 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, * - HPAGE_PMD_NR is protections changed and TLB flush necessary */ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long addr, pgprot_t newprot, int prot_numa) + unsigned long addr, pgprot_t newprot, unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; spinlock_t *ptl; pmd_t entry; bool preserve_write; int ret; + bool prot_numa = cp_flags & MM_CP_PROT_NUMA; ptl = __pmd_trans_huge_lock(pmd, vma); if (!ptl) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 037e5f548118..145be04b7108 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -627,7 +627,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, { int nr_updated; - nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); + nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); diff --git a/mm/mprotect.c b/mm/mprotect.c index 0fee14b39416..046e0889e65f 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -37,12 +37,14 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, 
- int dirty_accountable, int prot_numa) + unsigned long cp_flags) { pte_t *pte, oldpte; spinlock_t *ptl; unsigned long pages = 0; int target_node = NUMA_NO_NODE; + bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT; + bool prot_numa = cp_flags & MM_CP_PROT_NUMA; /* * Can be called with only the mmap_sem for reading by @@ -188,7 +190,7 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, - pgprot_t newprot, int dirty_accountable, int prot_numa) + pgprot_t newprot, unsigned long cp_flags) { pmd_t *pmd; unsigned long next; @@ -229,7 +231,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, __split_huge_pmd(vma, pmd, addr, false, NULL); } else { int nr_ptes = change_huge_pmd(vma, pmd, addr, - newprot, prot_numa); + newprot, cp_flags); if (nr_ptes) { if (nr_ptes == HPAGE_PMD_NR) { @@ -244,7 +246,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, /* fall through, the trans huge pmd just split */ } this_pages = change_pte_range(vma, pmd, addr, next, newprot, - dirty_accountable, prot_numa); + cp_flags); pages += this_pages; next: cond_resched(); @@ -260,7 +262,7 @@ next: static inline unsigned long change_pud_range(struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, - pgprot_t newprot, int dirty_accountable, int prot_numa) + pgprot_t newprot, unsigned long cp_flags) { pud_t *pud; unsigned long next; @@ -272,7 +274,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma, if (pud_none_or_clear_bad(pud)) continue; pages += change_pmd_range(vma, pud, addr, next, newprot, - dirty_accountable, prot_numa); + cp_flags); } while (pud++, addr = next, addr != end); return pages; @@ -280,7 +282,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma, static inline unsigned long change_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, - pgprot_t newprot, int dirty_accountable, int prot_numa) + pgprot_t newprot, unsigned long cp_flags) { p4d_t *p4d; unsigned long next; @@ -292,7 +294,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma, if (p4d_none_or_clear_bad(p4d)) continue; pages += change_pud_range(vma, p4d, addr, next, newprot, - dirty_accountable, prot_numa); + cp_flags); } while (p4d++, addr = next, addr != end); return pages; @@ -300,7 +302,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma, static unsigned long change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa) + unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; @@ -317,7 +319,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, if (pgd_none_or_clear_bad(pgd)) continue; pages += change_p4d_range(vma, pgd, addr, next, newprot, - dirty_accountable, prot_numa); + cp_flags); } while (pgd++, addr = next, addr != end); /* Only flush the TLB if we actually modified any entries: */ @@ -330,14 +332,15 @@ static unsigned long change_protection_range(struct vm_area_struct *vma, unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa) + unsigned long cp_flags) { unsigned long pages; if (is_vm_hugetlb_page(vma)) pages = hugetlb_change_protection(vma, start, end, newprot); 
else - pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); + pages = change_protection_range(vma, start, end, newprot, + cp_flags); return pages; } @@ -459,7 +462,7 @@ success: vma_set_page_prot(vma); change_protection(vma, start, end, vma->vm_page_prot, - dirty_accountable, 0); + dirty_accountable ? MM_CP_DIRTY_ACCT : 0); /* * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major -- cgit v1.2.3-58-ga151 From 292924b260247483a58916f6d3550d8c92f32f55 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 6 Apr 2020 20:05:49 -0700 Subject: userfaultfd: wp: apply _PAGE_UFFD_WP bit Firstly, introduce two new flags MM_CP_UFFD_WP[_RESOLVE] for change_protection() when used with uffd-wp and make sure the two new flags are exclusively used. Then:

- For MM_CP_UFFD_WP: apply the _PAGE_UFFD_WP bit and remove _PAGE_RW when a range of memory is write protected by uffd
- For MM_CP_UFFD_WP_RESOLVE: remove the _PAGE_UFFD_WP bit and recover _PAGE_RW when write protection is resolved from userspace

And use this new interface in mwriteprotect_range() to replace the old MM_CP_DIRTY_ACCT. Do this change for both PTEs and huge PMDs. Then we can start to identify which PTE/PMD is write protected by general means (e.g., COW or soft dirty tracking), and which is for userfaultfd-wp. Since we should keep the _PAGE_UFFD_WP when doing pte_modify(), add it into _PAGE_CHG_MASK as well. Meanwhile, since we have this new bit, we can be even more strict when detecting uffd-wp page faults in either do_wp_page() or wp_huge_pmd(). Now that we have _PAGE_UFFD_WP, a special case is when a page is both protected by the general COW logic and also userfault-wp. Here the userfault-wp will have higher priority and will be handled first. Only after the uffd-wp bit is cleared on the PTE/PMD will we continue to handle the general COW. These are the steps on what will happen with such a page:

1. A CPU accesses the write protected shared page (protected by both general COW and uffd-wp) and is blocked by uffd-wp first, because do_wp_page() handles uffd-wp first, so it has higher priority than general COW.
2. The uffd service thread receives the request and does UFFDIO_WRITEPROTECT to remove the uffd-wp bit upon the PTE/PMD. However, here we still keep the write bit cleared. It then notifies the blocked CPU.
3. The blocked CPU resumes the page fault process with a fault retry; during the retry it'll notice the uffd-wp bit is no longer set, but the page is still write protected by general COW, so it'll go through the COW path in the fault handler, copy the page, apply the write bit where necessary, and retry again.
4. The CPU will be able to access this page with the write bit set.

Suggested-by: Andrea Arcangeli Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Cc: Brian Geffon Cc: Pavel Emelyanov Cc: Mike Kravetz Cc: David Hildenbrand Cc: Martin Cracauer Cc: Mel Gorman Cc: Bobby Powers Cc: Mike Rapoport Cc: "Kirill A. Shutemov" Cc: Maya Gokhale Cc: Johannes Weiner Cc: Marty McFadden Cc: Denis Plotnikov Cc: Hugh Dickins Cc: "Dr .
David Alan Gilbert" Cc: Jerome Glisse Cc: Rik van Riel Cc: Shaohua Li Link: http://lkml.kernel.org/r/20200220163112.11409-8-peterx@redhat.com Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 +++++ mm/huge_memory.c | 18 +++++++++++++++++- mm/memory.c | 4 ++-- mm/mprotect.c | 17 +++++++++++++++++ mm/userfaultfd.c | 8 ++++++-- 5 files changed, 47 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index c7d87ff5027b..e2f938c5a9d8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1782,6 +1782,11 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, #define MM_CP_DIRTY_ACCT (1UL << 0) /* Whether this protection change is for NUMA hints */ #define MM_CP_PROT_NUMA (1UL << 1) +/* Whether this change is for write protecting */ +#define MM_CP_UFFD_WP (1UL << 2) /* do wp */ +#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */ +#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ + MM_CP_UFFD_WP_RESOLVE) extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index dc12249af6df..425339491677 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1987,6 +1987,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, bool preserve_write; int ret; bool prot_numa = cp_flags & MM_CP_PROT_NUMA; + bool uffd_wp = cp_flags & MM_CP_UFFD_WP; + bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; ptl = __pmd_trans_huge_lock(pmd, vma); if (!ptl) @@ -2053,6 +2055,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, entry = pmd_modify(entry, newprot); if (preserve_write) entry = pmd_mk_savedwrite(entry); + if (uffd_wp) { + entry = pmd_wrprotect(entry); + entry = pmd_mkuffd_wp(entry); + } else if (uffd_wp_resolve) { + /* + * Leave the write bit to be handled by PF interrupt + * handler, then things like COW could be properly + * handled. 
+ */ + entry = pmd_clear_uffd_wp(entry); + } ret = HPAGE_PMD_NR; set_pmd_at(mm, addr, pmd, entry); BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); @@ -2201,7 +2214,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, struct page *page; pgtable_t pgtable; pmd_t old_pmd, _pmd; - bool young, write, soft_dirty, pmd_migration = false; + bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; unsigned long addr; int i; @@ -2283,6 +2296,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, write = pmd_write(old_pmd); young = pmd_young(old_pmd); soft_dirty = pmd_soft_dirty(old_pmd); + uffd_wp = pmd_uffd_wp(old_pmd); } VM_BUG_ON_PAGE(!page_count(page), page); page_ref_add(page, HPAGE_PMD_NR - 1); @@ -2316,6 +2330,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_mkold(entry); if (soft_dirty) entry = pte_mksoft_dirty(entry); + if (uffd_wp) + entry = pte_mkuffd_wp(entry); } pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); diff --git a/mm/memory.c b/mm/memory.c index 46aa79600ed8..f35821b43c1b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2752,7 +2752,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - if (userfaultfd_wp(vma)) { + if (userfaultfd_pte_wp(vma, *vmf->pte)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_WP); } @@ -3954,7 +3954,7 @@ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) { if (vma_is_anonymous(vmf->vma)) { - if (userfaultfd_wp(vmf->vma)) + if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd)) return handle_userfault(vmf, VM_UFFD_WP); return do_huge_pmd_wp_page(vmf, orig_pmd); } diff --git a/mm/mprotect.c b/mm/mprotect.c index 046e0889e65f..e4fa41a24bec 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -45,6 +45,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, int target_node = NUMA_NO_NODE; bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT; bool prot_numa = cp_flags & MM_CP_PROT_NUMA; + bool uffd_wp = cp_flags & MM_CP_UFFD_WP; + bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; /* * Can be called with only the mmap_sem for reading by @@ -116,6 +118,19 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (preserve_write) ptent = pte_mk_savedwrite(ptent); + if (uffd_wp) { + ptent = pte_wrprotect(ptent); + ptent = pte_mkuffd_wp(ptent); + } else if (uffd_wp_resolve) { + /* + * Leave the write bit to be handled + * by PF interrupt handler, then + * things like COW could be properly + * handled. 
+ */ + ptent = pte_clear_uffd_wp(ptent); + } + /* Avoid taking write faults for known dirty pages */ if (dirty_accountable && pte_dirty(ptent) && (pte_soft_dirty(ptent) || @@ -336,6 +351,8 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, { unsigned long pages; + BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL); + if (is_vm_hugetlb_page(vma)) pages = hugetlb_change_protection(vma, start, end, newprot); else diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 05dbbcafdcc0..7d6ab05be019 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -101,8 +101,12 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, goto out_release; _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot)); - if ((dst_vma->vm_flags & VM_WRITE) && !wp_copy) - _dst_pte = pte_mkwrite(_dst_pte); + if (dst_vma->vm_flags & VM_WRITE) { + if (wp_copy) + _dst_pte = pte_mkuffd_wp(_dst_pte); + else + _dst_pte = pte_mkwrite(_dst_pte); + } dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); if (dst_vma->vm_file) { -- cgit v1.2.3-58-ga151 From f45ec5ff16a75f96dac8c89862d75f1d8739efd4 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 6 Apr 2020 20:06:01 -0700 Subject: userfaultfd: wp: support swap and page migration For both swap and page migration, we use bit 2 of the entry to identify whether this entry is uffd write-protected. It plays a similar role to the existing soft dirty bit in swap entries but only for keeping the uffd-wp tracking for a specific PTE/PMD. Something special here is that when we want to recover the uffd-wp bit from a swap/migration entry to the PTE bit we'll also need to take care of the _PAGE_RW bit and make sure it's cleared, otherwise even with the _PAGE_UFFD_WP bit we can't trap it at all. In change_pte_range() we do nothing for uffd if the PTE is a swap entry. That can lead to data mismatch if the page that we are going to write protect is swapped out when sending the UFFDIO_WRITEPROTECT. This patch also applies/removes the uffd-wp bit even for the swap entries. Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Cc: Andrea Arcangeli Cc: Bobby Powers Cc: Brian Geffon Cc: David Hildenbrand Cc: Denis Plotnikov Cc: "Dr . David Alan Gilbert" Cc: Hugh Dickins Cc: Jerome Glisse Cc: Johannes Weiner Cc: "Kirill A .
Shutemov" Cc: Martin Cracauer Cc: Marty McFadden Cc: Maya Gokhale Cc: Mel Gorman Cc: Mike Kravetz Cc: Mike Rapoport Cc: Pavel Emelyanov Cc: Rik van Riel Cc: Shaohua Li Link: http://lkml.kernel.org/r/20200220163112.11409-11-peterx@redhat.com Signed-off-by: Linus Torvalds --- include/linux/swapops.h | 2 ++ mm/huge_memory.c | 3 +++ mm/memory.c | 8 ++++++++ mm/migrate.c | 6 ++++++ mm/mprotect.c | 28 +++++++++++++++++----------- mm/rmap.c | 6 ++++++ 6 files changed, 42 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 877fd239b6ff..9a6f06de183b 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -68,6 +68,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) if (pte_swp_soft_dirty(pte)) pte = pte_swp_clear_soft_dirty(pte); + if (pte_swp_uffd_wp(pte)) + pte = pte_swp_clear_uffd_wp(pte); arch_entry = __pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8164787cd51f..6ecd1045113b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2297,6 +2297,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, write = is_write_migration_entry(entry); young = false; soft_dirty = pmd_swp_soft_dirty(old_pmd); + uffd_wp = pmd_swp_uffd_wp(old_pmd); } else { page = pmd_page(old_pmd); if (pmd_dirty(old_pmd)) @@ -2329,6 +2330,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = swp_entry_to_pte(swp_entry); if (soft_dirty) entry = pte_swp_mksoft_dirty(entry); + if (uffd_wp) + entry = pte_swp_mkuffd_wp(entry); } else { entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); entry = maybe_mkwrite(entry, vma); diff --git a/mm/memory.c b/mm/memory.c index f8b1969669b7..8ac9af73e9d2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -733,6 +733,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(*src_pte)) pte = pte_swp_mksoft_dirty(pte); + if (pte_swp_uffd_wp(*src_pte)) + pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } } else if (is_device_private_entry(entry)) { @@ -762,6 +764,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, is_cow_mapping(vm_flags)) { make_device_private_entry_read(&entry); pte = swp_entry_to_pte(entry); + if (pte_swp_uffd_wp(*src_pte)) + pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } } @@ -3098,6 +3102,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) flush_icache_page(vma, page); if (pte_swp_soft_dirty(vmf->orig_pte)) pte = pte_mksoft_dirty(pte); + if (pte_swp_uffd_wp(vmf->orig_pte)) { + pte = pte_mkuffd_wp(pte); + pte = pte_wrprotect(pte); + } set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); vmf->orig_pte = pte; diff --git a/mm/migrate.c b/mm/migrate.c index c1412e04975e..7160c1556f79 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -243,11 +243,15 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, entry = pte_to_swp_entry(*pvmw.pte); if (is_write_migration_entry(entry)) pte = maybe_mkwrite(pte, vma); + else if (pte_swp_uffd_wp(*pvmw.pte)) + pte = pte_mkuffd_wp(pte); if (unlikely(is_zone_device_page(new))) { if (is_device_private_page(new)) { entry = make_device_private_entry(new, pte_write(pte)); pte = swp_entry_to_pte(entry); + if (pte_swp_uffd_wp(*pvmw.pte)) + pte = pte_mkuffd_wp(pte); } } @@ 
-2338,6 +2342,8 @@ again: swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pte)) swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pte)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, addr, ptep, swp_pte); /* diff --git a/mm/mprotect.c b/mm/mprotect.c index e4fa41a24bec..1d823b050329 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -139,11 +139,11 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, } ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); pages++; - } else if (IS_ENABLED(CONFIG_MIGRATION)) { + } else if (is_swap_pte(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); + pte_t newpte; if (is_write_migration_entry(entry)) { - pte_t newpte; /* * A protection check is difficult so * just be safe and disable write @@ -152,22 +152,28 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, newpte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(oldpte)) newpte = pte_swp_mksoft_dirty(newpte); - set_pte_at(vma->vm_mm, addr, pte, newpte); - - pages++; - } - - if (is_write_device_private_entry(entry)) { - pte_t newpte; - + if (pte_swp_uffd_wp(oldpte)) + newpte = pte_swp_mkuffd_wp(newpte); + } else if (is_write_device_private_entry(entry)) { /* * We do not preserve soft-dirtiness. See * copy_one_pte() for explanation. */ make_device_private_entry_read(&entry); newpte = swp_entry_to_pte(entry); - set_pte_at(vma->vm_mm, addr, pte, newpte); + if (pte_swp_uffd_wp(oldpte)) + newpte = pte_swp_mkuffd_wp(newpte); + } else { + newpte = oldpte; + } + if (uffd_wp) + newpte = pte_swp_mkuffd_wp(newpte); + else if (uffd_wp_resolve) + newpte = pte_swp_clear_uffd_wp(newpte); + + if (!pte_same(oldpte, newpte)) { + set_pte_at(vma->vm_mm, addr, pte, newpte); pages++; } } diff --git a/mm/rmap.c b/mm/rmap.c index 374a9bfdbffa..ed8889bf4ede 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1502,6 +1502,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); /* * No need to invalidate here it will synchronize on @@ -1601,6 +1603,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); /* * No need to invalidate here it will synchronize on @@ -1667,6 +1671,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); /* Invalidate as we cleared the pte */ mmu_notifier_invalidate_range(mm, address, -- cgit v1.2.3-58-ga151 From ffd05793963a44bd119311df3c02b191982574ee Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 6 Apr 2020 20:06:09 -0700 Subject: userfaultfd: wp: support write protection for userfault vma range Add API to enable/disable writeprotect a vma range. Unlike mprotect, this doesn't split/merge vmas. 
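For orientation, this is roughly how the UFFDIO_WRITEPROTECT ioctl handler added later in the series is expected to call into the new helper (a sketch only; ctx and uffdio_wp follow fs/userfaultfd.c conventions and are assumptions here, not part of this patch):

	/* in userfaultfd_writeprotect(), after validating uffdio_wp */
	if (mmget_not_zero(ctx->mm)) {
		ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
					  uffdio_wp.range.len,
					  uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP,
					  &ctx->mmap_changing);
		mmput(ctx->mm);
	} else {
		ret = -ESRCH;
	}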
[peterx@redhat.com: - use the helper to find VMA; - return -ENOENT if not found to match mcopy case; - use the new MM_CP_UFFD_WP* flags for change_protection - check against mmap_changing for failures - replace find_dst_vma with vma_find_uffd] Signed-off-by: Shaohua Li Signed-off-by: Andrea Arcangeli Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: Jerome Glisse Reviewed-by: Mike Rapoport Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Kirill A. Shutemov Cc: Mel Gorman Cc: Hugh Dickins Cc: Johannes Weiner Cc: Bobby Powers Cc: Brian Geffon Cc: David Hildenbrand Cc: Denis Plotnikov Cc: "Dr . David Alan Gilbert" Cc: Martin Cracauer Cc: Marty McFadden Cc: Maya Gokhale Cc: Mike Kravetz Cc: Pavel Emelyanov Link: http://lkml.kernel.org/r/20200220163112.11409-13-peterx@redhat.com Signed-off-by: Linus Torvalds --- include/linux/userfaultfd_k.h | 3 +++ mm/userfaultfd.c | 54 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) (limited to 'include/linux') diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index dcd33172b728..a8e5f3ea9bb2 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -41,6 +41,9 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, bool *mmap_changing); +extern int mwriteprotect_range(struct mm_struct *dst_mm, + unsigned long start, unsigned long len, + bool enable_wp, bool *mmap_changing); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 7d6ab05be019..512576e171ce 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -638,3 +638,57 @@ ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, { return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0); } + +int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, + unsigned long len, bool enable_wp, bool *mmap_changing) +{ + struct vm_area_struct *dst_vma; + pgprot_t newprot; + int err; + + /* + * Sanitize the command parameters: + */ + BUG_ON(start & ~PAGE_MASK); + BUG_ON(len & ~PAGE_MASK); + + /* Does the address range wrap, or is the span zero-sized? */ + BUG_ON(start + len <= start); + + down_read(&dst_mm->mmap_sem); + + /* + * If memory mappings are changing because of non-cooperative + * operation (e.g. mremap) running in parallel, bail out and + * request the user to retry later + */ + err = -EAGAIN; + if (mmap_changing && READ_ONCE(*mmap_changing)) + goto out_unlock; + + err = -ENOENT; + dst_vma = find_dst_vma(dst_mm, start, len); + /* + * Make sure the vma is not shared, that the dst range is + * both valid and fully within a single existing vma. + */ + if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) + goto out_unlock; + if (!userfaultfd_wp(dst_vma)) + goto out_unlock; + if (!vma_is_anonymous(dst_vma)) + goto out_unlock; + + if (enable_wp) + newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE)); + else + newprot = vm_get_page_prot(dst_vma->vm_flags); + + change_protection(dst_vma, start, start + len, newprot, + enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE); + + err = 0; +out_unlock: + up_read(&dst_mm->mmap_sem); + return err; +} -- cgit v1.2.3-58-ga151 From 68c3a6ac65f675b4b783635787fa0ed896f5b3d5 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 6 Apr 2020 20:06:40 -0700 Subject: drivers/base/memory.c: drop section_count Patch series "mm: drop superfluous section checks when onlining/offlining". 
Let's drop some superfluous section checks on the onlining/offlining path. This patch (of 3): Since commit c5e79ef561b0 ("mm/memory_hotplug.c: don't allow to online/offline memory blocks with holes") we have a generic check in offline_pages() that disallows offlining memory blocks with holes. Memory blocks with missing sections are just another variant of this type of block. We can stop checking (and especially storing) present sections. A proper error message is now printed explaining why offlining failed. section_count was initially introduced in commit 07681215975e ("Driver core: Add section count to memory_block struct") in order to detect when it is okay to remove a memory block. It was used in commit 26bbe7ef6d5c ("drivers/base/memory.c: prohibit offlining of memory blocks with missing sections") to disallow offlining memory blocks with missing sections. As we refactored creation/removal of memory devices and have a proper check for holes in place, we can drop the section_count. This also removes a leftover comment regarding the mem_sysfs_mutex, which was removed in commit 848e19ad3c33 ("drivers/base/memory.c: drop the mem_sysfs_mutex"). Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Cc: Greg Kroah-Hartman Cc: "Rafael J. Wysocki" Cc: Michal Hocko Cc: Dan Williams Cc: Pavel Tatashin Cc: Anshuman Khandual Link: http://lkml.kernel.org/r/20200127110424.5757-2-david@redhat.com Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 17 +++--------------- include/linux/memory.h | 1 - 2 files changed, 3 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 4086718f6876..086997212dbb 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -267,10 +267,6 @@ static int memory_subsys_offline(struct device *dev) if (mem->state == MEM_OFFLINE) return 0; - /* Can't offline block with non-present sections */ - if (mem->section_count != sections_per_block) - return -EINVAL; - return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); } @@ -627,7 +623,7 @@ static int init_memory_block(struct memory_block **memory, static int add_memory_block(unsigned long base_section_nr) { - int ret, section_count = 0; + int section_count = 0; struct memory_block *mem; unsigned long nr; @@ -638,12 +634,8 @@ static int add_memory_block(unsigned long base_section_nr) if (section_count == 0) return 0; - ret = init_memory_block(&mem, base_memory_block_id(base_section_nr), - MEM_ONLINE); - if (ret) - return ret; - mem->section_count = section_count; - return 0; + return init_memory_block(&mem, base_memory_block_id(base_section_nr), + MEM_ONLINE); } static void unregister_memory(struct memory_block *memory) @@ -679,7 +671,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size) ret = init_memory_block(&mem, block_id, MEM_OFFLINE); if (ret) break; - mem->section_count = sections_per_block; } if (ret) { end_block_id = block_id; @@ -688,7 +679,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size) mem = find_memory_block_by_id(block_id); if (WARN_ON_ONCE(!mem)) continue; - mem->section_count = 0; unregister_memory(mem); } } @@ -717,7 +707,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size) mem = find_memory_block_by_id(block_id); if (WARN_ON_ONCE(!mem)) continue; - mem->section_count = 0; unregister_memory_block_under_nodes(mem); unregister_memory(mem); } diff --git a/include/linux/memory.h b/include/linux/memory.h index 0b8d791b6669..439a89e758d8 100644 ---
a/include/linux/memory.h +++ b/include/linux/memory.h @@ -26,7 +26,6 @@ struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ - int section_count; /* serialized by mem_sysfs_mutex */ int online_type; /* for passing data to online routine */ int phys_device; /* to which fru does this belong? */ struct device dev; -- cgit v1.2.3-58-ga151 From 0a9f9f62316606ee827fa3318e95a1c489d9acf5 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 6 Apr 2020 20:07:06 -0700 Subject: mm/sparse.c: only use subsection map in VMEMMAP case Currently, to support subsection aligned memory region adding for pmem, subsection map is added to track which subsection is present. However, config ZONE_DEVICE depends on SPARSEMEM_VMEMMAP. It means subsection map only makes sense when SPARSEMEM_VMEMMAP enabled. For the classic sparse, it's meaningless. Even worse, it may confuse people when checking code related to the classic sparse. About the classic sparse which doesn't support subsection hotplug, Dan said it's more because the effort and maintenance burden outweighs the benefit. Besides, the current 64 bit ARCHes all enable SPARSEMEM_VMEMMAP_ENABLE by default. Combining the above reasons, no need to provide subsection map and the relevant handling for the classic sparse. Let's remove them. Signed-off-by: Baoquan He Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Cc: Dan Williams Cc: Michal Hocko Cc: Pankaj Gupta Cc: Wei Yang Link: http://lkml.kernel.org/r/20200312124414.439-4-bhe@redhat.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 2 ++ mm/sparse.c | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 42b77d3b68e8..f3f264826423 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1143,7 +1143,9 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) struct mem_section_usage { +#ifdef CONFIG_SPARSEMEM_VMEMMAP DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); +#endif /* See declaration of similar field in struct zone */ unsigned long pageblock_flags[0]; }; diff --git a/mm/sparse.c b/mm/sparse.c index 01204c3b4649..095ecf5bb6d3 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -209,6 +209,7 @@ static inline unsigned long first_present_section_nr(void) return next_present_section_nr(-1); } +#ifdef CONFIG_SPARSEMEM_VMEMMAP static void subsection_mask_set(unsigned long *map, unsigned long pfn, unsigned long nr_pages) { @@ -243,6 +244,11 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) nr_pages -= pfns; } } +#else +void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) +{ +} +#endif /* Record a memory area against a node. 
*/ void __init memory_present(int nid, unsigned long start, unsigned long end) @@ -705,6 +711,7 @@ static void free_map_bootmem(struct page *memmap) } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ +#ifdef CONFIG_SPARSEMEM_VMEMMAP static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages) { DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; @@ -731,6 +738,17 @@ static bool is_subsection_map_empty(struct mem_section *ms) return bitmap_empty(&ms->usage->subsection_map[0], SUBSECTIONS_PER_SECTION); } +#else +static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages) +{ + return 0; +} + +static bool is_subsection_map_empty(struct mem_section *ms) +{ + return true; +} +#endif static void section_deactivate(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap) @@ -792,6 +810,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages, ms->section_mem_map = (unsigned long)NULL; } +#ifdef CONFIG_SPARSEMEM_VMEMMAP static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) { struct mem_section *ms = __pfn_to_section(pfn); @@ -813,6 +832,12 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) return rc; } +#else +static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) +{ + return 0; +} +#endif static struct page * __meminit section_activate(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap) -- cgit v1.2.3-58-ga151 From 956f8b445061667c3545baa24778f890d1d522f4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 6 Apr 2020 20:07:16 -0700 Subject: drivers/base/memory: rename MMOP_ONLINE_KEEP to MMOP_ONLINE Patch series "mm/memory_hotplug: allow to specify a default online_type", v3. Distributions nowadays use udev rules ([1] [2]) to specify if and how to online hotplugged memory. The rules seem to get more complex with many special cases. Due to the various special cases, CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE cannot be used. All memory hotplug is handled via udev rules. Every time we hotplug memory, the udev rule will come to the same conclusion. Especially Hyper-V (but also soon virtio-mem) add a lot of memory in separate memory blocks and wait for memory to get onlined by user space before continuing to add more memory blocks (to not add memory faster than it is getting onlined). This of course slows down the whole memory hotplug process. To make the job of distributions easier and to avoid udev rules that get more and more complicated, let's extend the mechanism provided by - /sys/devices/system/memory/auto_online_blocks - "memhp_default_state=" on the kernel cmdline to be able to specify also "online_movable" as well as "online_kernel" === Example /usr/libexec/config-memhotplug === #!/bin/bash VIRT=`systemd-detect-virt --vm` ARCH=`uname -p` sense_virtio_mem() { if [ -d "/sys/bus/virtio/drivers/virtio_mem/" ]; then DEVICES=`find /sys/bus/virtio/drivers/virtio_mem/ -maxdepth 1 -type l | wc -l` if [ $DEVICES != "0" ]; then return 0 fi fi return 1 } if [ ! 
-e "/sys/devices/system/memory/auto_online_blocks" ]; then echo "Memory hotplug configuration support missing in the kernel" exit 1 fi if grep "memhp_default_state=" /proc/cmdline > /dev/null; then echo "Memory hotplug configuration overridden in kernel cmdline (memhp_default_state=)" exit 1 fi if [ $VIRT == "microsoft" ]; then echo "Detected Hyper-V on $ARCH" # Hyper-V wants all memory in ZONE_NORMAL ONLINE_TYPE="online_kernel" elif sense_virtio_mem; then echo "Detected virtio-mem on $ARCH" # virtio-mem wants all memory in ZONE_NORMAL ONLINE_TYPE="online_kernel" elif [ $ARCH == "s390x" ] || [ $ARCH == "s390" ]; then echo "Detected $ARCH" # standby memory should not be onlined automatically ONLINE_TYPE="offline" elif [ $ARCH == "ppc64" ] || [ $ARCH == "ppc64le" ]; then echo "Detected" $ARCH # PPC64 onlines all hotplugged memory right from the kernel ONLINE_TYPE="offline" elif [ $VIRT == "none" ]; then echo "Detected bare-metal on $ARCH" # Bare metal users expect hotplugged memory to be unpluggable. We assume # that ZONE imbalances on such enterpise servers cannot happen and is # properly documented ONLINE_TYPE="online_movable" else # TODO: Hypervisors that want to unplug DIMMs and can guarantee that ZONE # imbalances won't happen echo "Detected $VIRT on $ARCH" # Usually, ballooning is used in virtual environments, so memory should go to # ZONE_NORMAL. However, sometimes "movable_node" is relevant. ONLINE_TYPE="online" fi echo "Selected online_type:" $ONLINE_TYPE # Configure what to do with memory that will be hotplugged in the future echo $ONLINE_TYPE 2>/dev/null > /sys/devices/system/memory/auto_online_blocks if [ $? != "0" ]; then echo "Memory hotplug cannot be configured (e.g., old kernel or missing permissions)" # A backup udev rule should handle old kernels if necessary exit 1 fi # Process all already pluggedd blocks (e.g., DIMMs, but also Hyper-V or virtio-mem) if [ $ONLINE_TYPE != "offline" ]; then for MEMORY in /sys/devices/system/memory/memory*; do STATE=`cat $MEMORY/state` if [ $STATE == "offline" ]; then echo $ONLINE_TYPE > $MEMORY/state fi done fi === Example /usr/lib/systemd/system/config-memhotplug.service === [Unit] Description=Configure memory hotplug behavior DefaultDependencies=no Conflicts=shutdown.target Before=sysinit.target shutdown.target After=systemd-modules-load.service ConditionPathExists=|/sys/devices/system/memory/auto_online_blocks [Service] ExecStart=/usr/libexec/config-memhotplug Type=oneshot TimeoutSec=0 RemainAfterExit=yes [Install] WantedBy=sysinit.target === Example modification to the 40-redhat.rules [2] === : diff --git a/40-redhat.rules b/40-redhat.rules-new : index 2c690e5..168fd03 100644 : --- a/40-redhat.rules : +++ b/40-redhat.rules-new : @@ -6,6 +6,9 @@ SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online} : # Memory hotadd request : SUBSYSTEM!="memory", GOTO="memory_hotplug_end" : ACTION!="add", GOTO="memory_hotplug_end" : +# memory hotplug behavior configured : +PROGRAM=="grep online /sys/devices/system/memory/auto_online_blocks", GOTO="memory_hotplug_end" : + : PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" : : ENV{.state}="online" === [1] https://github.com/lnykryn/systemd-rhel/pull/281 [2] https://github.com/lnykryn/systemd-rhel/blob/staging/rules/40-redhat.rules This patch (of 8): The name is misleading and it's not really clear what is "kept". Let's just name it like the online_type name we expose to user space ("online"). Add some documentation to the types. 
Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Reviewed-by: Wei Yang Reviewed-by: Baoquan He Acked-by: Pankaj Gupta Cc: Greg Kroah-Hartman Cc: Michal Hocko Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: Wei Yang Cc: Vitaly Kuznetsov Cc: Yumei Huang Cc: Igor Mammedov Cc: Eduardo Habkost Cc: Benjamin Herrenschmidt Cc: Haiyang Zhang Cc: K. Y. Srinivasan Cc: Michael Ellerman (powerpc) Cc: Paul Mackerras Cc: Stephen Hemminger Cc: Wei Liu Link: http://lkml.kernel.org/r/20200319131221.14044-1-david@redhat.com Link: http://lkml.kernel.org/r/20200317104942.11178-2-david@redhat.com Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 9 +++++---- include/linux/memory_hotplug.h | 6 +++++- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 96c80dfaac90..156b89b14fcc 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -208,7 +208,7 @@ static int memory_subsys_online(struct device *dev) * attribute and need to set the online_type. */ if (mem->online_type < 0) - mem->online_type = MMOP_ONLINE_KEEP; + mem->online_type = MMOP_ONLINE; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); @@ -243,7 +243,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, else if (sysfs_streq(buf, "online_movable")) online_type = MMOP_ONLINE_MOVABLE; else if (sysfs_streq(buf, "online")) - online_type = MMOP_ONLINE_KEEP; + online_type = MMOP_ONLINE; else if (sysfs_streq(buf, "offline")) online_type = MMOP_OFFLINE; else { @@ -254,7 +254,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, switch (online_type) { case MMOP_ONLINE_KERNEL: case MMOP_ONLINE_MOVABLE: - case MMOP_ONLINE_KEEP: + case MMOP_ONLINE: /* mem->online_type is protected by device_hotplug_lock */ mem->online_type = online_type; ret = device_online(&mem->dev); @@ -334,7 +334,8 @@ static ssize_t valid_zones_show(struct device *dev, } nid = mem->nid; - default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); + default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn, + nr_pages); strcat(buf, default_zone->name); print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index f4d59155f3d4..261dbf010d5d 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -47,9 +47,13 @@ enum { /* Types for control the zone type of onlined and offlined memory */ enum { + /* Offline the memory. */ MMOP_OFFLINE = -1, - MMOP_ONLINE_KEEP, + /* Online the memory. Zone depends, see default_zone_for_pfn(). */ + MMOP_ONLINE, + /* Online the memory to ZONE_NORMAL. */ MMOP_ONLINE_KERNEL, + /* Online the memory to ZONE_MOVABLE. */ MMOP_ONLINE_MOVABLE, }; -- cgit v1.2.3-58-ga151 From efc978ad0e05ed6401c7854811750bf55b67f4b9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 6 Apr 2020 20:07:20 -0700 Subject: drivers/base/memory: map MMOP_OFFLINE to 0 Historically, we used the value -1. Just treat 0 as the special case now. Clarify a comment (which was wrong: when we come via device_online() the first time, the online_type would have been 0 / MEM_ONLINE). The default is now always MMOP_OFFLINE. This removes the last user of the manual "-1", which didn't use the enum value. This is a preparation to use the online_type as an array index. 
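To illustrate the array-index usage this prepares for, here is a sketch matching the online_type_to_str table and lookup that appear in the "allow to specify a default online_type" diff later in this series; treat it as illustrative, the authoritative version lives in drivers/base/memory.c:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/memory_hotplug.h>
#include <linux/string.h>

static const char * const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int memhp_online_type_from_str(const char *str)
{
	int i;

	/* This works precisely because MMOP_OFFLINE is now 0: every
	 * MMOP_* value is a valid, dense index into the string table. */
	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}
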
Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Reviewed-by: Wei Yang Reviewed-by: Baoquan He Acked-by: Michal Hocko Acked-by: Pankaj Gupta Cc: Greg Kroah-Hartman Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: Wei Yang Cc: Benjamin Herrenschmidt Cc: Eduardo Habkost Cc: Haiyang Zhang Cc: Igor Mammedov Cc: "K. Y. Srinivasan" Cc: Michael Ellerman Cc: Paul Mackerras Cc: Stephen Hemminger Cc: Vitaly Kuznetsov Cc: Wei Liu Cc: Yumei Huang Link: http://lkml.kernel.org/r/20200317104942.11178-3-david@redhat.com Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 11 ++++------- include/linux/memory_hotplug.h | 2 +- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 156b89b14fcc..f65f3d53dc64 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -203,17 +203,14 @@ static int memory_subsys_online(struct device *dev) return 0; /* - * If we are called from state_store(), online_type will be - * set >= 0 Otherwise we were called from the device online - * attribute and need to set the online_type. + * When called via device_online() without configuring the online_type, + * we want to default to MMOP_ONLINE. */ - if (mem->online_type < 0) + if (mem->online_type == MMOP_OFFLINE) mem->online_type = MMOP_ONLINE; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); - - /* clear online_type */ - mem->online_type = -1; + mem->online_type = MMOP_OFFLINE; return ret; } diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 261dbf010d5d..c2e06ed5e0e9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -48,7 +48,7 @@ enum { /* Types for control the zone type of onlined and offlined memory */ enum { /* Offline the memory. */ - MMOP_OFFLINE = -1, + MMOP_OFFLINE = 0, /* Online the memory. Zone depends, see default_zone_for_pfn(). */ MMOP_ONLINE, /* Online the memory to ZONE_NORMAL. */ -- cgit v1.2.3-58-ga151 From 862919e568356cc36288a11b42cd88ec3a7100e9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 6 Apr 2020 20:07:40 -0700 Subject: mm/memory_hotplug: convert memhp_auto_online to store an online_type ... and rename it to memhp_default_online_type. This is a preparation for more detailed default online behavior. Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Reviewed-by: Wei Yang Reviewed-by: Baoquan He Acked-by: Michal Hocko Acked-by: Pankaj Gupta Cc: Greg Kroah-Hartman Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: Wei Yang Cc: Benjamin Herrenschmidt Cc: Eduardo Habkost Cc: Haiyang Zhang Cc: Igor Mammedov Cc: "K. Y. 
Srinivasan" Cc: Michael Ellerman Cc: Paul Mackerras Cc: Stephen Hemminger Cc: Vitaly Kuznetsov Cc: Wei Liu Cc: Yumei Huang Link: http://lkml.kernel.org/r/20200317104942.11178-8-david@redhat.com Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 10 ++++------ include/linux/memory_hotplug.h | 3 ++- mm/memory_hotplug.c | 11 ++++++----- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 1c90bdf60d85..7d2f829d00d7 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -378,10 +378,8 @@ static DEVICE_ATTR_RO(block_size_bytes); static ssize_t auto_online_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (memhp_auto_online) - return sprintf(buf, "online\n"); - else - return sprintf(buf, "offline\n"); + return sprintf(buf, "%s\n", + online_type_to_str[memhp_default_online_type]); } static ssize_t auto_online_blocks_store(struct device *dev, @@ -389,9 +387,9 @@ static ssize_t auto_online_blocks_store(struct device *dev, const char *buf, size_t count) { if (sysfs_streq(buf, "online")) - memhp_auto_online = true; + memhp_default_online_type = MMOP_ONLINE; else if (sysfs_streq(buf, "offline")) - memhp_auto_online = false; + memhp_default_online_type = MMOP_OFFLINE; else return -EINVAL; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index c2e06ed5e0e9..c6e090b34c4b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -117,7 +117,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_restrictions *restrictions); extern u64 max_mem_size; -extern bool memhp_auto_online; +/* Default online_type (MMOP_*) when new memory blocks are added. */ +extern int memhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9691cbb4383e..9436f7e6257a 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -67,17 +67,17 @@ void put_online_mems(void) bool movable_node_enabled = false; #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE -bool memhp_auto_online; +int memhp_default_online_type = MMOP_OFFLINE; #else -bool memhp_auto_online = true; +int memhp_default_online_type = MMOP_ONLINE; #endif static int __init setup_memhp_default_state(char *str) { if (!strcmp(str, "online")) - memhp_auto_online = true; + memhp_default_online_type = MMOP_ONLINE; else if (!strcmp(str, "offline")) - memhp_auto_online = false; + memhp_default_online_type = MMOP_OFFLINE; return 1; } @@ -990,6 +990,7 @@ static int check_hotplug_memory_range(u64 start, u64 size) static int online_memory_block(struct memory_block *mem, void *arg) { + mem->online_type = memhp_default_online_type; return device_online(&mem->dev); } @@ -1062,7 +1063,7 @@ int __ref add_memory_resource(int nid, struct resource *res) mem_hotplug_done(); /* online pages if requested */ - if (memhp_auto_online) + if (memhp_default_online_type != MMOP_OFFLINE) walk_memory_blocks(start, size, NULL, online_memory_block); return ret; -- cgit v1.2.3-58-ga151 From 5f47adf762b78cae97de58d9ff01d2d44db09467 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 6 Apr 2020 20:07:44 -0700 Subject: mm/memory_hotplug: allow to specify a default online_type For now, distributions implement advanced udev rules to essentially - Don't online any hotplugged memory (s390x) - Online all memory to ZONE_NORMAL (e.g., most virt 
environments like hyperv) - Online all memory to ZONE_MOVABLE in case the zone imbalance is taken care of (e.g., bare metal, special virt environments) In summary: All memory is usually onlined the same way; however, the kernel always has to ask user space to come up with the same answer. E.g., Hyper-V always waits for a memory block to get onlined before continuing, otherwise it might end up adding memory faster than onlining it, which can result in strange OOM situations. This waiting slows down the addition of larger amounts of memory. Let's allow to specify a default online_type, not just "online" and "offline". This allows distributions to configure the default online_type when booting up and be done with it. We can now specify "offline", "online", "online_movable" and "online_kernel" via - "memhp_default_state=" on the kernel cmdline - /sys/devices/system/memory/auto_online_blocks just like we are able to specify for a single memory block via /sys/devices/system/memory/memoryX/state Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Reviewed-by: Wei Yang Reviewed-by: Baoquan He Acked-by: Michal Hocko Acked-by: Pankaj Gupta Cc: Greg Kroah-Hartman Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: Wei Yang Cc: Benjamin Herrenschmidt Cc: Eduardo Habkost Cc: Haiyang Zhang Cc: Igor Mammedov Cc: "K. Y. Srinivasan" Cc: Michael Ellerman Cc: Paul Mackerras Cc: Stephen Hemminger Cc: Vitaly Kuznetsov Cc: Wei Liu Cc: Yumei Huang Link: http://lkml.kernel.org/r/20200317104942.11178-9-david@redhat.com Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 11 +++++------ include/linux/memory_hotplug.h | 2 ++ mm/memory_hotplug.c | 8 ++++---- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 7d2f829d00d7..dbec3a05590a 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -34,7 +34,7 @@ static const char *const online_type_to_str[] = { [MMOP_ONLINE_MOVABLE] = "online_movable", }; -static int memhp_online_type_from_str(const char *str) +int memhp_online_type_from_str(const char *str) { int i; @@ -386,13 +386,12 @@ static ssize_t auto_online_blocks_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - if (sysfs_streq(buf, "online")) - memhp_default_online_type = MMOP_ONLINE; - else if (sysfs_streq(buf, "offline")) - memhp_default_online_type = MMOP_OFFLINE; - else + const int online_type = memhp_online_type_from_str(buf); + + if (online_type < 0) return -EINVAL; + memhp_default_online_type = online_type; return count; } diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index c6e090b34c4b..ef55115320fb 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -117,6 +117,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_restrictions *restrictions); extern u64 max_mem_size; +extern int memhp_online_type_from_str(const char *str); + /* Default online_type (MMOP_*) when new memory blocks are added. 
*/ extern int memhp_default_online_type; /* If movable_node boot option specified */ diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9436f7e6257a..2fb78c5ebaf3 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -74,10 +74,10 @@ int memhp_default_online_type = MMOP_ONLINE; static int __init setup_memhp_default_state(char *str) { - if (!strcmp(str, "online")) - memhp_default_online_type = MMOP_ONLINE; - else if (!strcmp(str, "offline")) - memhp_default_online_type = MMOP_OFFLINE; + const int online_type = memhp_online_type_from_str(str); + + if (online_type >= 0) + memhp_default_online_type = online_type; return 1; } -- cgit v1.2.3-58-ga151 From 552657b7b3343851916fde7e4fd6bfb6516d2bcb Mon Sep 17 00:00:00 2001 From: chenqiwu Date: Mon, 6 Apr 2020 20:08:33 -0700 Subject: mm: fix ambiguous comments for better code readability The @pfn parameter of remap_pfn_range(), passed from the caller, is actually a page-frame number converted from the corresponding physical address of kernel memory; the original comment is ambiguous and may mislead users. Meanwhile, there is a confusing typo, "VMM", in the comment of vm_area_struct. Fixing both will make the code more readable. Signed-off-by: chenqiwu Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Link: http://lkml.kernel.org/r/1583026921-15279-1-git-send-email-qiwuchen55@gmail.com Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 4 ++-- mm/memory.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index dd555e6d23f3..4aba6c0c2ba8 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -289,8 +289,8 @@ struct vm_userfaultfd_ctx {}; #endif /* CONFIG_USERFAULTFD */ /* - * This struct defines a memory VMM memory area. There is one of these - * per VM-area/task. A VM area is any part of the process virtual memory + * This struct describes a virtual memory area. There is one of these - * per VM-area/task. A VM area is any part of the process virtual memory * space that has a special rule for the page-fault handlers (ie a shared * library, the executable area etc). */ diff --git a/mm/memory.c b/mm/memory.c index 8ac9af73e9d2..19874d133a66 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1952,7 +1952,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, * @vma: user vma to map to * @addr: target user address to start at * @pfn: page frame number of kernel physical memory address - * @size: size of map area + * @size: size of mapping area * @prot: page protection flags for this mapping * * Note: this is only safe if the mm semaphore is held when called. -- cgit v1.2.3-58-ga151 From 3f3673d7d324d872d9d8ddb73b3e5e47fbf12e0d Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 6 Apr 2020 20:08:43 -0700 Subject: include/linux/swapops.h: correct guards for non_swap_entry() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If CONFIG_DEVICE_PRIVATE is defined, but neither CONFIG_MEMORY_FAILURE nor CONFIG_MIGRATION, then non_swap_entry() will return 0, meaning that the condition (non_swap_entry(entry) && is_device_private_entry(entry)) in zap_pte_range() will never be true even if the entry is a device private one. Equally, any other code depending on non_swap_entry() will not function as expected. I originally spotted this just by looking at the code; I haven't actually observed any problems. 
Looking a bit more closely, it appears that actually this situation (currently at least) cannot occur: DEVICE_PRIVATE depends on ZONE_DEVICE; ZONE_DEVICE depends on MEMORY_HOTREMOVE; MEMORY_HOTREMOVE depends on MIGRATION. Fixes: 5042db43cc26 ("mm/ZONE_DEVICE: new type of ZONE_DEVICE for unaddressable memory") Signed-off-by: Steven Price Signed-off-by: Andrew Morton Cc: Jérôme Glisse Cc: Arnd Bergmann Cc: Dan Williams Cc: John Hubbard Link: http://lkml.kernel.org/r/20200305130550.22693-1-steven.price@arm.com Signed-off-by: Linus Torvalds --- include/linux/swapops.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 9a6f06de183b..d9b7c9132c2f 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -350,7 +350,8 @@ static inline void num_poisoned_pages_inc(void) } #endif -#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ + defined(CONFIG_DEVICE_PRIVATE) static inline int non_swap_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; -- cgit v1.2.3-58-ga151 From 1d90b6491014ead775146726b81a78ed993c3188 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Mon, 6 Apr 2020 20:08:46 -0700 Subject: include/linux/memremap.h: remove stale comments Fixes: 80a72d0af05a ("memremap: remove the data field in struct dev_pagemap") Fixes: fdc029b19dfd ("memremap: remove the dev field in struct dev_pagemap") Signed-off-by: Ira Weiny Signed-off-by: Andrew Morton Reviewed-by: Christoph Hellwig Cc: Jason Gunthorpe Cc: Dan Williams Link: http://lkml.kernel.org/r/20200316213205.145333-1-ira.weiny@intel.com Signed-off-by: Linus Torvalds --- include/linux/memremap.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 60d97e8fd3c0..8b37c4c9222c 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -98,8 +98,6 @@ struct dev_pagemap_ops { * @ref: reference count that pins the devm_memremap_pages() mapping * @internal_ref: internal reference if @ref is not provided by the caller * @done: completion for @internal_ref - * @dev: host device of the mapping for debug - * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h * @flags: PGMAP_* flags to specify defailed behavior * @ops: method table -- cgit v1.2.3-58-ga151 From 6218d740ac1bc723d57900b865d3e52b83550c2b Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 6 Apr 2020 20:08:52 -0700 Subject: mm: remove dummy struct bootmem_data/bootmem_data_t Both bootmem_data and bootmem_data_t structures are no longer defined. Remove the dummy forward declarations. Signed-off-by: Waiman Long Signed-off-by: Andrew Morton Reviewed-by: Baoquan He Acked-by: Mike Rapoport Link: http://lkml.kernel.org/r/20200326022617.26208-1-longman@redhat.com Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/mmzone.h | 2 -- include/linux/mmzone.h | 1 - 2 files changed, 3 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h index 7ee144f484f1..9b521c857436 100644 --- a/arch/alpha/include/asm/mmzone.h +++ b/arch/alpha/include/asm/mmzone.h @@ -8,8 +8,6 @@ #include -struct bootmem_data_t; /* stupid forward decl. */ - /* * Following are macros that are specific to this numa platform. 
*/ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f3f264826423..e9892bf9eba9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -664,7 +664,6 @@ struct deferred_split { * Memory statistics and page replacement data structures are maintained on a * per-zone basis. */ -struct bootmem_data; typedef struct pglist_data { struct zone node_zones[MAX_NR_ZONES]; struct zonelist node_zonelists[MAX_ZONELISTS]; -- cgit v1.2.3-58-ga151 From d919b33dafb3e222d23671b2bb06d119aede625f Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 6 Apr 2020 20:09:01 -0700 Subject: proc: faster open/read/close with "permanent" files Now that "struct proc_ops" exists, we can start putting stuff there which could not fly with the VFS "struct file_operations"... Most of the fs/proc/inode.c file is dedicated to making open/read/.../close reliable in the event of disappearing /proc entries, which usually happens if a module is getting removed. Files like /proc/cpuinfo which never disappear simply do not need such protection. Save 2 atomic ops, 1 allocation, 1 free per open/read/close sequence for such "permanent" files. Enable "permanent" flag for /proc/cpuinfo /proc/kmsg /proc/modules /proc/slabinfo /proc/stat /proc/sysvipc/* /proc/swaps More will come once I figure out a foolproof way to prevent module authors from marking their stuff "permanent" for performance reasons when it is not. This should help with scalability: benchmark is "read /proc/cpuinfo R times by N threads scattered over the system".

	N	R	t, s (before)	t, s (after)
	-----------------------------------------------------
	64	4096	1.582458	1.530502	-3.2%
	256	4096	6.371926	6.125168	-3.9%
	1024	4096	25.64888	24.47528	-4.6%

Benchmark source (header names reconstructed from usage; they were stripped in transit):

#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

#include <fcntl.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

const int NR_CPUS = sysconf(_SC_NPROCESSORS_ONLN);
int N;
const char *filename;
int R;

int xxx = 0;

int glue(int n)
{
	cpu_set_t m;

	CPU_ZERO(&m);
	CPU_SET(n, &m);
	return sched_setaffinity(0, sizeof(cpu_set_t), &m);
}

void f(int n)
{
	glue(n % NR_CPUS);

	while (*(volatile int *)&xxx == 0) {
	}

	for (int i = 0; i < R; i++) {
		int fd = open(filename, O_RDONLY);
		char buf[4096];
		ssize_t rv = read(fd, buf, sizeof(buf));
		asm volatile ("" :: "g" (rv));
		close(fd);
	}
}

int main(int argc, char *argv[])
{
	if (argc < 4) {
		std::cerr << "usage: " << argv[0] << ' ' << "N /proc/filename R ";
		return 1;
	}

	N = atoi(argv[1]);
	filename = argv[2];
	R = atoi(argv[3]);

	for (int i = 0; i < NR_CPUS; i++) {
		if (glue(i) == 0)
			break;
	}

	std::vector<std::thread> T;
	T.reserve(N);
	for (int i = 0; i < N; i++) {
		T.emplace_back(f, i);
	}

	auto t0 = std::chrono::system_clock::now();
	{
		*(volatile int *)&xxx = 1;
		for (auto& t: T) {
			t.join();
		}
	}
	auto t1 = std::chrono::system_clock::now();

	std::chrono::duration<double> dt = t1 - t0;
	std::cout << dt.count() << ' ';
	return 0;
}

P.S.: An explicit randomization marker is added because adding a non-function pointer would silently disable structure layout randomization. 
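For illustration, a hypothetical built-in /proc entry opting in to the fast path would look roughly like this sketch (the "example" names are invented, not from the patch; note the flag is defined as 0 under MODULE, so modules cannot mark entries permanent):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT, /* entry is never removed */
	.proc_open = example_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
};

/* Registered once at boot and never removed:
 * proc_create("example", 0444, NULL, &example_proc_ops);
 */
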
[akpm@linux-foundation.org: coding style fixes] Reported-by: kbuild test robot Reported-by: Dan Carpenter Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Cc: Al Viro Cc: Joe Perches Link: http://lkml.kernel.org/r/20200222201539.GA22576@avx2 Signed-off-by: Linus Torvalds --- fs/proc/cpuinfo.c | 1 + fs/proc/generic.c | 31 +++++++- fs/proc/inode.c | 187 +++++++++++++++++++++++++++++++++++------------- fs/proc/internal.h | 6 ++ fs/proc/kmsg.c | 1 + fs/proc/stat.c | 1 + include/linux/proc_fs.h | 17 ++++- ipc/util.c | 1 + kernel/module.c | 1 + mm/slab_common.c | 1 + mm/swapfile.c | 1 + 11 files changed, 194 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c index c1dea9b8222e..d0989a443c77 100644 --- a/fs/proc/cpuinfo.c +++ b/fs/proc/cpuinfo.c @@ -17,6 +17,7 @@ static int cpuinfo_open(struct inode *inode, struct file *file) } static const struct proc_ops cpuinfo_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = cpuinfo_open, .proc_read = seq_read, .proc_lseek = seq_lseek, diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 3faed94e4b65..4ed6dabdf6ff 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -531,6 +531,12 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, return p; } +static inline void pde_set_flags(struct proc_dir_entry *pde) +{ + if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) + pde->flags |= PROC_ENTRY_PERMANENT; +} + struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops, void *data) @@ -541,6 +547,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, if (!p) return NULL; p->proc_ops = proc_ops; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_data); @@ -572,6 +579,7 @@ static int proc_seq_release(struct inode *inode, struct file *file) } static const struct proc_ops proc_seq_ops = { + /* not permanent -- can call into arbitrary seq_operations */ .proc_open = proc_seq_open, .proc_read = seq_read, .proc_lseek = seq_lseek, @@ -602,6 +610,7 @@ static int proc_single_open(struct inode *inode, struct file *file) } static const struct proc_ops proc_single_ops = { + /* not permanent -- can call into arbitrary ->single_show */ .proc_open = proc_single_open, .proc_read = seq_read, .proc_lseek = seq_lseek, @@ -662,9 +671,13 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) de = pde_subdir_find(parent, fn, len); if (de) { - rb_erase(&de->subdir_node, &parent->subdir); - if (S_ISDIR(de->mode)) { - parent->nlink--; + if (unlikely(pde_is_permanent(de))) { + WARN(1, "removing permanent /proc entry '%s'", de->name); + de = NULL; + } else { + rb_erase(&de->subdir_node, &parent->subdir); + if (S_ISDIR(de->mode)) + parent->nlink--; } } write_unlock(&proc_subdir_lock); @@ -700,12 +713,24 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) write_unlock(&proc_subdir_lock); return -ENOENT; } + if (unlikely(pde_is_permanent(root))) { + write_unlock(&proc_subdir_lock); + WARN(1, "removing permanent /proc entry '%s/%s'", + root->parent->name, root->name); + return -EINVAL; + } rb_erase(&root->subdir_node, &parent->subdir); de = root; while (1) { next = pde_subdir_first(de); if (next) { + if (unlikely(pde_is_permanent(root))) { + write_unlock(&proc_subdir_lock); + WARN(1, "removing permanent /proc entry '%s/%s'", + next->parent->name, next->name); + return -EINVAL; + } rb_erase(&next->subdir_node, 
&de->subdir); de = next; continue; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 05d31c464bee..fb4cace9ea41 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -259,135 +259,204 @@ void proc_entry_rundown(struct proc_dir_entry *de) spin_unlock(&de->pde_unload_lock); } +static loff_t pde_lseek(struct proc_dir_entry *pde, struct file *file, loff_t offset, int whence) +{ + typeof_member(struct proc_ops, proc_lseek) lseek; + + lseek = pde->proc_ops->proc_lseek; + if (!lseek) + lseek = default_llseek; + return lseek(file, offset, whence); +} + static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) { struct proc_dir_entry *pde = PDE(file_inode(file)); loff_t rv = -EINVAL; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_lseek) lseek; - lseek = pde->proc_ops->proc_lseek; - if (!lseek) - lseek = default_llseek; - rv = lseek(file, offset, whence); + if (pde_is_permanent(pde)) { + return pde_lseek(pde, file, offset, whence); + } else if (use_pde(pde)) { + rv = pde_lseek(pde, file, offset, whence); unuse_pde(pde); } return rv; } +static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + typeof_member(struct proc_ops, proc_read) read; + + read = pde->proc_ops->proc_read; + if (read) + return read(file, buf, count, ppos); + return -EIO; +} + static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct proc_dir_entry *pde = PDE(file_inode(file)); ssize_t rv = -EIO; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_read) read; - read = pde->proc_ops->proc_read; - if (read) - rv = read(file, buf, count, ppos); + if (pde_is_permanent(pde)) { + return pde_read(pde, file, buf, count, ppos); + } else if (use_pde(pde)) { + rv = pde_read(pde, file, buf, count, ppos); unuse_pde(pde); } return rv; } +static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + typeof_member(struct proc_ops, proc_write) write; + + write = pde->proc_ops->proc_write; + if (write) + return write(file, buf, count, ppos); + return -EIO; +} + static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct proc_dir_entry *pde = PDE(file_inode(file)); ssize_t rv = -EIO; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_write) write; - write = pde->proc_ops->proc_write; - if (write) - rv = write(file, buf, count, ppos); + if (pde_is_permanent(pde)) { + return pde_write(pde, file, buf, count, ppos); + } else if (use_pde(pde)) { + rv = pde_write(pde, file, buf, count, ppos); unuse_pde(pde); } return rv; } +static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts) +{ + typeof_member(struct proc_ops, proc_poll) poll; + + poll = pde->proc_ops->proc_poll; + if (poll) + return poll(file, pts); + return DEFAULT_POLLMASK; +} + static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts) { struct proc_dir_entry *pde = PDE(file_inode(file)); __poll_t rv = DEFAULT_POLLMASK; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_poll) poll; - poll = pde->proc_ops->proc_poll; - if (poll) - rv = poll(file, pts); + if (pde_is_permanent(pde)) { + return pde_poll(pde, file, pts); + } else if (use_pde(pde)) { + rv = pde_poll(pde, file, pts); unuse_pde(pde); } return rv; } +static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg) +{ + typeof_member(struct proc_ops, 
proc_ioctl) ioctl; + + ioctl = pde->proc_ops->proc_ioctl; + if (ioctl) + return ioctl(file, cmd, arg); + return -ENOTTY; +} + static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct proc_dir_entry *pde = PDE(file_inode(file)); long rv = -ENOTTY; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_ioctl) ioctl; - ioctl = pde->proc_ops->proc_ioctl; - if (ioctl) - rv = ioctl(file, cmd, arg); + if (pde_is_permanent(pde)) { + return pde_ioctl(pde, file, cmd, arg); + } else if (use_pde(pde)) { + rv = pde_ioctl(pde, file, cmd, arg); unuse_pde(pde); } return rv; } #ifdef CONFIG_COMPAT +static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg) +{ + typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl; + + compat_ioctl = pde->proc_ops->proc_compat_ioctl; + if (compat_ioctl) + return compat_ioctl(file, cmd, arg); + return -ENOTTY; +} + static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct proc_dir_entry *pde = PDE(file_inode(file)); long rv = -ENOTTY; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl; - - compat_ioctl = pde->proc_ops->proc_compat_ioctl; - if (compat_ioctl) - rv = compat_ioctl(file, cmd, arg); + if (pde_is_permanent(pde)) { + return pde_compat_ioctl(pde, file, cmd, arg); + } else if (use_pde(pde)) { + rv = pde_compat_ioctl(pde, file, cmd, arg); unuse_pde(pde); } return rv; } #endif +static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma) +{ + typeof_member(struct proc_ops, proc_mmap) mmap; + + mmap = pde->proc_ops->proc_mmap; + if (mmap) + return mmap(file, vma); + return -EIO; +} + static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma) { struct proc_dir_entry *pde = PDE(file_inode(file)); int rv = -EIO; - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_mmap) mmap; - mmap = pde->proc_ops->proc_mmap; - if (mmap) - rv = mmap(file, vma); + if (pde_is_permanent(pde)) { + return pde_mmap(pde, file, vma); + } else if (use_pde(pde)) { + rv = pde_mmap(pde, file, vma); unuse_pde(pde); } return rv; } static unsigned long -proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, +pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - struct proc_dir_entry *pde = PDE(file_inode(file)); - unsigned long rv = -EIO; - - if (use_pde(pde)) { - typeof_member(struct proc_ops, proc_get_unmapped_area) get_area; + typeof_member(struct proc_ops, proc_get_unmapped_area) get_area; - get_area = pde->proc_ops->proc_get_unmapped_area; + get_area = pde->proc_ops->proc_get_unmapped_area; #ifdef CONFIG_MMU - if (!get_area) - get_area = current->mm->get_unmapped_area; + if (!get_area) + get_area = current->mm->get_unmapped_area; #endif + if (get_area) + return get_area(file, orig_addr, len, pgoff, flags); + return orig_addr; +} + +static unsigned long +proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct proc_dir_entry *pde = PDE(file_inode(file)); + unsigned long rv = -EIO; - if (get_area) - rv = get_area(file, orig_addr, len, pgoff, flags); - else - rv = orig_addr; + if (pde_is_permanent(pde)) { + return pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags); + } else if (use_pde(pde)) { + rv = pde_get_unmapped_area(pde, file, orig_addr, 
len, pgoff, flags); unuse_pde(pde); } return rv; @@ -401,6 +470,13 @@ static int proc_reg_open(struct inode *inode, struct file *file) typeof_member(struct proc_ops, proc_release) release; struct pde_opener *pdeo; + if (pde_is_permanent(pde)) { + open = pde->proc_ops->proc_open; + if (open) + rv = open(inode, file); + return rv; + } + /* * Ensure that * 1) PDE's ->release hook will be called no matter what @@ -450,6 +526,17 @@ static int proc_reg_release(struct inode *inode, struct file *file) { struct proc_dir_entry *pde = PDE(inode); struct pde_opener *pdeo; + + if (pde_is_permanent(pde)) { + typeof_member(struct proc_ops, proc_release) release; + + release = pde->proc_ops->proc_release; + if (release) { + return release(inode, file); + } + return 0; + } + spin_lock(&pde->pde_unload_lock); list_for_each_entry(pdeo, &pde->pde_openers, lh) { if (pdeo->file == file) { diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 9e294f0290e5..917cc85e3466 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -61,6 +61,7 @@ struct proc_dir_entry { struct rb_node subdir_node; char *name; umode_t mode; + u8 flags; u8 namelen; char inline_name[]; } __randomize_layout; @@ -73,6 +74,11 @@ struct proc_dir_entry { 0) #define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry)) +static inline bool pde_is_permanent(const struct proc_dir_entry *pde) +{ + return pde->flags & PROC_ENTRY_PERMANENT; +} + extern struct kmem_cache *proc_dir_entry_cache; void pde_free(struct proc_dir_entry *pde); diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c index ec1b7d2fb773..b38ad552887f 100644 --- a/fs/proc/kmsg.c +++ b/fs/proc/kmsg.c @@ -50,6 +50,7 @@ static __poll_t kmsg_poll(struct file *file, poll_table *wait) static const struct proc_ops kmsg_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_read = kmsg_read, .proc_poll = kmsg_poll, .proc_open = kmsg_open, diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 0449edf460f5..46b3293015fe 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -224,6 +224,7 @@ static int stat_open(struct inode *inode, struct file *file) } static const struct proc_ops stat_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = stat_open, .proc_read = seq_read, .proc_lseek = seq_lseek, diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 40a7982b7285..45c05fd9c99d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -5,6 +5,7 @@ #ifndef _LINUX_PROC_FS_H #define _LINUX_PROC_FS_H +#include #include #include @@ -12,7 +13,21 @@ struct proc_dir_entry; struct seq_file; struct seq_operations; +enum { + /* + * All /proc entries using this ->proc_ops instance are never removed. + * + * If in doubt, ignore this flag. 
+ */ +#ifdef MODULE + PROC_ENTRY_PERMANENT = 0U, +#else + PROC_ENTRY_PERMANENT = 1U << 0, +#endif +}; + struct proc_ops { + unsigned int proc_flags; int (*proc_open)(struct inode *, struct file *); ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); @@ -25,7 +40,7 @@ struct proc_ops { #endif int (*proc_mmap)(struct file *, struct vm_area_struct *); unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); -}; +} __randomize_layout; #ifdef CONFIG_PROC_FS diff --git a/ipc/util.c b/ipc/util.c index fe61df53775a..97638eb2d7cb 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -885,6 +885,7 @@ static int sysvipc_proc_release(struct inode *inode, struct file *file) } static const struct proc_ops sysvipc_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = sysvipc_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, diff --git a/kernel/module.c b/kernel/module.c index 33569a01d6e1..3447f3b74870 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -4355,6 +4355,7 @@ static int modules_open(struct inode *inode, struct file *file) } static const struct proc_ops modules_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = modules_open, .proc_read = seq_read, .proc_lseek = seq_lseek, diff --git a/mm/slab_common.c b/mm/slab_common.c index 5282f881d2f5..93ec4a574d8d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1581,6 +1581,7 @@ static int slabinfo_open(struct inode *inode, struct file *file) } static const struct proc_ops slabinfo_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = slabinfo_open, .proc_read = seq_read, .proc_write = slabinfo_write, diff --git a/mm/swapfile.c b/mm/swapfile.c index 273a923c275c..5871a2aa86a5 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2797,6 +2797,7 @@ static int swaps_open(struct inode *inode, struct file *file) } static const struct proc_ops swaps_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = swaps_open, .proc_read = seq_read, .proc_lseek = seq_lseek, -- cgit v1.2.3-58-ga151 From b829a0f0f2f2094c1e40637259c44b854e6ebe96 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 6 Apr 2020 20:09:17 -0700 Subject: seq_file: remove m->version The process maps file was the only user of version (introduced back in 2005). Now that it uses ppos instead, we can remove it. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Link: http://lkml.kernel.org/r/20200317193201.9924-4-adobriyan@gmail.com Signed-off-by: Linus Torvalds --- fs/seq_file.c | 28 ---------------------------- include/linux/seq_file.h | 1 - 2 files changed, 29 deletions(-) (limited to 'include/linux') diff --git a/fs/seq_file.c b/fs/seq_file.c index 1600034a929b..79781ebd2145 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -67,13 +67,6 @@ int seq_open(struct file *file, const struct seq_operations *op) // to the lifetime of the file. p->file = file; - /* - * Wrappers around seq_open(e.g. swaps_open) need to be - * aware of this. If they set f_version themselves, they - * should call seq_open first and then set f_version. - */ - file->f_version = 0; - /* * seq_files support lseek() and pread(). 
They do not implement * write() at all, but we clear FMODE_PWRITE here for historical @@ -94,7 +87,6 @@ static int traverse(struct seq_file *m, loff_t offset) int error = 0; void *p; - m->version = 0; m->index = 0; m->count = m->from = 0; if (!offset) @@ -160,26 +152,12 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) mutex_lock(&m->lock); - /* - * seq_file->op->..m_start/m_stop/m_next may do special actions - * or optimisations based on the file->f_version, so we want to - * pass the file->f_version to those methods. - * - * seq_file->version is just copy of f_version, and seq_file - * methods can treat it simply as file version. - * It is copied in first and copied out after all operations. - * It is convenient to have it as part of structure to avoid the - * need of passing another argument to all the seq_file methods. - */ - m->version = file->f_version; - /* * if request is to read from zero offset, reset iterator to first * record as it might have been already advanced by previous requests */ if (*ppos == 0) { m->index = 0; - m->version = 0; m->count = 0; } @@ -190,7 +168,6 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) if (err) { /* With prejudice... */ m->read_pos = 0; - m->version = 0; m->index = 0; m->count = 0; goto Done; @@ -243,7 +220,6 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) m->buf = seq_buf_alloc(m->size <<= 1); if (!m->buf) goto Enomem; - m->version = 0; p = m->op->start(m, &m->index); } m->op->stop(m, p); @@ -287,7 +263,6 @@ Done: *ppos += copied; m->read_pos += copied; } - file->f_version = m->version; mutex_unlock(&m->lock); return copied; Enomem: @@ -313,7 +288,6 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence) loff_t retval = -EINVAL; mutex_lock(&m->lock); - m->version = file->f_version; switch (whence) { case SEEK_CUR: offset += file->f_pos; @@ -329,7 +303,6 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence) /* with extreme prejudice... */ file->f_pos = 0; m->read_pos = 0; - m->version = 0; m->index = 0; m->count = 0; } else { @@ -340,7 +313,6 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence) file->f_pos = offset; } } - file->f_version = m->version; mutex_unlock(&m->lock); return retval; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 770c2bf3aa43..1672cf6f7614 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -21,7 +21,6 @@ struct seq_file { size_t pad_until; loff_t index; loff_t read_pos; - u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; -- cgit v1.2.3-58-ga151 From 889b3c1245de48ed0cacf7aebb25c489d3e4a3e9 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 6 Apr 2020 20:09:33 -0700 Subject: compiler: remove CONFIG_OPTIMIZE_INLINING entirely Commit ac7c3e4ff401 ("compiler: enable CONFIG_OPTIMIZE_INLINING forcibly") made this an always-on option. We released v5.4 and v5.5 including that commit. Remove the CONFIG option and clean up the code now. Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton Reviewed-by: Miguel Ojeda Reviewed-by: Nathan Chancellor Cc: Arnd Bergmann Cc: Borislav Petkov Cc: David Miller Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200220110807.32534-2-masahiroy@kernel.org Signed-off-by: Linus Torvalds --- arch/x86/configs/i386_defconfig | 1 - arch/x86/configs/x86_64_defconfig | 1 - include/linux/compiler_types.h | 11 +---------- kernel/configs/tiny.config | 1 - lib/Kconfig.debug | 12 ------------ 5 files changed, 1 insertion(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index ab8b30cb978e..550904591e94 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -285,7 +285,6 @@ CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_BOOT_PARAMS=y -CONFIG_OPTIMIZE_INLINING=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 2d196cb49084..614961009075 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -282,7 +282,6 @@ CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_BOOT_PARAMS=y -CONFIG_OPTIMIZE_INLINING=y CONFIG_UNWINDER_ORC=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 72393a8c1a6c..e970f97a7fcb 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -129,22 +129,13 @@ struct ftrace_likely_data { #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) /* - * Force always-inline if the user requests it so via the .config. * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. This makes extern inline behave as per gnu89 * semantics rather than c99. This prevents multiple symbol definition errors * of extern inline functions at link time. * A lot of inline functions can cause havoc with function tracing. - * Do not use __always_inline here, since currently it expands to inline again - * (which would break users of __always_inline). */ -#if !defined(CONFIG_OPTIMIZE_INLINING) -#define inline inline __attribute__((__always_inline__)) __gnu_inline \ - __inline_maybe_unused notrace -#else -#define inline inline __gnu_inline \ - __inline_maybe_unused notrace -#endif +#define inline inline __gnu_inline __inline_maybe_unused notrace /* * gcc provides both __inline__ and __inline as alternate spellings of diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config index 7fa0c4ae6394..8a44b93da0f3 100644 --- a/kernel/configs/tiny.config +++ b/kernel/configs/tiny.config @@ -6,7 +6,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KERNEL_XZ=y # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set -CONFIG_OPTIMIZE_INLINING=y # CONFIG_SLAB is not set # CONFIG_SLUB is not set CONFIG_SLOB=y diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d1398cef3b18..7f9a89847b65 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -305,18 +305,6 @@ config HEADERS_INSTALL user-space program samples. It is also needed by some features such as uapi header sanity checks. -config OPTIMIZE_INLINING - def_bool y - help - This option determines if the kernel forces gcc to inline the functions - developers have marked 'inline'. Doing so takes away freedom from gcc to - do what it thinks is best, which is desirable for the gcc 3.x series of - compilers. 
The gcc 4.x series have a rewritten inlining algorithm and - enabling this option will generate a smaller kernel there. Hopefully - this algorithm is so good that allowing gcc 4.x and above to make the - decision will become the default in the future. Until then this option - is there to test gcc for this. - config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" help -- cgit v1.2.3-58-ga151 From af9c5d2e3b355854ff0e4acfbfbfadcd5198a349 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Mon, 6 Apr 2020 20:09:37 -0700 Subject: compiler.h: fix error in BUILD_BUG_ON() reporting compiletime_assert() uses __LINE__ to create a unique function name. This means that if you have more than one BUILD_BUG_ON() in the same source line (which can happen if they appear e.g. in a macro), then the error message from the compiler might output the wrong condition. For this source file: #include #define macro() \ BUILD_BUG_ON(1); \ BUILD_BUG_ON(0); void foo() { macro(); } gcc would output: ./include/linux/compiler.h:350:38: error: call to `__compiletime_assert_9' declared with attribute error: BUILD_BUG_ON failed: 0 _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) However, it was not the BUILD_BUG_ON(0) that failed, so it should say 1 instead of 0. With this patch, we use __COUNTER__ instead of __LINE__, so each BUILD_BUG_ON() gets a different function name and the correct condition is printed: ./include/linux/compiler.h:350:38: error: call to `__compiletime_assert_0' declared with attribute error: BUILD_BUG_ON failed: 1 _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) Signed-off-by: Vegard Nossum Signed-off-by: Andrew Morton Reviewed-by: Masahiro Yamada Reviewed-by: Daniel Santos Cc: Rasmus Villemoes Cc: Ian Abbott Cc: Joe Perches Link: http://lkml.kernel.org/r/20200331112637.25047-1-vegard.nossum@oracle.com Signed-off-by: Linus Torvalds --- include/linux/compiler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5e88e7e33abe..034b0a644efc 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -347,7 +347,7 @@ static inline void *offset_to_ptr(const int *off) * compiler has support to do so. */ #define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) #define compiletime_assert_atomic_type(t) \ compiletime_assert(__native_word(t), \ -- cgit v1.2.3-58-ga151 From f80ac98a641a03097cbc9fdfd4b6a41a8dd3b7ae Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 6 Apr 2020 20:09:43 -0700 Subject: bitops: always inline sign extension helpers With CONFIG_CC_OPTIMIZE_FOR_SIZE, objtool reports: drivers/gpu/drm/i915/gem/i915_gem_execbuffer.o: warning: objtool: i915_gem_execbuffer2_ioctl()+0x5b7: call to gen8_canonical_addr() with UACCESS enabled This means i915_gem_execbuffer2_ioctl() is calling gen8_canonical_addr() from the user_access_begin/end critical region (i.e, with SMAP disabled). While it's probably harmless in this case, in general we like to avoid extra function calls in SMAP-disabled regions because it can open up inadvertent security holes. Fix the warning by changing the sign extension helpers to __always_inline. This convinces GCC to inline gen8_canonical_addr(). The sign extension functions are trivial anyway, so it makes sense to always inline them. 
With my test optimize-for-size-based config, this actually shrinks the text size of i915_gem_execbuffer.o by 45 bytes -- and no change for vmlinux. Reported-by: Randy Dunlap Signed-off-by: Josh Poimboeuf Signed-off-by: Andrew Morton Cc: Peter Zijlstra Cc: Al Viro Cc: Chris Wilson Link: http://lkml.kernel.org/r/740179324b2b18b750b16295c48357f00b5fa9ed.1582982020.git.jpoimboe@redhat.com Signed-off-by: Linus Torvalds --- include/linux/bitops.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 47f54b459c26..9acf654f0b19 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -162,7 +162,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift) * * This is safe to use for 16- and 8-bit types as well. */ -static inline __s32 sign_extend32(__u32 value, int index) +static __always_inline __s32 sign_extend32(__u32 value, int index) { __u8 shift = 31 - index; return (__s32)(value << shift) >> shift; @@ -173,7 +173,7 @@ static inline __s32 sign_extend32(__u32 value, int index) * @value: value to sign extend * @index: 0 based bit index (0<=index<64) to sign bit */ -static inline __s64 sign_extend64(__u64 value, int index) +static __always_inline __s64 sign_extend64(__u64 value, int index) { __u8 shift = 63 - index; return (__s64)(value << shift) >> shift; -- cgit v1.2.3-58-ga151 From 505a0ef15f96c6c43ec719c9fc1833d98957bb39 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Mon, 6 Apr 2020 20:10:22 -0700 Subject: kasan: stackdepot: move filter_irq_stacks() to stackdepot.c filter_irq_stacks() can be used by other tools (e.g. KMSAN), so it needs to be moved to a common location. lib/stackdepot.c seems a good place, as filter_irq_stacks() is usually applied to the output of stack_trace_save(). This patch has been previously mailed as part of KMSAN RFC patch series. 
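For reference, the typical caller pattern this enables is sketched below (an illustrative helper, assuming the stack_trace_save()/stack_depot_save() interfaces shown in this log; KASAN's save_stack() does essentially this):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_filtered_stack(gfp_t flags)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* Trim everything below the (soft)irq entry point, keeping the
	 * entry function itself, before depositing the trace. */
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}
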
[glider@google.com: nds32: linker script: add SOFTIRQENTRY_TEXT] Link: http://lkml.kernel.org/r/20200311121002.241430-1-glider@google.com [glider@google.com: add IRQENTRY_TEXT and SOFTIRQENTRY_TEXT to linker script] Link: http://lkml.kernel.org/r/20200311121124.243352-1-glider@google.com Signed-off-by: Alexander Potapenko Signed-off-by: Andrew Morton Cc: Vegard Nossum Cc: Dmitry Vyukov Cc: Marco Elver Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Arnd Bergmann Cc: Sergey Senozhatsky Link: http://lkml.kernel.org/r/20200220141916.55455-3-glider@google.com Signed-off-by: Linus Torvalds --- arch/ia64/kernel/vmlinux.lds.S | 2 ++ arch/nds32/kernel/vmlinux.lds.S | 1 + include/linux/stackdepot.h | 2 ++ lib/stackdepot.c | 24 ++++++++++++++++++++++++ mm/kasan/common.c | 23 ----------------------- 5 files changed, 29 insertions(+), 23 deletions(-) (limited to 'include/linux')
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 1ec6b703c5b4..6b5652ee76f9 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -54,6 +54,8 @@ SECTIONS { CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.gnu.linkonce.t*) }
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index f679d3397436..7a6c1cefe3fe 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -47,6 +47,7 @@ SECTIONS LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT + SOFTIRQENTRY_TEXT *(.fixup) }
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 3efa97d482cb..24d49c732341 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -19,4 +19,6 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries); +unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); + #endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 1ec36ee344e0..2caffc64e4c8 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -20,6 +20,7 @@ */ #include +#include #include #include #include @@ -316,3 +317,26 @@ fast_exit: return retval; } EXPORT_SYMBOL_GPL(stack_depot_save); + +static inline int in_irqentry_text(unsigned long ptr) +{ + return (ptr >= (unsigned long)&__irqentry_text_start && + ptr < (unsigned long)&__irqentry_text_end) || + (ptr >= (unsigned long)&__softirqentry_text_start && + ptr < (unsigned long)&__softirqentry_text_end); +} + +unsigned int filter_irq_stacks(unsigned long *entries, + unsigned int nr_entries) +{ + unsigned int i; + + for (i = 0; i < nr_entries; i++) { + if (in_irqentry_text(entries[i])) { + /* Include the irqentry function into the stack.
*/ + return i + 1; + } + } + return nr_entries; +} +EXPORT_SYMBOL_GPL(filter_irq_stacks);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c index e61b4a492218..2906358e42f0 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -15,7 +15,6 @@ */ #include -#include #include #include #include @@ -42,28 +41,6 @@ #include "kasan.h" #include "../slab.h" -static inline int in_irqentry_text(unsigned long ptr) -{ - return (ptr >= (unsigned long)&__irqentry_text_start && - ptr < (unsigned long)&__irqentry_text_end) || - (ptr >= (unsigned long)&__softirqentry_text_start && - ptr < (unsigned long)&__softirqentry_text_end); -} - -static inline unsigned int filter_irq_stacks(unsigned long *entries, - unsigned int nr_entries) -{ - unsigned int i; - - for (i = 0; i < nr_entries; i++) { - if (in_irqentry_text(entries[i])) { - /* Include the irqentry function into the stack. */ - return i + 1; - } - } - return nr_entries; -} - static inline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[KASAN_STACK_DEPTH];
-- cgit v1.2.3-58-ga151
From 7e2345200262e4a6056580f0231cccdaffc825f3 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Mon, 6 Apr 2020 20:10:25 -0700 Subject: percpu_counter: fix a data race at vm_committed_as "vm_committed_as.count" could be accessed concurrently as reported by KCSAN, BUG: KCSAN: data-race in __vm_enough_memory / percpu_counter_add_batch write to 0xffffffff9451c538 of 8 bytes by task 65879 on cpu 35: percpu_counter_add_batch+0x83/0xd0 percpu_counter_add_batch at lib/percpu_counter.c:91 __vm_enough_memory+0xb9/0x260 dup_mm+0x3a4/0x8f0 copy_process+0x2458/0x3240 _do_fork+0xaa/0x9f0 __do_sys_clone+0x125/0x160 __x64_sys_clone+0x70/0x90 do_syscall_64+0x91/0xb05 entry_SYSCALL_64_after_hwframe+0x49/0xbe read to 0xffffffff9451c538 of 8 bytes by task 66773 on cpu 19: __vm_enough_memory+0x199/0x260 percpu_counter_read_positive at include/linux/percpu_counter.h:81 (inlined by) __vm_enough_memory at mm/util.c:839 mmap_region+0x1b2/0xa10 do_mmap+0x45c/0x700 vm_mmap_pgoff+0xc0/0x130 ksys_mmap_pgoff+0x6e/0x300 __x64_sys_mmap+0x33/0x40 do_syscall_64+0x91/0xb05 entry_SYSCALL_64_after_hwframe+0x49/0xbe The read is outside percpu_counter::lock critical section which results in a data race. Fix it by adding a READ_ONCE() in percpu_counter_read_positive() which can also serve as the existing compiler memory barrier.
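The guarantee being bought here, in a standalone sketch (this models READ_ONCE() with a volatile access, which is essentially what the kernel macro does for word-sized objects; read_positive() is a made-up stand-in for percpu_counter_read_positive()):

        #include <stdint.h>

        /* One forced load: the compiler may neither reload nor tear it. */
        #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

        static int64_t read_positive(const int64_t *count)
        {
                int64_t v = READ_ONCE(*count);  /* exactly one load of *count */

                /* With a plain load, the compiler could legally re-read *count
                 * here and return a value that never passed the >= 0 check. */
                return v >= 0 ? v : 0;
        }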
Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Acked-by: Marco Elver Link: http://lkml.kernel.org/r/1582302724-2804-1-git-send-email-cai@lca.pw Signed-off-by: Linus Torvalds --- include/linux/percpu_counter.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux')
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 4f052496cdfd..0a4f54dd4737 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { - s64 ret = fbc->count; + /* Prevent reloads of fbc->count */ + s64 ret = READ_ONCE(fbc->count); - barrier(); /* Prevent reloads of fbc->count */ if (ret >= 0) return ret; return 0;
-- cgit v1.2.3-58-ga151
From 295bcca84916cb5079140a89fccb472bb8d1f6e2 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Mon, 6 Apr 2020 20:10:38 -0700 Subject: linux/bits.h: add compile time sanity check of GENMASK inputs GENMASK() and GENMASK_ULL() are supposed to be called with the high bit as the first argument and the low bit as the second argument. Mixing them will return a mask with zero bits set. Recent commits show getting this wrong is not uncommon, see e.g. commit aa4c0c9091b0 ("net: stmmac: Fix misuses of GENMASK macro") and commit 9bdd7bb3a844 ("clocksource/drivers/npcm: Fix misuse of GENMASK macro"). To prevent such mistakes from appearing again, add compile time sanity checking to the arguments of GENMASK() and GENMASK_ULL(). If both arguments are known at compile time, and the low bit is higher than the high bit, break the build to detect the mistake immediately. Since GENMASK() is used in declarations, BUILD_BUG_ON_ZERO() must be used instead of BUILD_BUG_ON(). __builtin_constant_p does not evaluate its argument, it only checks if it is a constant or not at compile time, and __builtin_choose_expr does not evaluate the expression that is not chosen. Therefore, GENMASK(x++, 0) only evaluates x++ once. Commit 95b980d62d52 ("linux/bits.h: make BIT(), GENMASK(), and friends available in assembly") made the macros in linux/bits.h available in assembly. Since BUILD_BUG_ON_ZERO() is not asm compatible, disable the checks if the file is included in an asm file. Due to bugs in GCC versions before 4.9 [0], disable the check if building with a too old GCC compiler. [0]: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=19449 Signed-off-by: Rikard Falkeborn Signed-off-by: Andrew Morton Reviewed-by: Masahiro Yamada Reviewed-by: Kees Cook Cc: Borislav Petkov Cc: Geert Uytterhoeven Cc: Haren Myneni Cc: Joe Perches Cc: Johannes Berg Cc: lkml Cc: Ingo Molnar Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200308193954.2372399-1-rikard.falkeborn@gmail.com Signed-off-by: Linus Torvalds --- include/linux/bits.h | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'include/linux')
diff --git a/include/linux/bits.h b/include/linux/bits.h index a740bbcf3cd2..4671fbf28842 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -18,12 +18,30 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/ -#define GENMASK(h, l) \ +#if !defined(__ASSEMBLY__) && \ + (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) +#include +#define GENMASK_INPUT_CHECK(h, l) \ + (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ + __builtin_constant_p((l) > (h)), (l) > (h), 0))) +#else +/* + * BUILD_BUG_ON_ZERO is not available in h files included from asm files, + * disable the input check if that is the case. + */ +#define GENMASK_INPUT_CHECK(h, l) 0 +#endif + +#define __GENMASK(h, l) \ (((~UL(0)) - (UL(1) << (l)) + 1) & \ (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) +#define GENMASK(h, l) \ + (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l)) -#define GENMASK_ULL(h, l) \ +#define __GENMASK_ULL(h, l) \ (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) +#define GENMASK_ULL(h, l) \ + (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l)) #endif /* __LINUX_BITS_H */ -- cgit v1.2.3-58-ga151 From a13f58a0cafa7b0416a2898bc3b0defbb305d108 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 3 Mar 2020 11:54:27 +0100 Subject: locking/refcount: Document interaction with PID_MAX_LIMIT Document the circumstances under which refcount_t's saturation mechanism works deterministically. Acked-by: Kees Cook Acked-by: Will Deacon Signed-off-by: Jann Horn Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/20200303105427.260620-1-jannh@google.com --- include/linux/refcount.h | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 0ac50cf62d06..0e3ee25eb156 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -38,11 +38,24 @@ * atomic operations, then the count will continue to edge closer to 0. If it * reaches a value of 1 before /any/ of the threads reset it to the saturated * value, then a concurrent refcount_dec_and_test() may erroneously free the - * underlying object. Given the precise timing details involved with the - * round-robin scheduling of each thread manipulating the refcount and the need - * to hit the race multiple times in succession, there doesn't appear to be a - * practical avenue of attack even if using refcount_add() operations with - * larger increments. + * underlying object. + * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently + * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK). + * With the current PID limit, if no batched refcounting operations are used and + * the attacker can't repeatedly trigger kernel oopses in the middle of refcount + * operations, this makes it impossible for a saturated refcount to leave the + * saturation range, even if it is possible for multiple uses of the same + * refcount to nest in the context of a single task: + * + * (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT = + * 0x40000000 / 0x400000 = 0x100 = 256 + * + * If hundreds of references are added/removed with a single refcounting + * operation, it may potentially be possible to leave the saturation range; but + * given the precise timing details involved with the round-robin scheduling of + * each thread manipulating the refcount and the need to hit the race multiple + * times in succession, there doesn't appear to be a practical avenue of attack + * even if using refcount_add() operations with larger increments. 
* * Memory ordering * =============== -- cgit v1.2.3-58-ga151 From 63f818f46af9f8b3f17b9695501e8d08959feb60 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 7 Apr 2020 09:43:04 -0500 Subject: proc: Use a dedicated lock in struct pid syzbot wrote: > ======================================================== > WARNING: possible irq lock inversion dependency detected > 5.6.0-syzkaller #0 Not tainted > -------------------------------------------------------- > swapper/1/0 just changed the state of lock: > ffffffff898090d8 (tasklist_lock){.+.?}-{2:2}, at: send_sigurg+0x9f/0x320 fs/fcntl.c:840 > but this lock took another, SOFTIRQ-unsafe lock in the past: > (&pid->wait_pidfd){+.+.}-{2:2} > > > and interrupts could create inverse lock ordering between them. > > > other info that might help us debug this: > Possible interrupt unsafe locking scenario: > > CPU0 CPU1 > ---- ---- > lock(&pid->wait_pidfd); > local_irq_disable(); > lock(tasklist_lock); > lock(&pid->wait_pidfd); > > lock(tasklist_lock); > > *** DEADLOCK *** > > 4 locks held by swapper/1/0: The problem is that because wait_pidfd.lock is taken under the tasklist lock. It must always be taken with irqs disabled as tasklist_lock can be taken from interrupt context and if wait_pidfd.lock was already taken this would create a lock order inversion. Oleg suggested just disabling irqs where I have added extra calls to wait_pidfd.lock. That should be safe and I think the code will eventually do that. It was rightly pointed out by Christian that sharing the wait_pidfd.lock was a premature optimization. It is also true that my pre-merge window testing was insufficient. So remove the premature optimization and give struct pid a dedicated lock of it's own for struct pid things. I have verified that lockdep sees all 3 paths where we take the new pid->lock and lockdep does not complain. It is my current day dream that one day pid->lock can be used to guard the task lists as well and then the tasklist_lock won't need to be held to deliver signals. That will require taking pid->lock with irqs disabled. Acked-by: Christian Brauner Link: https://lore.kernel.org/lkml/00000000000011d66805a25cd73f@google.com/ Cc: Oleg Nesterov Cc: Christian Brauner Reported-by: syzbot+343f75cdeea091340956@syzkaller.appspotmail.com Reported-by: syzbot+832aabf700bc3ec920b9@syzkaller.appspotmail.com Reported-by: syzbot+f675f964019f884dbd0f@syzkaller.appspotmail.com Reported-by: syzbot+a9fb1457d720a55d6dc5@syzkaller.appspotmail.com Fixes: 7bc3e6e55acf ("proc: Use a list of inodes to flush from proc") Signed-off-by: "Eric W. 
Biederman" --- fs/proc/base.c | 10 +++++----- include/linux/pid.h | 1 + kernel/pid.c | 1 + 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/base.c b/fs/proc/base.c index 74f948a6b621..6042b646ab27 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1839,9 +1839,9 @@ void proc_pid_evict_inode(struct proc_inode *ei) struct pid *pid = ei->pid; if (S_ISDIR(ei->vfs_inode.i_mode)) { - spin_lock(&pid->wait_pidfd.lock); + spin_lock(&pid->lock); hlist_del_init_rcu(&ei->sibling_inodes); - spin_unlock(&pid->wait_pidfd.lock); + spin_unlock(&pid->lock); } put_pid(pid); @@ -1877,9 +1877,9 @@ struct inode *proc_pid_make_inode(struct super_block * sb, /* Let the pid remember us for quick removal */ ei->pid = pid; if (S_ISDIR(mode)) { - spin_lock(&pid->wait_pidfd.lock); + spin_lock(&pid->lock); hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); - spin_unlock(&pid->wait_pidfd.lock); + spin_unlock(&pid->lock); } task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid); @@ -3273,7 +3273,7 @@ static const struct inode_operations proc_tgid_base_inode_operations = { void proc_flush_pid(struct pid *pid) { - proc_invalidate_siblings_dcache(&pid->inodes, &pid->wait_pidfd.lock); + proc_invalidate_siblings_dcache(&pid->inodes, &pid->lock); put_pid(pid); } diff --git a/include/linux/pid.h b/include/linux/pid.h index 01a0d4e28506..cc896f0fc4e3 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -60,6 +60,7 @@ struct pid { refcount_t count; unsigned int level; + spinlock_t lock; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; struct hlist_head inodes; diff --git a/kernel/pid.c b/kernel/pid.c index efd34874b3d1..517d0855d4cf 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -246,6 +246,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, get_pid_ns(ns); refcount_set(&pid->count, 1); + spin_lock_init(&pid->lock); for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); -- cgit v1.2.3-58-ga151 From ab6f762f0f53162d41497708b33c9a3236d3609e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 3 Mar 2020 20:30:02 +0900 Subject: printk: queue wake_up_klogd irq_work only if per-CPU areas are ready printk_deferred(), similarly to printk_safe/printk_nmi, does not immediately attempt to print a new message on the consoles, avoiding calls into non-reentrant kernel paths, e.g. scheduler or timekeeping, which potentially can deadlock the system. Those printk() flavors, instead, rely on per-CPU flush irq_work to print messages from safer contexts. For same reasons (recursive scheduler or timekeeping calls) printk() uses per-CPU irq_work in order to wake up user space syslog/kmsg readers. However, only printk_safe/printk_nmi do make sure that per-CPU areas have been initialised and that it's safe to modify per-CPU irq_work. This means that, for instance, should printk_deferred() be invoked "too early", that is before per-CPU areas are initialised, printk_deferred() will perform illegal per-CPU access. Lech Perczak [0] reports that after commit 1b710b1b10ef ("char/random: silence a lockdep splat with printk()") user-space syslog/kmsg readers are not able to read new kernel messages. The reason is printk_deferred() being called too early (as was pointed out by Petr and John). Fix printk_deferred() and do not queue per-CPU irq_work before per-CPU areas are initialized. 
Link: https://lore.kernel.org/lkml/aa0732c6-5c4e-8a8b-a1c1-75ebe3dca05b@camlintechnologies.com/ Reported-by: Lech Perczak Signed-off-by: Sergey Senozhatsky Tested-by: Jann Horn Reviewed-by: Petr Mladek Cc: Greg Kroah-Hartman Cc: Theodore Ts'o Cc: John Ogness Signed-off-by: Linus Torvalds --- include/linux/printk.h | 5 ----- init/main.c | 1 - kernel/printk/internal.h | 5 +++++ kernel/printk/printk.c | 34 ++++++++++++++++++++++++++++++++++ kernel/printk/printk_safe.c | 11 +---------- 5 files changed, 40 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/printk.h b/include/linux/printk.h index 1e6108b8d15f..e061635e0409 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -202,7 +202,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack(void) __cold; -extern void printk_safe_init(void); extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); #else @@ -269,10 +268,6 @@ static inline void dump_stack(void) { } -static inline void printk_safe_init(void) -{ -} - static inline void printk_safe_flush(void) { } diff --git a/init/main.c b/init/main.c index e488213857e2..a48617f2e5e5 100644 --- a/init/main.c +++ b/init/main.c @@ -913,7 +913,6 @@ asmlinkage __visible void __init start_kernel(void) boot_init_stack_canary(); time_init(); - printk_safe_init(); perf_event_init(); profile_init(); call_function_init(); diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index c8e6ab689d42..b2b0f526f249 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h @@ -23,6 +23,9 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args); void __printk_safe_enter(void); void __printk_safe_exit(void); +void printk_safe_init(void); +bool printk_percpu_data_ready(void); + #define printk_safe_enter_irqsave(flags) \ do { \ local_irq_save(flags); \ @@ -64,4 +67,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; } #define printk_safe_enter_irq() local_irq_disable() #define printk_safe_exit_irq() local_irq_enable() +static inline void printk_safe_init(void) { } +static inline bool printk_percpu_data_ready(void) { return false; } #endif /* CONFIG_PRINTK */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 633f41a11d75..9a9b6156270b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -460,6 +460,18 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; +/* + * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before + * per_cpu_areas are initialised. This variable is set to true when + * it's safe to access per-CPU data. 
+ */ +static bool __printk_percpu_data_ready __read_mostly; + +bool printk_percpu_data_ready(void) +{ + return __printk_percpu_data_ready; +} + /* Return log buffer address */ char *log_buf_addr_get(void) { @@ -1146,12 +1158,28 @@ static void __init log_buf_add_cpu(void) static inline void log_buf_add_cpu(void) {} #endif /* CONFIG_SMP */ +static void __init set_percpu_data_ready(void) +{ + printk_safe_init(); + /* Make sure we set this flag only after printk_safe() init is done */ + barrier(); + __printk_percpu_data_ready = true; +} + void __init setup_log_buf(int early) { unsigned long flags; char *new_log_buf; unsigned int free; + /* + * Some archs call setup_log_buf() multiple times - first is very + * early, e.g. from setup_arch(), and second - when percpu_areas + * are initialised. + */ + if (!early) + set_percpu_data_ready(); + if (log_buf != __log_buf) return; @@ -2975,6 +3003,9 @@ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { void wake_up_klogd(void) { + if (!printk_percpu_data_ready()) + return; + preempt_disable(); if (waitqueue_active(&log_wait)) { this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); @@ -2985,6 +3016,9 @@ void wake_up_klogd(void) void defer_console_output(void) { + if (!printk_percpu_data_ready()) + return; + preempt_disable(); __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index b4045e782743..d9a659a686f3 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -27,7 +27,6 @@ * There are situations when we want to make sure that all buffers * were handled or when IRQs are blocked. */ -static int printk_safe_irq_ready __read_mostly; #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \ sizeof(atomic_t) - \ @@ -51,7 +50,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq); /* Get flushed in a more safe context. */ static void queue_flush_work(struct printk_safe_seq_buf *s) { - if (printk_safe_irq_ready) + if (printk_percpu_data_ready()) irq_work_queue(&s->work); } @@ -402,14 +401,6 @@ void __init printk_safe_init(void) #endif } - /* - * In the highly unlikely event that a NMI were to trigger at - * this moment. Make sure IRQ work is set up before this - * variable is set. - */ - barrier(); - printk_safe_irq_ready = 1; - /* Flush pending messages that did not have scheduled IRQ works. 
*/ printk_safe_flush(); }
-- cgit v1.2.3-58-ga151
From 2370ae4b1d5aa7eb70bd7539a420e791d4b0123b Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 10 Apr 2020 14:32:25 -0700 Subject: docs: mm: slab.h: fix a broken cross-reference There is a typo at the cross-reference link, causing this warning: include/linux/slab.h:11: WARNING: undefined label: memory-allocation (if the link has no caption the label must precede a section header) Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Andrew Morton Cc: Jonathan Corbet Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/0aeac24235d356ebd935d11e147dcc6edbb6465c.1586359676.git.mchehab+huawei@kernel.org Signed-off-by: Linus Torvalds --- include/linux/slab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux')
diff --git a/include/linux/slab.h b/include/linux/slab.h index 03a389358562..6d454886bcaf 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -501,7 +501,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * :ref:`Documentation/core-api/mm-api.rst ` * * The recommended usage of the @flags is described at - * :ref:`Documentation/core-api/memory-allocation.rst ` + * :ref:`Documentation/core-api/memory-allocation.rst ` * * Below is a brief outline of the most useful GFP flags *
-- cgit v1.2.3-58-ga151
From 8676af1ff2d28e64e5636147821bda7524cf007d Mon Sep 17 00:00:00 2001 From: Aslan Bakirov Date: Fri, 10 Apr 2020 14:32:42 -0700 Subject: mm: cma: NUMA node interface I've noticed that there is no interface exposed by CMA which would let me declare contiguous memory on a particular NUMA node. This patchset adds the ability to try to allocate contiguous memory on a specific node. It will fall back to other nodes if the specified one doesn't work. Implement a new method for declaring contiguous memory on a particular node and keep cma_declare_contiguous() as a wrapper.
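A sketch of how an early-boot caller uses the new entry point (the "example" name, the nid variable and the 1 GiB size are illustrative only; the hugetlb patch later in this series is the first real user):

        struct cma *cma;
        int ret;

        /* Try to carve 1 GiB out of node 'nid'; the allocation falls back
         * to other nodes if that node cannot satisfy the request. */
        ret = cma_declare_contiguous_nid(0, SZ_1G, 0, 0, 0, false,
                                         "example", &cma, nid);
        if (ret)
                pr_warn("example: CMA reservation failed: %d\n", ret);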
[akpm@linux-foundation.org: build fix] Signed-off-by: Aslan Bakirov Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Acked-by: Michal Hocko Cc: Andreas Schaufler Cc: Mike Kravetz Cc: Rik van Riel Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/20200407163840.92263-2-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/cma.h | 14 ++++++++++++-- include/linux/memblock.h | 3 +++ mm/cma.c | 16 +++++++++------- mm/memblock.c | 2 +- 4 files changed, 25 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cma.h b/include/linux/cma.h index 190184b5ff32..6ff79fefd01f 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -4,6 +4,7 @@ #include #include +#include /* * There is always at least global CMA area and a few optional @@ -24,10 +25,19 @@ extern phys_addr_t cma_get_base(const struct cma *cma); extern unsigned long cma_get_size(const struct cma *cma); extern const char *cma_get_name(const struct cma *cma); -extern int __init cma_declare_contiguous(phys_addr_t base, +extern int __init cma_declare_contiguous_nid(phys_addr_t base, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, - bool fixed, const char *name, struct cma **res_cma); + bool fixed, const char *name, struct cma **res_cma, + int nid); +static inline int __init cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma) +{ + return cma_declare_contiguous_nid(base, size, limit, alignment, + order_per_bit, fixed, name, res_cma, NUMA_NO_NODE); +} extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 079d17d96410..6bc37a731d27 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -348,6 +348,9 @@ static inline int memblock_get_region_node(const struct memblock_region *r) phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end); +phys_addr_t memblock_alloc_range_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t start, + phys_addr_t end, int nid, bool exact_nid); phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); static inline phys_addr_t memblock_phys_alloc(phys_addr_t size, diff --git a/mm/cma.c b/mm/cma.c index be55d1988c67..0463ad2ce06b 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -220,7 +220,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, } /** - * cma_declare_contiguous() - reserve custom contiguous area + * cma_declare_contiguous_nid() - reserve custom contiguous area * @base: Base address of the reserved area optional, use 0 for any * @size: Size of the reserved area (in bytes), * @limit: End address of the reserved memory (optional, 0 for any). @@ -229,6 +229,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, * @fixed: hint about where to place the reserved area * @name: The name of the area. See function cma_init_reserved_mem() * @res_cma: Pointer to store the created cma region. + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * This function reserves memory from early allocator. 
It should be * called by arch specific code once the early allocator (memblock or bootmem) @@ -238,10 +239,11 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, * If @fixed is true, reserve contiguous area at exactly @base. If false, * reserve in range from @base to @limit. */ -int __init cma_declare_contiguous(phys_addr_t base, +int __init cma_declare_contiguous_nid(phys_addr_t base, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, - bool fixed, const char *name, struct cma **res_cma) + bool fixed, const char *name, struct cma **res_cma, + int nid) { phys_addr_t memblock_end = memblock_end_of_DRAM(); phys_addr_t highmem_start; @@ -336,14 +338,14 @@ int __init cma_declare_contiguous(phys_addr_t base, * memory in case of failure. */ if (base < highmem_start && limit > highmem_start) { - addr = memblock_phys_alloc_range(size, alignment, - highmem_start, limit); + addr = memblock_alloc_range_nid(size, alignment, + highmem_start, limit, nid, false); limit = highmem_start; } if (!addr) { - addr = memblock_phys_alloc_range(size, alignment, base, - limit); + addr = memblock_alloc_range_nid(size, alignment, base, + limit, nid, false); if (!addr) { ret = -ENOMEM; goto err; diff --git a/mm/memblock.c b/mm/memblock.c index 4d06bbaded0f..c79ba6f9920c 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1349,7 +1349,7 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, * Return: * Physical address of allocated memory block on success, %0 on failure. */ -static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, +phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid, bool exact_nid) -- cgit v1.2.3-58-ga151 From cf11e85fc08cc6a4fe3ac2ba2e610c962bf20bc3 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 10 Apr 2020 14:32:45 -0700 Subject: mm: hugetlb: optionally allocate gigantic hugepages using cma Commit 944d9fec8d7a ("hugetlb: add support for gigantic page allocation at runtime") has added the run-time allocation of gigantic pages. However it actually works only at early stages of the system loading, when the majority of memory is free. After some time the memory gets fragmented by non-movable pages, so the chances to find a contiguous 1GB block are getting close to zero. Even dropping caches manually doesn't help a lot. At large scale rebooting servers in order to allocate gigantic hugepages is quite expensive and complex. At the same time keeping some constant percentage of memory in reserved hugepages even if the workload isn't using it is a big waste: not all workloads can benefit from using 1 GB pages. The following solution can solve the problem: 1) On boot time a dedicated cma area* is reserved. The size is passed as a kernel argument. 2) Run-time allocations of gigantic hugepages are performed using the cma allocator and the dedicated cma area In this case gigantic hugepages can be allocated successfully with a high probability, however the memory isn't completely wasted if nobody is using 1GB hugepages: it can be used for pagecache, anon memory, THPs, etc. * On a multi-node machine a per-node cma area is allocated on each node. Following gigantic hugetlb allocation are using the first available numa node if the mask isn't specified by a user. Usage: 1) configure the kernel to allocate a cma area for hugetlb allocations: pass hugetlb_cma=10G as a kernel argument 2) allocate hugetlb pages as usual, e.g. 
echo 10 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages If the option isn't enabled or the allocation of the cma area failed, the current behavior of the system is preserved. x86 and arm-64 are covered by this patch, other architectures can be trivially added later. The patch contains clean-ups and fixes proposed and implemented by Aslan Bakirov and Randy Dunlap. It also contains ideas and suggestions proposed by Rik van Riel, Michal Hocko and Mike Kravetz. Thanks! Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Tested-by: Andreas Schaufler Acked-by: Mike Kravetz Acked-by: Michal Hocko Cc: Aslan Bakirov Cc: Randy Dunlap Cc: Rik van Riel Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/20200407163840.92263-3-guro@fb.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/kernel-parameters.txt | 8 ++ arch/arm64/mm/init.c | 6 ++ arch/x86/kernel/setup.c | 4 + include/linux/hugetlb.h | 12 +++ mm/hugetlb.c | 109 ++++++++++++++++++++++++ 5 files changed, 139 insertions(+) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 86aae1fa099a..d7df9a8302c4 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1475,6 +1475,14 @@ hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET registers. Default set by CONFIG_HPET_MMAP_DEFAULT. + hugetlb_cma= [HW] The size of a cma area used for allocation + of gigantic hugepages. + Format: nn[KMGTPE] + + Reserve a cma area of given size and allocate gigantic + hugepages using the cma allocator. If enabled, the + boot-time allocation of gigantic hugepages is skipped. + hugepages= [HW,X86-32,IA-64] HugeTLB pages to allocate at boot. hugepagesz= [HW,IA-64,PPC,X86-64] The size of the HugeTLB pages. On x86-64 and powerpc, this option can be specified diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index b65dffdfb201..e42727e3568e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -457,6 +458,11 @@ void __init arm64_memblock_init(void) high_memory = __va(memblock_end_of_DRAM() - 1) + 1; dma_contiguous_reserve(arm64_dma32_phys_limit); + +#ifdef CONFIG_ARM64_4K_PAGES + hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); +#endif + } void __init bootmem_init(void) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e6b545047f38..4b3fa6cd3106 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -1157,6 +1158,9 @@ void __init setup_arch(char **cmdline_p) initmem_init(); dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); + if (boot_cpu_has(X86_FEATURE_GBPAGES)) + hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); + /* * Reserve memory for crash kernel after SRAT is parsed so that it * won't consume hotpluggable memory. 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5ea05879a0a9..43a1cef8f0f1 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -895,4 +895,16 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h, return ptl; } +#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA) +extern void __init hugetlb_cma_reserve(int order); +extern void __init hugetlb_cma_check(void); +#else +static inline __init void hugetlb_cma_reserve(int order) +{ +} +static inline __init void hugetlb_cma_check(void) +{ +} +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f5fb53fdfa02..cd459155d28a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -44,6 +45,9 @@ int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; + +static struct cma *hugetlb_cma[MAX_NUMNODES]; + /* * Minimum page order among possible hugepage sizes, set to a proper value * at boot time. @@ -1228,6 +1232,14 @@ static void destroy_compound_gigantic_page(struct page *page, static void free_gigantic_page(struct page *page, unsigned int order) { + /* + * If the page isn't allocated using the cma allocator, + * cma_release() returns false. + */ + if (IS_ENABLED(CONFIG_CMA) && + cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) + return; + free_contig_range(page_to_pfn(page), 1 << order); } @@ -1237,6 +1249,21 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, { unsigned long nr_pages = 1UL << huge_page_order(h); + if (IS_ENABLED(CONFIG_CMA)) { + struct page *page; + int node; + + for_each_node_mask(node, *nodemask) { + if (!hugetlb_cma[node]) + continue; + + page = cma_alloc(hugetlb_cma[node], nr_pages, + huge_page_order(h), true); + if (page) + return page; + } + } + return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask); } @@ -1281,8 +1308,14 @@ static void update_and_free_page(struct hstate *h, struct page *page) set_compound_page_dtor(page, NULL_COMPOUND_DTOR); set_page_refcounted(page); if (hstate_is_gigantic(h)) { + /* + * Temporarily drop the hugetlb_lock, because + * we might block in free_gigantic_page(). 
+ */ + spin_unlock(&hugetlb_lock); destroy_compound_gigantic_page(page, huge_page_order(h)); free_gigantic_page(page, huge_page_order(h)); + spin_lock(&hugetlb_lock); } else { __free_pages(page, huge_page_order(h)); } @@ -2539,6 +2572,10 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) for (i = 0; i < h->max_huge_pages; ++i) { if (hstate_is_gigantic(h)) { + if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) { + pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); + break; + } if (!alloc_bootmem_huge_page(h)) break; } else if (!alloc_pool_huge_page(h, @@ -3194,6 +3231,7 @@ static int __init hugetlb_init(void) default_hstate.max_huge_pages = default_hstate_max_huge_pages; } + hugetlb_cma_check(); hugetlb_init_hstates(); gather_bootmem_prealloc(); report_hugepages(); @@ -5506,3 +5544,74 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) spin_unlock(&hugetlb_lock); } } + +#ifdef CONFIG_CMA +static unsigned long hugetlb_cma_size __initdata; +static bool cma_reserve_called __initdata; + +static int __init cmdline_parse_hugetlb_cma(char *p) +{ + hugetlb_cma_size = memparse(p, &p); + return 0; +} + +early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); + +void __init hugetlb_cma_reserve(int order) +{ + unsigned long size, reserved, per_node; + int nid; + + cma_reserve_called = true; + + if (!hugetlb_cma_size) + return; + + if (hugetlb_cma_size < (PAGE_SIZE << order)) { + pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", + (PAGE_SIZE << order) / SZ_1M); + return; + } + + /* + * If 3 GB area is requested on a machine with 4 numa nodes, + * let's allocate 1 GB on first three nodes and ignore the last one. + */ + per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); + pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", + hugetlb_cma_size / SZ_1M, per_node / SZ_1M); + + reserved = 0; + for_each_node_state(nid, N_ONLINE) { + int res; + + size = min(per_node, hugetlb_cma_size - reserved); + size = round_up(size, PAGE_SIZE << order); + + res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order, + 0, false, "hugetlb", + &hugetlb_cma[nid], nid); + if (res) { + pr_warn("hugetlb_cma: reservation failed: err %d, node %d", + res, nid); + continue; + } + + reserved += size; + pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", + size / SZ_1M, nid); + + if (reserved >= hugetlb_cma_size) + break; + } +} + +void __init hugetlb_cma_check(void) +{ + if (!hugetlb_cma_size || cma_reserve_called) + return; + + pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); +} + +#endif /* CONFIG_CMA */ -- cgit v1.2.3-58-ga151 From 8cd3984d81d5fd5e18bccb12d7d228a114ec2508 Mon Sep 17 00:00:00 2001 From: Arjun Roy Date: Fri, 10 Apr 2020 14:33:01 -0700 Subject: mm/memory.c: add vm_insert_pages() Add the ability to insert multiple pages at once to a user VM with lower PTE spinlock operations. The intention of this patch-set is to reduce atomic ops for tcp zerocopy receives, which normally hits the same spinlock multiple times consecutively. 
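A sketch of the intended use from a driver or TCP zerocopy mmap path (vma, pages and nr_pages are assumed to be set up by the caller):

        unsigned long num = nr_pages;   /* in: pages to map; out: pages left unmapped */
        int err;

        err = vm_insert_pages(vma, vma->vm_start, pages, &num);
        if (err)
                pr_err("mapped only %lu of %lu pages: %d\n",
                       nr_pages - num, nr_pages, err);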
[akpm@linux-foundation.org: pte_alloc() no longer takes the `addr' argument] [arjunroy@google.com: add missing page_count() check to vm_insert_pages()] Link: http://lkml.kernel.org/r/20200214005929.104481-1-arjunroy.kdev@gmail.com [arjunroy@google.com: vm_insert_pages() checks if pte_index defined] Link: http://lkml.kernel.org/r/20200228054714.204424-2-arjunroy.kdev@gmail.com Signed-off-by: Arjun Roy Signed-off-by: Eric Dumazet Signed-off-by: Soheil Hassas Yeganeh Signed-off-by: Andrew Morton Cc: David Miller Cc: Matthew Wilcox Cc: Jason Gunthorpe Cc: Stephen Rothwell Link: http://lkml.kernel.org/r/20200128025958.43490-2-arjunroy.kdev@gmail.com Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 + mm/memory.c | 129 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 129 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index e2f938c5a9d8..ed896cedd4c4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2689,6 +2689,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); +int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num); int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, diff --git a/mm/memory.c b/mm/memory.c index 52a3303458cb..f703fe8c8346 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1419,8 +1419,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, } EXPORT_SYMBOL_GPL(zap_vma_ptes); -pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, - spinlock_t **ptl) +static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; @@ -1439,6 +1438,16 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); + return pmd; +} + +pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pmd_t *pmd = walk_to_pmd(mm, addr); + + if (!pmd) + return NULL; return pte_alloc_map_lock(mm, pmd, addr, ptl); } @@ -1491,6 +1500,122 @@ out: return retval; } +#ifdef pte_index +static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, struct page *page, pgprot_t prot) +{ + int err; + + if (!page_count(page)) + return -EINVAL; + err = validate_page_before_insert(page); + return err ? err : insert_page_into_pte_locked( + mm, pte_offset_map(pmd, addr), addr, page, prot); +} + +/* insert_pages() amortizes the cost of spinlock operations + * when inserting pages in a loop. Arch *must* define pte_index. + */ +static int insert_pages(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num, pgprot_t prot) +{ + pmd_t *pmd = NULL; + spinlock_t *pte_lock = NULL; + struct mm_struct *const mm = vma->vm_mm; + unsigned long curr_page_idx = 0; + unsigned long remaining_pages_total = *num; + unsigned long pages_to_write_in_pmd; + int ret; +more: + ret = -EFAULT; + pmd = walk_to_pmd(mm, addr); + if (!pmd) + goto out; + + pages_to_write_in_pmd = min_t(unsigned long, + remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); + + /* Allocate the PTE if necessary; takes PMD lock once only. 
*/ + ret = -ENOMEM; + if (pte_alloc(mm, pmd)) + goto out; + pte_lock = pte_lockptr(mm, pmd); + + while (pages_to_write_in_pmd) { + int pte_idx = 0; + const int batch_size = min_t(int, pages_to_write_in_pmd, 8); + + spin_lock(pte_lock); + for (; pte_idx < batch_size; ++pte_idx) { + int err = insert_page_in_batch_locked(mm, pmd, + addr, pages[curr_page_idx], prot); + if (unlikely(err)) { + spin_unlock(pte_lock); + ret = err; + remaining_pages_total -= pte_idx; + goto out; + } + addr += PAGE_SIZE; + ++curr_page_idx; + } + spin_unlock(pte_lock); + pages_to_write_in_pmd -= batch_size; + remaining_pages_total -= batch_size; + } + if (remaining_pages_total) + goto more; + ret = 0; +out: + *num = remaining_pages_total; + return ret; +} +#endif /* ifdef pte_index */ + +/** + * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. + * @vma: user vma to map to + * @addr: target start user address of these pages + * @pages: source kernel pages + * @num: in: number of pages to map. out: number of pages that were *not* + * mapped. (0 means all pages were successfully mapped). + * + * Preferred over vm_insert_page() when inserting multiple pages. + * + * In case of error, we may have mapped a subset of the provided + * pages. It is the caller's responsibility to account for this case. + * + * The same restrictions apply as in vm_insert_page(). + */ +int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num) +{ +#ifdef pte_index + const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; + + if (addr < vma->vm_start || end_addr >= vma->vm_end) + return -EFAULT; + if (!(vma->vm_flags & VM_MIXEDMAP)) { + BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vma->vm_flags |= VM_MIXEDMAP; + } + /* Defer page refcount checking till we're about to map that page. */ + return insert_pages(vma, addr, pages, num, vma->vm_page_prot); +#else + unsigned long idx = 0, pgcount = *num; + int err; + + for (; idx < pgcount; ++idx) { + err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); + if (err) + break; + } + *num = pgcount - idx; + return err; +#endif /* ifdef pte_index */ +} +EXPORT_SYMBOL(vm_insert_pages); + /** * vm_insert_page - insert single page into user vma * @vma: user vma to map to -- cgit v1.2.3-58-ga151 From c62da0c35d58518ddb26ff641d2485596567fd96 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 10 Apr 2020 14:33:05 -0700 Subject: mm/vma: define a default value for VM_DATA_DEFAULT_FLAGS There are many platforms with exact same value for VM_DATA_DEFAULT_FLAGS This creates a default value for VM_DATA_DEFAULT_FLAGS in line with the existing VM_STACK_DEFAULT_FLAGS. While here, also define some more macros with standard VMA access flag combinations that are used frequently across many platforms. Apart from simplification, this reduces code duplication as well. Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Acked-by: Geert Uytterhoeven Cc: Richard Henderson Cc: Vineet Gupta Cc: Russell King Cc: Catalin Marinas Cc: Mark Salter Cc: Guo Ren Cc: Yoshinori Sato Cc: Brian Cain Cc: Tony Luck Cc: Michal Simek Cc: Ralf Baechle Cc: Paul Burton Cc: Nick Hu Cc: Ley Foon Tan Cc: Jonas Bonn Cc: "James E.J. Bottomley" Cc: Michael Ellerman Cc: Paul Walmsley Cc: Heiko Carstens Cc: Rich Felker Cc: "David S. 
Miller" Cc: Guan Xuetao Cc: Thomas Gleixner Cc: Jeff Dike Cc: Chris Zankel Link: http://lkml.kernel.org/r/1583391014-8170-2-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/page.h | 3 --- arch/arc/include/asm/page.h | 2 +- arch/arm/include/asm/page.h | 4 +--- arch/arm64/include/asm/page.h | 4 +--- arch/c6x/include/asm/page.h | 5 +---- arch/csky/include/asm/page.h | 3 --- arch/h8300/include/asm/page.h | 2 -- arch/hexagon/include/asm/page.h | 3 +-- arch/ia64/include/asm/page.h | 5 +---- arch/m68k/include/asm/page.h | 3 --- arch/microblaze/include/asm/page.h | 2 -- arch/mips/include/asm/page.h | 5 +---- arch/nds32/include/asm/page.h | 3 --- arch/nios2/include/asm/page.h | 3 +-- arch/openrisc/include/asm/page.h | 5 ----- arch/parisc/include/asm/page.h | 3 --- arch/powerpc/include/asm/page.h | 9 ++------- arch/powerpc/include/asm/page_64.h | 7 ++----- arch/riscv/include/asm/page.h | 3 +-- arch/s390/include/asm/page.h | 3 +-- arch/sh/include/asm/page.h | 3 --- arch/sparc/include/asm/page_32.h | 3 --- arch/sparc/include/asm/page_64.h | 3 --- arch/unicore32/include/asm/page.h | 3 --- arch/x86/include/asm/page_types.h | 4 +--- arch/x86/um/asm/vm-flags.h | 10 ++-------- arch/xtensa/include/asm/page.h | 3 --- include/linux/mm.h | 14 ++++++++++++++ 28 files changed, 31 insertions(+), 89 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h index f3fb2848470a..e241bd88880f 100644 --- a/arch/alpha/include/asm/page.h +++ b/arch/alpha/include/asm/page.h @@ -90,9 +90,6 @@ typedef struct page *pgtable_t; #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #endif /* CONFIG_DISCONTIGMEM */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #include diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 0a32e8cfd074..b0dfed0f12be 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -102,7 +102,7 @@ typedef pte_t * pgtable_t; #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* Default Permissions for stack/heaps pages (Non Executable) */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC #define WANT_PAGE_VIRTUAL 1 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index c2b75cba26df..11b058a72a5b 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -161,9 +161,7 @@ extern int pfn_valid(unsigned long); #endif /* !__ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS \ - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #include diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 75d6cd23a679..c01b52add377 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -36,9 +36,7 @@ extern int pfn_valid(unsigned long); #endif /* !__ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS \ - (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #include diff --git a/arch/c6x/include/asm/page.h b/arch/c6x/include/asm/page.h index 70db1e7632bc..40079899084d 100644 --- a/arch/c6x/include/asm/page.h +++ b/arch/c6x/include/asm/page.h @@ -2,10 +2,7 @@ #ifndef _ASM_C6X_PAGE_H #define _ASM_C6X_PAGE_H -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #include diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h index 9738eacefdc7..9b98bf31d57c 100644 --- a/arch/csky/include/asm/page.h +++ b/arch/csky/include/asm/page.h @@ -85,9 +85,6 @@ extern unsigned long va_pa_offset; PHYS_OFFSET_OFFSET) #define virt_to_page(x) (mem_map + MAP_NR(x)) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #define pfn_to_kaddr(x) __va(PFN_PHYS(x)) #include diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h index 8da5124ad344..53e037544239 100644 --- a/arch/h8300/include/asm/page.h +++ b/arch/h8300/include/asm/page.h @@ -6,8 +6,6 @@ #include #define MAP_NR(addr) (((uintptr_t)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #ifndef __ASSEMBLY__ extern unsigned long rom_length; diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h index ee31f36f48f3..7cbf719c578e 100644 --- a/arch/hexagon/include/asm/page.h +++ b/arch/hexagon/include/asm/page.h @@ -93,8 +93,7 @@ struct page; #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr))) /* Default vm area behavior is non-executable. */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC #define pfn_valid(pfn) ((pfn) < max_mapnr) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h index 5798bd2b462c..b69a5499d75b 100644 --- a/arch/ia64/include/asm/page.h +++ b/arch/ia64/include/asm/page.h @@ -218,10 +218,7 @@ get_order (unsigned long size) #define PAGE_OFFSET RGN_BASE(RGN_KERNEL) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \ - (((current->personality & READ_IMPLIES_EXEC) != 0) \ - ? 
VM_EXEC : 0)) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #define GATE_ADDR RGN_BASE(RGN_GATE) diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h index da546487e177..2614a1206f2f 100644 --- a/arch/m68k/include/asm/page.h +++ b/arch/m68k/include/asm/page.h @@ -65,9 +65,6 @@ extern unsigned long _ramend; #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) #define __pfn_to_phys(pfn) PFN_PHYS(pfn) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #endif /* _M68K_PAGE_H */ diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index ae7215c94706..b13463d39b38 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -194,8 +194,6 @@ extern int page_is_ram(unsigned long pfn); #ifdef CONFIG_MMU -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #endif /* CONFIG_MMU */ #endif /* __KERNEL__ */ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 0ba4ce6e2bf3..e2f503fc7a84 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -253,10 +253,7 @@ extern bool __virt_addr_valid(const volatile void *kaddr); #define virt_addr_valid(kaddr) \ __virt_addr_valid((const volatile void *) (kaddr)) -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #include #include diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h index 86b32014c5f9..add33a7f02c8 100644 --- a/arch/nds32/include/asm/page.h +++ b/arch/nds32/include/asm/page.h @@ -59,9 +59,6 @@ typedef struct page *pgtable_t; #endif /* !__ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #endif /* __KERNEL__ */ #endif diff --git a/arch/nios2/include/asm/page.h b/arch/nios2/include/asm/page.h index 79fcac61f6ef..6a989819a7c1 100644 --- a/arch/nios2/include/asm/page.h +++ b/arch/nios2/include/asm/page.h @@ -98,8 +98,7 @@ static inline bool pfn_valid(unsigned long pfn) # define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr))) # define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr))) -# define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +# define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC #include diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h index 01069db59454..aab6e64d6db4 100644 --- a/arch/openrisc/include/asm/page.h +++ b/arch/openrisc/include/asm/page.h @@ -86,11 +86,6 @@ typedef struct page *pgtable_t; #endif /* __ASSEMBLY__ */ - -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #include #include diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 796ae29e9b9a..6b3f6740a6a6 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -180,9 +180,6 @@ extern int npmem_ranges; #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #include #include diff --git a/arch/powerpc/include/asm/page.h 
b/arch/powerpc/include/asm/page.h index 080a0bf8e54b..3ee8df0f66e0 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -240,13 +240,8 @@ static inline bool pfn_valid(unsigned long pfn) * and needs to be executable. This means the whole heap ends * up being executable. */ -#define VM_DATA_DEFAULT_FLAGS32 \ - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ - VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC +#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC #ifdef __powerpc64__ #include diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 5962797f784a..79a9b7c6a132 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -94,11 +94,8 @@ extern u64 ppc64_pft_size; * stack by default, so in the absence of a PT_GNU_STACK program header * we turn execute permission off. */ -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC +#define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC #define VM_STACK_DEFAULT_FLAGS \ (is_32bit_task() ? \ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 8ca1930caa44..2d50f76efe48 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -137,8 +137,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC #include #include diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index f2d4c1bd3429..cc98f9b78fd4 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -181,8 +181,7 @@ int arch_make_page_accessible(struct page *page); #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC #include #include diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 5eef8be3e59f..ea8d68f58e39 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -182,9 +182,6 @@ typedef struct page *pgtable_t; #endif #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #include diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h index b76d59edec8c..478260002836 100644 --- a/arch/sparc/include/asm/page_32.h +++ b/arch/sparc/include/asm/page_32.h @@ -133,9 +133,6 @@ extern unsigned long pfn_base; #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr) -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #include diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index e80f2d5bf62f..254dffd85fb1 100644 --- 
a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -158,9 +158,6 @@ extern unsigned long PAGE_OFFSET; #endif /* !(__ASSEMBLY__) */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #endif /* _SPARC64_PAGE_H */ diff --git a/arch/unicore32/include/asm/page.h b/arch/unicore32/include/asm/page.h index 8a89335673f9..96d6bdf180bd 100644 --- a/arch/unicore32/include/asm/page.h +++ b/arch/unicore32/include/asm/page.h @@ -69,9 +69,6 @@ extern int pfn_valid(unsigned long); #endif /* !__ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #endif diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index c85e15010f48..e27aa6be6320 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -35,9 +35,7 @@ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) -#define VM_DATA_DEFAULT_FLAGS \ - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \ CONFIG_PHYSICAL_ALIGN) diff --git a/arch/x86/um/asm/vm-flags.h b/arch/x86/um/asm/vm-flags.h index 7c297e9e2413..df7a3896f5dd 100644 --- a/arch/x86/um/asm/vm-flags.h +++ b/arch/x86/um/asm/vm-flags.h @@ -9,17 +9,11 @@ #ifdef CONFIG_X86_32 -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #else -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \ - VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_DATA_FLAGS_EXEC) #endif #endif diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index f4771c29c7e9..37ce25ef92d6 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -203,8 +203,5 @@ static inline unsigned long ___pa(unsigned long va) #endif /* __ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - #include #endif /* _XTENSA_PAGE_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index ed896cedd4c4..33076fa149c8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -343,6 +343,20 @@ extern unsigned int kobjsize(const void *objp); /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) +#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) + +/* Common data flag combinations */ +#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \ + VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */ +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC +#endif + #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS #endif -- cgit v1.2.3-58-ga151 From 6cb4d9a2870d2062e34c93bfef4d52fca3fe42d1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 10 Apr 2020 14:33:09 -0700 Subject: mm/vma: introduce VM_ACCESS_FLAGS There are many places where all basic VMA access flags (read, write, exec) are initialized or checked against as a group. One such example is during page fault. The existing vma_is_accessible() wrapper already creates the notion of VMA accessibility as group access permissions. Hence let's just create VM_ACCESS_FLAGS (VM_READ|VM_WRITE|VM_EXEC) which will not only reduce code duplication but also extend the VMA accessibility concept in general. Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Cc: Russell King Cc: Catalin Marinas Cc: Mark Salter Cc: Nick Hu Cc: Ley Foon Tan Cc: Michael Ellerman Cc: Heiko Carstens Cc: Yoshinori Sato Cc: Guan Xuetao Cc: Dave Hansen Cc: Thomas Gleixner Cc: Rob Springer Cc: Greg Kroah-Hartman Cc: Geert Uytterhoeven Link: http://lkml.kernel.org/r/1583391014-8170-3-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- arch/arm/mm/fault.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/nds32/mm/fault.c | 2 +- arch/powerpc/mm/book3s64/pkeys.c | 2 +- arch/s390/mm/fault.c | 2 +- arch/unicore32/mm/fault.c | 2 +- arch/x86/mm/pkeys.c | 2 +- drivers/staging/gasket/gasket_core.c | 2 +- include/linux/mm.h | 6 +++++- mm/mmap.c | 2 +- mm/mprotect.c | 4 ++-- 11 files changed, 16 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index b598e6978b29..2dd5c41cbb8d 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -189,7 +189,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) */ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) { - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; + unsigned int mask = VM_ACCESS_FLAGS; if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) mask = VM_WRITE; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1027851d469a..c9cedc0432d2 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, const struct fault_info *inf; struct mm_struct *mm = current->mm; vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; + unsigned long vm_flags = VM_ACCESS_FLAGS; unsigned int mm_flags = FAULT_FLAG_DEFAULT; if (kprobe_page_fault(regs, esr)) diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index 0cf0c08c7da2..f331e533edc2 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -79,7 +79,7 @@ void do_page_fault(unsigned long entry, unsigned long addr, struct vm_area_struct *vma; int si_code; vm_fault_t fault; - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; + unsigned int mask = VM_ACCESS_FLAGS; unsigned int flags = FAULT_FLAG_DEFAULT;
error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE); diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 07527f1ed108..1199fc2bfaec 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -315,7 +315,7 @@ int __execute_only_pkey(struct mm_struct *mm) static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) { /* Do this check first since the vm_flags should be hot */ - if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC) + if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC) return false; return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index d56f67745e3e..9822a1fd1c6b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -580,7 +580,7 @@ void do_dat_exception(struct pt_regs *regs) int access; vm_fault_t fault; - access = VM_READ | VM_EXEC | VM_WRITE; + access = VM_ACCESS_FLAGS; fault = do_exception(regs, access); if (unlikely(fault)) do_fault_error(regs, access, fault); diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index a9bd08fbe588..3022104aa613 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -149,7 +149,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) */ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) { - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; + unsigned int mask = VM_ACCESS_FLAGS; if (!(fsr ^ 0x12)) /* write? */ mask = VM_WRITE; diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index c6f84c0b5d7a..8873ed1438a9 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -63,7 +63,7 @@ int __execute_only_pkey(struct mm_struct *mm) static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) { /* Do this check first since the vm_flags should be hot */ - if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC) + if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC) return false; if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey) return false; diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index cd181a64f737..8e0575fcb4c8 100644 --- a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -689,7 +689,7 @@ static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev, /* Make sure that no wrong flags are set. */ requested_permissions = - (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC)); + (vma->vm_flags & VM_ACCESS_FLAGS); if (requested_permissions & ~(bar_permissions)) { dev_dbg(gasket_dev->dev, "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n", diff --git a/include/linux/mm.h b/include/linux/mm.h index 33076fa149c8..4db1522d7c48 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -369,6 +369,10 @@ extern unsigned int kobjsize(const void *objp); #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +/* VMA basic access permission flags */ +#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) + + /* * Special vmas that are non-mergable, non-mlock()able. 
*/ @@ -646,7 +650,7 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) { - return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC); + return vma->vm_flags & VM_ACCESS_FLAGS; } #ifdef CONFIG_SHMEM diff --git a/mm/mmap.c b/mm/mmap.c index de07bbc0e21f..f609e9ec4a25 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1224,7 +1224,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct * return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && - !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && + !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } diff --git a/mm/mprotect.c b/mm/mprotect.c index 1d823b050329..494192ca954b 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -419,7 +419,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, */ if (arch_has_pfn_modify_check() && (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && - (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) { + (newflags & VM_ACCESS_FLAGS) == 0) { pgprot_t new_pgprot = vm_get_page_prot(newflags); error = walk_page_range(current->mm, start, end, @@ -598,7 +598,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, newflags |= (vma->vm_flags & ~mask_off_old_flags); /* newflags >> 4 shift VM_MAY% in place of VM_% */ - if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { + if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) { error = -EACCES; goto out; } -- cgit v1.2.3-58-ga151 From 78e7c5af080b86e9f28afac5a8307ddab1d2c1a3 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 10 Apr 2020 14:33:13 -0700 Subject: mm/special: create generic fallbacks for pte_special() and pte_mkspecial() Currently there are many platforms that don't enable ARCH_HAS_PTE_SPECIAL but are required to define quite similar fallback stubs for the special page table entry helpers pte_special() and pte_mkspecial(), as they get built in generic MM without a config check. This creates two generic fallback stub definitions for these helpers, eliminating much code duplication. The mips platform has a special case where pte_special() and pte_mkspecial() visibility is wider than what ARCH_HAS_PTE_SPECIAL enablement requires. This restricts their visibility in order to avoid the redefinitions, and the subsequent build failure, that the new generic stubs would otherwise expose. The arm platform's set_pte_at() definition needs to be moved into a C file just to prevent a build failure. [anshuman.khandual@arm.com: use defined(CONFIG_ARCH_HAS_PTE_SPECIAL) in mips per Thomas] Link: http://lkml.kernel.org/r/1583851924-21603-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton Acked-by: Guo Ren [csky] Acked-by: Geert Uytterhoeven [m68k] Acked-by: Stafford Horne [openrisc] Acked-by: Helge Deller [parisc] Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Russell King Cc: Brian Cain Cc: Tony Luck Cc: Fenghua Yu Cc: Sam Creasey Cc: Michal Simek Cc: Ralf Baechle Cc: Paul Burton Cc: Nick Hu Cc: Greentime Hu Cc: Vincent Chen Cc: Ley Foon Tan Cc: Jonas Bonn Cc: Stefan Kristiansson Cc: "James E.J. Bottomley" Cc: "David S.
Miller" Cc: Jeff Dike Cc: Richard Weinberger Cc: Anton Ivanov Cc: Guan Xuetao Cc: Chris Zankel Cc: Max Filippov Cc: Thomas Bogendoerfer Link: http://lkml.kernel.org/r/1583802551-15406-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/pgtable.h | 2 -- arch/arm/include/asm/pgtable-2level.h | 2 -- arch/arm/include/asm/pgtable.h | 15 ++--------- arch/arm/mm/mmu.c | 14 ++++++++++ arch/csky/include/asm/pgtable.h | 3 --- arch/hexagon/include/asm/pgtable.h | 2 -- arch/ia64/include/asm/pgtable.h | 2 -- arch/m68k/include/asm/mcf_pgtable.h | 10 -------- arch/m68k/include/asm/motorola_pgtable.h | 2 -- arch/m68k/include/asm/sun3_pgtable.h | 2 -- arch/microblaze/include/asm/pgtable.h | 4 --- arch/mips/include/asm/pgtable.h | 44 ++++++++++++++++++++++---------- arch/nds32/include/asm/pgtable.h | 9 ------- arch/nios2/include/asm/pgtable.h | 3 --- arch/openrisc/include/asm/pgtable.h | 2 -- arch/parisc/include/asm/pgtable.h | 2 -- arch/sparc/include/asm/pgtable_32.h | 7 ----- arch/um/include/asm/pgtable.h | 10 -------- arch/unicore32/include/asm/pgtable.h | 3 --- arch/xtensa/include/asm/pgtable.h | 3 --- include/linux/mm.h | 12 +++++++++ 21 files changed, 58 insertions(+), 95 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 299791ce14b6..0267aa8a4f86 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -268,7 +268,6 @@ extern inline void pud_clear(pud_t * pudp) { pud_val(*pudp) = 0; } extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); } extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -extern inline int pte_special(pte_t pte) { return 0; } extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; } extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; } @@ -276,7 +275,6 @@ extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); ret extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; } extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; } extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; } -extern inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 0d3ea35c97fe..9e084a464a97 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -211,8 +211,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define pmd_addr_end(addr,end) (end) #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) -#define pte_special(pte) (0) -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * We don't have huge page support for short descriptors, for the moment diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 0483cf413315..befc8fcec98f 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -243,19 +243,8 @@ static inline void __sync_icache_dcache(pte_t pteval) extern void __sync_icache_dcache(pte_t pteval); #endif -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - unsigned long ext = 0; - - if 
(addr < TASK_SIZE && pte_valid_user(pteval)) { - if (!pte_special(pteval)) - __sync_icache_dcache(pteval); - ext |= PTE_EXT_NG; - } - - set_pte_ext(ptep, pteval, ext); -} +void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval); static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) { diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 69a337df619f..ec8d0008bfa1 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1646,3 +1646,17 @@ void __init early_mm_init(const struct machine_desc *mdesc) build_mem_type_table(); early_paging_init(mdesc); } + +void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_valid_user(pteval)) { + if (!pte_special(pteval)) + __sync_icache_dcache(pteval); + ext |= PTE_EXT_NG; + } + + set_pte_ext(ptep, pteval, ext); +} diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 9b7764cb7645..9ab4a445ad99 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -110,9 +110,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; extern void load_pgd(unsigned long pg_dir); extern pte_t invalid_pte_table[PTRS_PER_PTE]; -static inline int pte_special(pte_t pte) { return 0; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - static inline void set_pte(pte_t *p, pte_t pte) { *p = pte; diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index 2fec20ad939e..d383e8bea5b2 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -158,8 +158,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */ /* Seems to be zero even in architectures where the zero page is firewalled? 
*/ #define FIRST_USER_ADDRESS 0UL -#define pte_special(pte) 0 -#define pte_mkspecial(pte) (pte) /* HUGETLB not working currently */ #ifdef CONFIG_HUGETLB_PAGE diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index d602e7c622db..0e7b645b76c6 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -298,7 +298,6 @@ extern unsigned long VMALLOC_END; #define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0) #define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0) #define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0) -#define pte_special(pte) 0 /* * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the @@ -311,7 +310,6 @@ extern unsigned long VMALLOC_END; #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) #define pte_mkhuge(pte) (__pte(pte_val(pte))) -#define pte_mkspecial(pte) (pte) /* * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h index b9f45aeded25..0031cd387b75 100644 --- a/arch/m68k/include/asm/mcf_pgtable.h +++ b/arch/m68k/include/asm/mcf_pgtable.h @@ -235,11 +235,6 @@ static inline int pte_young(pte_t pte) return pte_val(pte) & CF_PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) -{ - return 0; -} - static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~CF_PAGE_WRITABLE; @@ -312,11 +307,6 @@ static inline pte_t pte_mkcache(pte_t pte) return pte; } -static inline pte_t pte_mkspecial(pte_t pte) -{ - return pte; -} - #define swapper_pg_dir kernel_pg_dir extern pgd_t kernel_pg_dir[PTRS_PER_PGD]; diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h index 4b91a470ad58..48f19f0ab1e7 100644 --- a/arch/m68k/include/asm/motorola_pgtable.h +++ b/arch/m68k/include/asm/motorola_pgtable.h @@ -174,7 +174,6 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp) static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } @@ -192,7 +191,6 @@ static inline pte_t pte_mkcache(pte_t pte) pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode; return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h index bc4155264810..0caa18a08437 100644 --- a/arch/m68k/include/asm/sun3_pgtable.h +++ b/arch/m68k/include/asm/sun3_pgtable.h @@ -155,7 +155,6 @@ static inline void pmd_clear (pmd_t *pmdp) { pmd_val (*pmdp) = 0; } static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEABLE; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; 
return pte; } @@ -168,7 +167,6 @@ static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE //static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; } // until then, use: static inline pte_t pte_mkcache(pte_t pte) { return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t kernel_pg_dir[PTRS_PER_PGD]; diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 45b30878fc17..6b056f6545d8 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -77,10 +77,6 @@ extern pte_t *va_to_pte(unsigned long address); * Undefined behaviour if not.. */ -static inline int pte_special(pte_t pte) { return 0; } - -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - /* Start and end of the vmalloc area. */ /* Make sure to map the vmalloc area above the pinned kernel memory area of 32Mb. */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index aef5378f909c..f1801e7a4b15 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -269,6 +269,36 @@ cache_sync_done: */ extern pgd_t swapper_pg_dir[]; +/* + * Platform specific pte_special() and pte_mkspecial() definitions + * are required only when ARCH_HAS_PTE_SPECIAL is enabled. + */ +#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL) +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) +static inline int pte_special(pte_t pte) +{ + return pte.pte_low & _PAGE_SPECIAL; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte.pte_low |= _PAGE_SPECIAL; + return pte; +} +#else +static inline int pte_special(pte_t pte) +{ + return pte_val(pte) & _PAGE_SPECIAL; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} +#endif +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ + /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
@@ -277,7 +307,6 @@ extern pgd_t swapper_pg_dir[]; static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return pte.pte_low & _PAGE_SPECIAL; } static inline pte_t pte_wrprotect(pte_t pte) { @@ -338,17 +367,10 @@ static inline pte_t pte_mkyoung(pte_t pte) } return pte; } - -static inline pte_t pte_mkspecial(pte_t pte) -{ - pte.pte_low |= _PAGE_SPECIAL; - return pte; -} #else static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline pte_t pte_wrprotect(pte_t pte) { @@ -392,12 +414,6 @@ static inline pte_t pte_mkyoung(pte_t pte) return pte; } -static inline pte_t pte_mkspecial(pte_t pte) -{ - pte_val(pte) |= _PAGE_SPECIAL; - return pte; -} - #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index 6abc58ac406d..476cc4dd1709 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h @@ -286,15 +286,6 @@ PTE_BIT_FUNC(mkclean, &=~_PAGE_D); PTE_BIT_FUNC(mkdirty, |=_PAGE_D); PTE_BIT_FUNC(mkold, &=~_PAGE_YOUNG); PTE_BIT_FUNC(mkyoung, |=_PAGE_YOUNG); -static inline int pte_special(pte_t pte) -{ - return 0; -} - -static inline pte_t pte_mkspecial(pte_t pte) -{ - return pte; -} /* * Mark the prot value as uncacheable and unbufferable. 
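(Aside: the reason every one of these architectures had to carry the no-op stubs is that generic MM code calls pte_special() and pte_mkspecial() unconditionally, with no CONFIG_ARCH_HAS_PTE_SPECIAL guard at the call sites. A simplified consumer-side sketch, loosely modelled on vm_normal_page() in mm/memory.c; the function name and body here are illustrative, not the verbatim kernel code:

#include <linux/mm.h>

/* Sketch: generic MM assumes pte_special() exists on every platform. */
static struct page *normal_page_sketch(pte_t pte)
{
	if (pte_special(pte))
		return NULL;	/* "special" ptes have no struct page to hand out */
	return pfn_to_page(pte_pfn(pte));
}

With the generic fallbacks added to include/linux/mm.h at the end of this patch, platforms without CONFIG_ARCH_HAS_PTE_SPECIAL get these trivial definitions for free and can drop the per-arch copies removed throughout this patch.)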
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 99985d8b7166..f98b7f4519ba 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -113,7 +113,6 @@ static inline int pte_dirty(pte_t pte) \ { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) \ { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return 0; } #define pgprot_noncached pgprot_noncached @@ -168,8 +167,6 @@ static inline pte_t pte_mkdirty(pte_t pte) return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index 248d22d8faa7..7f3fb9ceb083 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -236,8 +236,6 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return 0; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_wrprotect(pte_t pte) { diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index f0a365950536..9832c73a7021 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -377,7 +377,6 @@ static inline void pud_clear(pud_t *pud) { static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } -static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } @@ -385,7 +384,6 @@ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; ret static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * Huge pte definitions. 
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 6d6f44c0cad9..0de659ae0ba4 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -223,11 +223,6 @@ static inline int pte_young(pte_t pte) return pte_val(pte) & SRMMU_REF; } -static inline int pte_special(pte_t pte) -{ - return 0; -} - static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE); @@ -258,8 +253,6 @@ static inline pte_t pte_mkyoung(pte_t pte) return __pte(pte_val(pte) | SRMMU_REF); } -#define pte_mkspecial(pte) (pte) - #define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot) static inline unsigned long pte_pfn(pte_t pte) diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index 2daa58df2190..b5ddf5d98bd5 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -167,11 +167,6 @@ static inline int pte_newprot(pte_t pte) return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT))); } -static inline int pte_special(pte_t pte) -{ - return 0; -} - /* * ================================= * Flags setting section. @@ -247,11 +242,6 @@ static inline pte_t pte_mknewpage(pte_t pte) return(pte); } -static inline pte_t pte_mkspecial(pte_t pte) -{ - return(pte); -} - static inline void set_pte(pte_t *pteptr, pte_t pteval) { pte_copy(*pteptr, pteval); diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h index c8f7ba12f309..3b8731b3a937 100644 --- a/arch/unicore32/include/asm/pgtable.h +++ b/arch/unicore32/include/asm/pgtable.h @@ -177,7 +177,6 @@ extern struct page *empty_zero_page; #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & PTE_YOUNG) #define pte_exec(pte) (pte_val(pte) & PTE_EXEC) -#define pte_special(pte) (0) #define PTE_BIT_FUNC(fn, op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -189,8 +188,6 @@ PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY); PTE_BIT_FUNC(mkold, &= ~PTE_YOUNG); PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG); -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - /* * Mark the prot value as uncacheable. 
*/ diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 27ac17c9da09..8be0c0568c50 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -266,7 +266,6 @@ static inline void paging_init(void) { } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; } @@ -280,8 +279,6 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITABLE; return pte; } -static inline pte_t pte_mkspecial(pte_t pte) - { return pte; } #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK)) diff --git a/include/linux/mm.h b/include/linux/mm.h index 4db1522d7c48..5a323422d783 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1927,6 +1927,18 @@ static inline void sync_mm_rss(struct mm_struct *mm) } #endif +#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL +static inline int pte_special(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return pte; +} +#endif + #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP static inline int pte_devmap(pte_t pte) { -- cgit v1.2.3-58-ga151 From 96c6b598135e7cec66161e8943823470c7c8954e Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 10 Apr 2020 14:33:17 -0700 Subject: mm/memory_hotplug: drop the flags field from struct mhp_restrictions Patch series "Allow setting caching mode in arch_add_memory() for P2PDMA", v4. Currently, the page tables created using memremap_pages() are always created with the PAGE_KERNEL caching mode. However, the P2PDMA code is creating pages for PCI BAR memory which should never be accessed through the cache and should instead use either WC or UC. This still works in most cases, on x86, because the MTRR registers typically override the caching settings in the page tables for all of the IO memory to be UC-. However, this tends not to work so well on other arches or some rare x86 machines that have firmware which does not set up the MTRR registers in this way. Instead of this, this series proposes a change to arch_add_memory() to take the pgprot required by the mapping, which allows us to explicitly set pagetable entries for P2PDMA memory to UC. This change is pretty routine for most of the arches: x86_64, arm64 and powerpc simply need to thread the pgprot through to where the page tables are set up. x86_32 unfortunately sets up the page tables at boot so must use __set_memory_prot() to change their caching mode. ia64, s390 and sh don't appear to have an easy way to change the page tables so, for now at least, we just return -EINVAL on such mappings and thus they will not support P2PDMA memory until the work for this is done. This should be fine as they don't yet support ZONE_DEVICE. This patch (of 7): This variable is not used anywhere and should therefore be removed from the structure.
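(For clarity, the net effect of this first patch on the structure, reconstructed from the hunk below:

/* before */
struct mhp_restrictions {
	unsigned long flags;		/* MHP_ flags: never set or read anywhere */
	struct vmem_altmap *altmap;	/* alternative allocator for memmap array */
};

/* after */
struct mhp_restrictions {
	struct vmem_altmap *altmap;	/* alternative allocator for memmap array */
};

Only the unused flags field goes away; altmap behaviour is unchanged.)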
Signed-off-by: Logan Gunthorpe Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Reviewed-by: Dan Williams Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Catalin Marinas Cc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Eric Badger Cc: "H. Peter Anvin" Cc: Jason Gunthorpe Cc: Michael Ellerman Cc: Paul Mackerras Link: http://lkml.kernel.org/r/20200306170846.9333-2-logang@deltatee.com Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ef55115320fb..7c1bcff11672 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -59,11 +59,9 @@ enum { /* * Restrictions for the memory hotplug: - * flags: MHP_ flags * altmap: alternative allocator for memmap array */ struct mhp_restrictions { - unsigned long flags; struct vmem_altmap *altmap; }; -- cgit v1.2.3-58-ga151 From f5637d3b42ab0465ef71d5fb8461bce97fba95e8 Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 10 Apr 2020 14:33:21 -0700 Subject: mm/memory_hotplug: rename mhp_restrictions to mhp_params The mhp_restrictions struct really doesn't specify anything resembling a restriction anymore so rename it to be mhp_params as it is a list of extended parameters. Signed-off-by: Logan Gunthorpe Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Reviewed-by: Dan Williams Acked-by: Michal Hocko Cc: Andy Lutomirski Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Dave Hansen Cc: Eric Badger Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jason Gunthorpe Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/20200306170846.9333-3-logang@deltatee.com Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmu.c | 4 ++-- arch/ia64/mm/init.c | 4 ++-- arch/powerpc/mm/mem.c | 4 ++-- arch/s390/mm/init.c | 6 +++--- arch/sh/mm/init.c | 4 ++-- arch/x86/mm/init_32.c | 4 ++-- arch/x86/mm/init_64.c | 8 ++++---- include/linux/memory_hotplug.h | 16 ++++++++-------- mm/memory_hotplug.c | 8 ++++---- mm/memremap.c | 8 ++++---- 10 files changed, 33 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 9b08f7c7e6f0..6d4e9c2b4ed0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1374,7 +1374,7 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) } int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { int ret, flags = 0; @@ -1387,7 +1387,7 @@ int arch_add_memory(int nid, u64 start, u64 size, memblock_clear_nomap(start, size); ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, - restrictions); + params); if (ret) __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index b01d68a2d5d9..97bbc23ea1e3 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -670,13 +670,13 @@ mem_init (void) #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; - ret = __add_pages(nid, start_pfn, nr_pages, restrictions); + ret = 
__add_pages(nid, start_pfn, nr_pages, params); if (ret) printk("%s: Problem encountered in __add_pages() as ret=%d\n", __func__, ret); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 9b4f5fb719e0..e1cc58115816 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -122,7 +122,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, } int __ref arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; @@ -138,7 +138,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, return -EFAULT; } - return __add_pages(nid, start_pfn, nr_pages, restrictions); + return __add_pages(nid, start_pfn, nr_pages, params); } void __ref arch_remove_memory(int nid, u64 start, u64 size, diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ac44bd76db4b..e9e4a7abd0cc 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -268,20 +268,20 @@ device_initcall(s390_cma_mem_init); #endif /* CONFIG_CMA */ int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { unsigned long start_pfn = PFN_DOWN(start); unsigned long size_pages = PFN_DOWN(size); int rc; - if (WARN_ON_ONCE(restrictions->altmap)) + if (WARN_ON_ONCE(params->altmap)) return -EINVAL; rc = vmem_add_mapping(start, size); if (rc) return rc; - rc = __add_pages(nid, start_pfn, size_pages, restrictions); + rc = __add_pages(nid, start_pfn, size_pages, params); if (rc) vmem_remove_mapping(start, size); return rc; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index d1b1ff2be17a..e5114c053364 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -406,14 +406,14 @@ void __init mem_init(void) #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { unsigned long start_pfn = PFN_DOWN(start); unsigned long nr_pages = size >> PAGE_SHIFT; int ret; /* We only have ZONE_NORMAL, so this is easy.. 
*/ - ret = __add_pages(nid, start_pfn, nr_pages, restrictions); + ret = __add_pages(nid, start_pfn, nr_pages, params); if (unlikely(ret)) printk("%s: Failed, __add_pages() == %d\n", __func__, ret); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index de73992b8432..d736c8625503 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -819,12 +819,12 @@ void __init mem_init(void) #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - return __add_pages(nid, start_pfn, nr_pages, restrictions); + return __add_pages(nid, start_pfn, nr_pages, params); } void arch_remove_memory(int nid, u64 start, u64 size, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 0a14711d3a93..faa86a9a3b0d 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -843,11 +843,11 @@ static void update_end_of_memory_vars(u64 start, u64 size) } int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { int ret; - ret = __add_pages(nid, start_pfn, nr_pages, restrictions); + ret = __add_pages(nid, start_pfn, nr_pages, params); WARN_ON_ONCE(ret); /* update max_pfn, max_low_pfn and high_memory */ @@ -858,14 +858,14 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, } int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; init_memory_mapping(start, start + size); - return add_pages(nid, start_pfn, nr_pages, restrictions); + return add_pages(nid, start_pfn, nr_pages, params); } #define PAGE_INUSE 0xFD diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 7c1bcff11672..75f0f6304735 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -58,10 +58,10 @@ enum { }; /* - * Restrictions for the memory hotplug: - * altmap: alternative allocator for memmap array + * Extended parameters for memory hotplug: + * altmap: alternative allocator for memmap array (optional) */ -struct mhp_restrictions { +struct mhp_params { struct vmem_altmap *altmap; }; @@ -112,7 +112,7 @@ extern int restore_online_page_callback(online_page_callback_t callback); extern int try_online_node(int nid); extern int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions); + struct mhp_params *params); extern u64 max_mem_size; extern int memhp_online_type_from_str(const char *str); @@ -133,17 +133,17 @@ extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, - struct mhp_restrictions *restrictions); + struct mhp_params *params); #ifndef CONFIG_ARCH_HAS_ADD_PAGES static inline int add_pages(int nid, unsigned long start_pfn, - unsigned long nr_pages, struct mhp_restrictions *restrictions) + unsigned long nr_pages, struct mhp_params *params) { - return __add_pages(nid, start_pfn, nr_pages, restrictions); + return __add_pages(nid, start_pfn, nr_pages, params); } #else /* ARCH_HAS_ADD_PAGES */ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, - struct mhp_restrictions *restrictions); + struct mhp_params 
*params); #endif /* ARCH_HAS_ADD_PAGES */ #ifdef CONFIG_NUMA diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 635e8e286598..fbfe7b40f552 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -304,12 +304,12 @@ static int check_hotplug_memory_addressable(unsigned long pfn, * add the new pages. */ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { const unsigned long end_pfn = pfn + nr_pages; unsigned long cur_nr_pages; int err; - struct vmem_altmap *altmap = restrictions->altmap; + struct vmem_altmap *altmap = params->altmap; err = check_hotplug_memory_addressable(pfn, nr_pages); if (err) @@ -1002,7 +1002,7 @@ static int online_memory_block(struct memory_block *mem, void *arg) */ int __ref add_memory_resource(int nid, struct resource *res) { - struct mhp_restrictions restrictions = {}; + struct mhp_params params = {}; u64 start, size; bool new_node = false; int ret; @@ -1030,7 +1030,7 @@ int __ref add_memory_resource(int nid, struct resource *res) new_node = ret; /* call arch's memory hotadd */ - ret = arch_add_memory(nid, start, size, &restrictions); + ret = arch_add_memory(nid, start, size, &params); if (ret < 0) goto error; diff --git a/mm/memremap.c b/mm/memremap.c index bbf457c4f166..b0b5170843ff 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -184,7 +184,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) { struct resource *res = &pgmap->res; struct dev_pagemap *conflict_pgmap; - struct mhp_restrictions restrictions = { + struct mhp_params params = { /* * We do not want any optional features only our own memmap */ @@ -302,7 +302,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) */ if (pgmap->type == MEMORY_DEVICE_PRIVATE) { error = add_pages(nid, PHYS_PFN(res->start), - PHYS_PFN(resource_size(res)), &restrictions); + PHYS_PFN(resource_size(res)), &params); } else { error = kasan_add_zero_shadow(__va(res->start), resource_size(res)); if (error) { @@ -311,7 +311,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) } error = arch_add_memory(nid, res->start, resource_size(res), - &restrictions); + &params); } if (!error) { @@ -319,7 +319,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; move_pfn_range_to_zone(zone, PHYS_PFN(res->start), - PHYS_PFN(resource_size(res)), restrictions.altmap); + PHYS_PFN(resource_size(res)), params.altmap); } mem_hotplug_done(); -- cgit v1.2.3-58-ga151 From bfeb022f8fe4c5afdcfd7a3d868fac9765f9bcad Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 10 Apr 2020 14:33:36 -0700 Subject: mm/memory_hotplug: add pgprot_t to mhp_params devm_memremap_pages() is currently used by the PCI P2PDMA code to create struct page mappings for IO memory. At present, these mappings are created with PAGE_KERNEL which implies setting the PAT bits to be WB. However, on x86, an mtrr register will typically override this and force the cache type to be UC-. In the case the firmware doesn't set this register, it is effectively WB and will typically result in a machine check exception when it's accessed. Other arches are not currently likely to function correctly seeing they don't have any MTRR registers to fall back on. To solve this, provide a way to specify the pgprot value explicitly to arch_add_memory(). Of the arches that support MEMORY_HOTPLUG: x86_64, arm64 and powerpc need a simple change to pass the pgprot_t down to their respective functions which set up the page tables.
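(As an illustration of the resulting interface, a hot-add caller can now pick the caching mode explicitly. This is a hypothetical sketch, not code from this series; the pgprot value is an example chosen to match the P2PDMA motivation above:

#include <linux/mm.h>
#include <linux/memory_hotplug.h>

static int hotadd_uncached_sketch(int nid, u64 start, u64 size)
{
	struct mhp_params params = {
		/* e.g. UC for PCI BAR memory that must bypass the cache */
		.pgprot = pgprot_noncached(PAGE_KERNEL),
	};

	/* __add_pages() now rejects a pgprot that was left zeroed */
	return arch_add_memory(nid, start, size, &params);
}

Regular RAM hotplug keeps the old behaviour by filling in .pgprot = PAGE_KERNEL, as add_memory_resource() does in the diff below.)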
For x86_32, set the page tables explicitly using __set_memory_prot() (seeing they are already mapped). For ia64, s390 and sh, reject anything but PAGE_KERNEL settings -- this should be fine, for now, seeing these architectures don't support ZONE_DEVICE. A check in __add_pages() is also added to ensure the pgprot parameter was set for all arches. Signed-off-by: Logan Gunthorpe Signed-off-by: Andrew Morton Acked-by: David Hildenbrand Acked-by: Michal Hocko Acked-by: Dan Williams Cc: Andy Lutomirski Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Dave Hansen Cc: Eric Badger Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jason Gunthorpe Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/20200306170846.9333-7-logang@deltatee.com Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmu.c | 3 ++- arch/ia64/mm/init.c | 3 +++ arch/powerpc/mm/mem.c | 3 ++- arch/s390/mm/init.c | 3 +++ arch/sh/mm/init.c | 3 +++ arch/x86/mm/init_32.c | 12 ++++++++++++ arch/x86/mm/init_64.c | 2 +- include/linux/memory_hotplug.h | 3 +++ mm/memory_hotplug.c | 5 ++++- mm/memremap.c | 6 +++--- 10 files changed, 36 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6d4e9c2b4ed0..a374e4f51a62 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1382,7 +1382,8 @@ int arch_add_memory(int nid, u64 start, u64 size, flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), - size, PAGE_KERNEL, __pgd_pgtable_alloc, flags); + size, params->pgprot, __pgd_pgtable_alloc, + flags); memblock_clear_nomap(start, size); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 97bbc23ea1e3..d637b4ea3147 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -676,6 +676,9 @@ int arch_add_memory(int nid, u64 start, u64 size, unsigned long nr_pages = size >> PAGE_SHIFT; int ret; + if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) + return -EINVAL; + ret = __add_pages(nid, start_pfn, nr_pages, params); if (ret) printk("%s: Problem encountered in __add_pages() as ret=%d\n", diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index bf63ab04db63..041ed7cfd341 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -132,7 +132,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, resize_hpt_for_hotplug(memblock_phys_mem_size()); start = (unsigned long)__va(start); - rc = create_section_mapping(start, start + size, nid, PAGE_KERNEL); + rc = create_section_mapping(start, start + size, nid, + params->pgprot); if (rc) { pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n", start, start + size, rc); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index e9e4a7abd0cc..87b2d024e75a 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -277,6 +277,9 @@ int arch_add_memory(int nid, u64 start, u64 size, if (WARN_ON_ONCE(params->altmap)) return -EINVAL; + if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) + return -EINVAL; + rc = vmem_add_mapping(start, size); if (rc) return rc; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index e5114c053364..b9de2d4fa57e 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -412,6 +412,9 @@ int arch_add_memory(int nid, u64 start, u64 size, unsigned long nr_pages = size >> PAGE_SHIFT; int ret; + if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) + return -EINVAL; + /* We
only have ZONE_NORMAL, so this is easy.. */ ret = __add_pages(nid, start_pfn, nr_pages, params); if (unlikely(ret)) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index ac75a8397804..4222a010057a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -824,6 +824,18 @@ int arch_add_memory(int nid, u64 start, u64 size, { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + /* + * The page tables were already mapped at boot so if the caller + * requests a different mapping type then we must change all the + * pages with __set_memory_prot(). + */ + if (params->pgprot.pgprot != PAGE_KERNEL.pgprot) { + ret = __set_memory_prot(start, nr_pages, params->pgprot); + if (ret) + return ret; + } return __add_pages(nid, start_pfn, nr_pages, params); } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7480de743105..3b289c2f75cd 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -867,7 +867,7 @@ int arch_add_memory(int nid, u64 start, u64 size, unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - init_memory_mapping(start, start + size, PAGE_KERNEL); + init_memory_mapping(start, start + size, params->pgprot); return add_pages(nid, start_pfn, nr_pages, params); } #define PAGE_INUSE 0xFD diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 75f0f6304735..93d9ada74ddd 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -60,9 +60,12 @@ enum { /* * Extended parameters for memory hotplug: * altmap: alternative allocator for memmap array (optional) + * pgprot: page protection flags to apply to newly created page tables + * (required) */ struct mhp_params { struct vmem_altmap *altmap; + pgprot_t pgprot; }; /* diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index fbfe7b40f552..fc0aad0bc1f5 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -311,6 +311,9 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, int err; struct vmem_altmap *altmap = params->altmap; + if (WARN_ON_ONCE(!params->pgprot.pgprot)) + return -EINVAL; + err = check_hotplug_memory_addressable(pfn, nr_pages); if (err) return err; @@ -1002,7 +1005,7 @@ static int online_memory_block(struct memory_block *mem, void *arg) */ int __ref add_memory_resource(int nid, struct resource *res) { - struct mhp_params params = {}; + struct mhp_params params = { .pgprot = PAGE_KERNEL }; u64 start, size; bool new_node = false; int ret; diff --git a/mm/memremap.c b/mm/memremap.c index b0b5170843ff..bc167cde3237 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -189,8 +189,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) * We do not want any optional features only our own memmap */ .altmap = pgmap_altmap(pgmap), + .pgprot = PAGE_KERNEL, }; - pgprot_t pgprot = PAGE_KERNEL; int error, is_ram; bool need_devmap_managed = true; @@ -282,8 +282,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) if (nid < 0) nid = numa_mem_id(); - error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0, - resource_size(res)); + error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start), + 0, resource_size(res)); if (error) goto err_pfn_remap; -- cgit v1.2.3-58-ga151 From 149ed3d404c9bd00f0fadc35215a9e7a54c5cfd0 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Fri, 10 Apr 2020 14:34:00 -0700 Subject: change email address for Pali Rohár MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For security
reasons I stopped using the gmail account, and my kernel address is now an up-to-date alias to my personal address. People periodically send me emails to the address which they found in the source code of drivers, so this change reflects the current state where people can contact me. [ Added .mailmap entry as per Joe Perches - Linus ] Signed-off-by: Pali Rohár Signed-off-by: Andrew Morton Cc: Greg Kroah-Hartman Cc: Joe Perches Link: http://lkml.kernel.org/r/20200307104237.8199-1-pali@kernel.org Signed-off-by: Linus Torvalds --- .mailmap | 1 + Documentation/ABI/testing/sysfs-platform-dell-laptop | 8 ++++---- MAINTAINERS | 16 ++++++++-------- arch/arm/mach-omap2/omap-secure.c | 2 +- arch/arm/mach-omap2/omap-secure.h | 2 +- arch/arm/mach-omap2/omap-smc.S | 2 +- drivers/char/hw_random/omap3-rom-rng.c | 4 ++-- drivers/hwmon/dell-smm-hwmon.c | 4 ++-- drivers/platform/x86/dell-laptop.c | 4 ++-- drivers/platform/x86/dell-rbtn.c | 4 ++-- drivers/platform/x86/dell-rbtn.h | 2 +- drivers/platform/x86/dell-smbios-base.c | 4 ++-- drivers/platform/x86/dell-smbios-smm.c | 2 +- drivers/platform/x86/dell-smbios.h | 2 +- drivers/platform/x86/dell-smo8800.c | 2 +- drivers/platform/x86/dell-wmi.c | 4 ++-- drivers/power/supply/bq2415x_charger.c | 4 ++-- drivers/power/supply/bq27xxx_battery.c | 2 +- drivers/power/supply/isp1704_charger.c | 2 +- drivers/power/supply/rx51_battery.c | 4 ++-- fs/udf/ecma_167.h | 2 +- fs/udf/osta_udf.h | 2 +- include/linux/power/bq2415x_charger.h | 2 +- tools/laptop/freefall/freefall.c | 2 +- 24 files changed, 42 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/.mailmap b/.mailmap index 9198a93c2f5c..893266d1f7b0 100644 --- a/.mailmap +++ b/.mailmap @@ -210,6 +210,7 @@ Oleksij Rempel Oleksij Rempel Oleksij Rempel Oleksij Rempel +Pali Rohár Paolo 'Blaisorblade' Giarrusso Patrick Mochel Paul Burton diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop index 8c6a0b8e1131..9b917c7453de 100644 --- a/Documentation/ABI/testing/sysfs-platform-dell-laptop +++ b/Documentation/ABI/testing/sysfs-platform-dell-laptop @@ -2,7 +2,7 @@ What: /sys/class/leds/dell::kbd_backlight/als_enabled Date: December 2014 KernelVersion: 3.19 Contact: Gabriele Mazzotta , - Pali Rohár + Pali Rohár Description: This file allows to control the automatic keyboard illumination mode on some systems that have an ambient @@ -13,7 +13,7 @@ What: /sys/class/leds/dell::kbd_backlight/als_setting Date: December 2014 KernelVersion: 3.19 Contact: Gabriele Mazzotta , - Pali Rohár + Pali Rohár Description: This file allows to specifiy the on/off threshold value, as reported by the ambient light sensor. @@ -22,7 +22,7 @@ What: /sys/class/leds/dell::kbd_backlight/start_triggers Date: December 2014 KernelVersion: 3.19 Contact: Gabriele Mazzotta , - Pali Rohár + Pali Rohár Description: This file allows to control the input triggers that turn on the keyboard backlight illumination that is @@ -45,7 +45,7 @@ What: /sys/class/leds/dell::kbd_backlight/stop_timeout Date: December 2014 KernelVersion: 3.19 Contact: Gabriele Mazzotta , - Pali Rohár + Pali Rohár Description: This file allows to specify the interval after which the keyboard illumination is disabled because of inactivity.
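(For reference, a .mailmap entry such as the one added above maps the author identity recorded in old commits onto a canonical one for tools like git shortlog. The general form, shown here with placeholder addresses, is:

Proper Name <canonical@example.org> <address-used-in-old-commits@example.org>

so history recorded under the retired gmail address keeps resolving to the new kernel.org identity.)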
diff --git a/MAINTAINERS b/MAINTAINERS index d5b1878f2815..ff043097ea0e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -727,7 +727,7 @@ L: linux-alpha@vger.kernel.org F: arch/alpha/ ALPS PS/2 TOUCHPAD DRIVER -R: Pali Rohár +R: Pali Rohár F: drivers/input/mouse/alps.* ALTERA I2C CONTROLLER DRIVER @@ -4774,23 +4774,23 @@ F: drivers/net/fddi/defza.* DELL LAPTOP DRIVER M: Matthew Garrett -M: Pali Rohár +M: Pali Rohár L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/dell-laptop.c DELL LAPTOP FREEFALL DRIVER -M: Pali Rohár +M: Pali Rohár S: Maintained F: drivers/platform/x86/dell-smo8800.c DELL LAPTOP RBTN DRIVER -M: Pali Rohár +M: Pali Rohár S: Maintained F: drivers/platform/x86/dell-rbtn.* DELL LAPTOP SMM DRIVER -M: Pali Rohár +M: Pali Rohár S: Maintained F: drivers/hwmon/dell-smm-hwmon.c F: include/uapi/linux/i8k.h @@ -4802,7 +4802,7 @@ S: Maintained F: drivers/platform/x86/dell_rbu.c DELL SMBIOS DRIVER -M: Pali Rohár +M: Pali Rohár M: Mario Limonciello L: platform-driver-x86@vger.kernel.org S: Maintained @@ -4835,7 +4835,7 @@ F: drivers/platform/x86/dell-wmi-descriptor.c DELL WMI NOTIFICATIONS DRIVER M: Matthew Garrett -M: Pali Rohár +M: Pali Rohár S: Maintained F: drivers/platform/x86/dell-wmi.c @@ -11950,7 +11950,7 @@ F: drivers/media/i2c/et8ek8 F: drivers/media/i2c/ad5820.c NOKIA N900 POWER SUPPLY DRIVERS -R: Pali Rohár +R: Pali Rohár F: include/linux/power/bq2415x_charger.h F: include/linux/power/bq27xxx_battery.h F: drivers/power/supply/bq2415x_charger.c diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index d00e3c72e37d..f70d561f37f7 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -5,7 +5,7 @@ * Copyright (C) 2011 Texas Instruments, Inc. * Santosh Shilimkar * Copyright (C) 2012 Ivaylo Dimitrov - * Copyright (C) 2013 Pali Rohár + * Copyright (C) 2013 Pali Rohár */ #include diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index ba8c486c0454..4aaa95706d39 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -5,7 +5,7 @@ * Copyright (C) 2011 Texas Instruments, Inc. * Santosh Shilimkar * Copyright (C) 2012 Ivaylo Dimitrov - * Copyright (C) 2013 Pali Rohár + * Copyright (C) 2013 Pali Rohár */ #ifndef OMAP_ARCH_OMAP_SECURE_H #define OMAP_ARCH_OMAP_SECURE_H diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S index d4832845a4e8..7376f528034d 100644 --- a/arch/arm/mach-omap2/omap-smc.S +++ b/arch/arm/mach-omap2/omap-smc.S @@ -6,7 +6,7 @@ * Written by Santosh Shilimkar * * Copyright (C) 2012 Ivaylo Dimitrov - * Copyright (C) 2013 Pali Rohár + * Copyright (C) 2013 Pali Rohár */ #include diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index a431c5cbe2be..e0d77fa048fb 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -4,7 +4,7 @@ * Copyright (C) 2009 Nokia Corporation * Author: Juha Yrjola * - * Copyright (C) 2013 Pali Rohár + * Copyright (C) 2013 Pali Rohár * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any @@ -178,5 +178,5 @@ module_platform_driver(omap3_rom_rng_driver); MODULE_ALIAS("platform:omap3-rom-rng"); MODULE_AUTHOR("Juha Yrjola"); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index d4c83009d625..ab719d372b0d 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -7,7 +7,7 @@ * Hwmon integration: * Copyright (C) 2011 Jean Delvare * Copyright (C) 2013, 2014 Guenter Roeck - * Copyright (C) 2014, 2015 Pali Rohár + * Copyright (C) 2014, 2015 Pali Rohár */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -86,7 +86,7 @@ static unsigned int auto_fan; #define I8K_HWMON_HAVE_FAN3 (1 << 12) MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)"); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("i8k"); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 74e988f839e8..f8d3e3bd1bb5 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -4,7 +4,7 @@ * * Copyright (c) Red Hat * Copyright (c) 2014 Gabriele Mazzotta - * Copyright (c) 2014 Pali Rohár + * Copyright (c) 2014 Pali Rohár * * Based on documentation in the libsmbios package: * Copyright (C) 2005-2014 Dell Inc. @@ -2295,6 +2295,6 @@ module_exit(dell_exit); MODULE_AUTHOR("Matthew Garrett "); MODULE_AUTHOR("Gabriele Mazzotta "); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_DESCRIPTION("Dell laptop driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c index a6b856cd86bd..a89fad47ff13 100644 --- a/drivers/platform/x86/dell-rbtn.c +++ b/drivers/platform/x86/dell-rbtn.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Dell Airplane Mode Switch driver - Copyright (C) 2014-2015 Pali Rohár + Copyright (C) 2014-2015 Pali Rohár */ @@ -495,5 +495,5 @@ MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when " "(default true)"); MODULE_DEVICE_TABLE(acpi, rbtn_ids); MODULE_DESCRIPTION("Dell Airplane Mode Switch driver"); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-rbtn.h b/drivers/platform/x86/dell-rbtn.h index 0fdc81644458..5e030f926c58 100644 --- a/drivers/platform/x86/dell-rbtn.h +++ b/drivers/platform/x86/dell-rbtn.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Dell Airplane Mode Switch driver - Copyright (C) 2014-2015 Pali Rohár + Copyright (C) 2014-2015 Pali Rohár */ diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c index fe59b0ebff31..2e2cd565926a 100644 --- a/drivers/platform/x86/dell-smbios-base.c +++ b/drivers/platform/x86/dell-smbios-base.c @@ -4,7 +4,7 @@ * * Copyright (c) Red Hat * Copyright (c) 2014 Gabriele Mazzotta - * Copyright (c) 2014 Pali Rohár + * Copyright (c) 2014 Pali Rohár * * Based on documentation in the libsmbios package: * Copyright (C) 2005-2014 Dell Inc. 
@@ -645,7 +645,7 @@ module_exit(dell_smbios_exit); MODULE_AUTHOR("Matthew Garrett "); MODULE_AUTHOR("Gabriele Mazzotta "); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_AUTHOR("Mario Limonciello "); MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c index d6854d1c4119..97c52a839a3e 100644 --- a/drivers/platform/x86/dell-smbios-smm.c +++ b/drivers/platform/x86/dell-smbios-smm.c @@ -4,7 +4,7 @@ * * Copyright (c) Red Hat * Copyright (c) 2014 Gabriele Mazzotta - * Copyright (c) 2014 Pali Rohár + * Copyright (c) 2014 Pali Rohár * Copyright (c) 2017 Dell Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h index a7ff9803f41a..75fa8ea0476d 100644 --- a/drivers/platform/x86/dell-smbios.h +++ b/drivers/platform/x86/dell-smbios.h @@ -4,7 +4,7 @@ * * Copyright (c) Red Hat * Copyright (c) 2014 Gabriele Mazzotta - * Copyright (c) 2014 Pali Rohár + * Copyright (c) 2014 Pali Rohár * * Based on documentation in the libsmbios package: * Copyright (C) 2005-2014 Dell Inc. diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c index b531fe8ab7e0..5d9304a7de1b 100644 --- a/drivers/platform/x86/dell-smo8800.c +++ b/drivers/platform/x86/dell-smo8800.c @@ -3,7 +3,7 @@ * dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver * * Copyright (C) 2012 Sonal Santan - * Copyright (C) 2014 Pali Rohár + * Copyright (C) 2014 Pali Rohár * * This is loosely based on lis3lv02d driver. */ diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 6669db2555fb..86e8dd6a8b33 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -3,7 +3,7 @@ * Dell WMI hotkeys * * Copyright (C) 2008 Red Hat - * Copyright (C) 2014-2015 Pali Rohár + * Copyright (C) 2014-2015 Pali Rohár * * Portions based on wistron_btns.c: * Copyright (C) 2005 Miloslav Trmac @@ -29,7 +29,7 @@ #include "dell-wmi-descriptor.h" MODULE_AUTHOR("Matthew Garrett "); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c index 532f6e4fcafb..a1f00ae1c180 100644 --- a/drivers/power/supply/bq2415x_charger.c +++ b/drivers/power/supply/bq2415x_charger.c @@ -2,7 +2,7 @@ /* * bq2415x charger driver * - * Copyright (C) 2011-2013 Pali Rohár + * Copyright (C) 2011-2013 Pali Rohár * * Datasheets: * http://www.ti.com/product/bq24150 @@ -1788,6 +1788,6 @@ static struct i2c_driver bq2415x_driver = { }; module_i2c_driver(bq2415x_driver); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_DESCRIPTION("bq2415x charger driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 664e50103eaa..942c92127b6d 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -4,7 +4,7 @@ * Copyright (C) 2008 Rodolfo Giometti * Copyright (C) 2008 Eurotech S.p.A. * Copyright (C) 2010-2011 Lars-Peter Clausen - * Copyright (C) 2011 Pali Rohár + * Copyright (C) 2011 Pali Rohár * Copyright (C) 2017 Liam Breck * * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. 
diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c index 4812ac1ff2df..b6efc454e4f0 100644 --- a/drivers/power/supply/isp1704_charger.c +++ b/drivers/power/supply/isp1704_charger.c @@ -3,7 +3,7 @@ * ISP1704 USB Charger Detection driver * * Copyright (C) 2010 Nokia Corporation - * Copyright (C) 2012 - 2013 Pali Rohár + * Copyright (C) 2012 - 2013 Pali Rohár */ #include diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c index 8548b639ff2f..6e488ecf4dcb 100644 --- a/drivers/power/supply/rx51_battery.c +++ b/drivers/power/supply/rx51_battery.c @@ -2,7 +2,7 @@ /* * Nokia RX-51 battery driver * - * Copyright (C) 2012 Pali Rohár + * Copyright (C) 2012 Pali Rohár */ #include @@ -278,6 +278,6 @@ static struct platform_driver rx51_battery_driver = { module_platform_driver(rx51_battery_driver); MODULE_ALIAS("platform:rx51-battery"); -MODULE_AUTHOR("Pali Rohár "); +MODULE_AUTHOR("Pali Rohár "); MODULE_DESCRIPTION("Nokia RX-51 battery driver"); MODULE_LICENSE("GPL"); diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h index 3fd85464abd5..736ebc5dc441 100644 --- a/fs/udf/ecma_167.h +++ b/fs/udf/ecma_167.h @@ -5,7 +5,7 @@ * http://www.ecma.ch * * Copyright (c) 2001-2002 Ben Fennema - * Copyright (c) 2017-2019 Pali Rohár + * Copyright (c) 2017-2019 Pali Rohár * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h index 35e61b2cacfe..d5fbfab3ddb6 100644 --- a/fs/udf/osta_udf.h +++ b/fs/udf/osta_udf.h @@ -5,7 +5,7 @@ * http://www.osta.org * * Copyright (c) 2001-2004 Ben Fennema - * Copyright (c) 2017-2019 Pali Rohár + * Copyright (c) 2017-2019 Pali Rohár * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h index 7a91b357e3ac..4ca08321e251 100644 --- a/include/linux/power/bq2415x_charger.h +++ b/include/linux/power/bq2415x_charger.h @@ -2,7 +2,7 @@ /* * bq2415x charger driver * - * Copyright (C) 2011-2013 Pali Rohár + * Copyright (C) 2011-2013 Pali Rohár */ #ifndef BQ2415X_CHARGER_H diff --git a/tools/laptop/freefall/freefall.c b/tools/laptop/freefall/freefall.c index d29a86cda87f..d77d7861787c 100644 --- a/tools/laptop/freefall/freefall.c +++ b/tools/laptop/freefall/freefall.c @@ -4,7 +4,7 @@ * Copyright 2008 Eric Piel * Copyright 2009 Pavel Machek * Copyright 2012 Sonal Santan - * Copyright 2014 Pali Rohár + * Copyright 2014 Pali Rohár */ #include -- cgit v1.2.3-58-ga151 From 07d8350ede4c4c29634b26c163a1eecdf39dfcfb Mon Sep 17 00:00:00 2001 From: afzal mohammed Date: Fri, 27 Mar 2020 21:41:16 +0530 Subject: genirq: Remove setup_irq() and remove_irq() Now that all the users of setup_irq() & remove_irq() have been replaced by request_irq() & free_irq() respectively, delete them. 
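As a hedged sketch of the conversion pattern behind this removal (the IRQ number, handler and device names below are hypothetical, not taken from any in-tree driver): callers that used to register a statically allocated struct irqaction via setup_irq() now let request_irq() allocate and set up the action internally.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

/* Before: a static irqaction wired up with the now-removed setup_irq(). */
static irqreturn_t demo_timer_handler(int irq, void *dev_id)
{
	/* acknowledge the (hypothetical) timer hardware here */
	return IRQ_HANDLED;
}

static struct irqaction demo_timer_irqaction = {
	.name		= "demo-timer",
	.flags		= IRQF_TIMER,
	.handler	= demo_timer_handler,
};

static void __init demo_timer_init_old(void)
{
	setup_irq(16, &demo_timer_irqaction);	/* gone after this commit */
}

/* After: request_irq() allocates the irqaction itself. */
static void __init demo_timer_init_new(void)
{
	if (request_irq(16, demo_timer_handler, IRQF_TIMER, "demo-timer", NULL))
		pr_err("demo-timer: request_irq() failed\n");
}

The historical reason for setup_irq() was registering interrupts before the memory allocators were up; with the allocators now available that early, request_irq() works in those boot paths too, which is what made the tree-wide conversion possible.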
Signed-off-by: afzal mohammed Signed-off-by: Thomas Gleixner Reviewed-by: Linus Walleij Link: https://lkml.kernel.org/r/0aa8771ada1ac8e1312f6882980c9c08bd023148.1585320721.git.afzal.mohd.ma@gmail.com --- include/linux/irq.h | 2 -- kernel/irq/manage.c | 44 -------------------------------------------- 2 files changed, 46 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 9315fbb87db3..c63c2aa915ff 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -573,8 +573,6 @@ enum { #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS struct irqaction; -extern int setup_irq(unsigned int irq, struct irqaction *new); -extern void remove_irq(unsigned int irq, struct irqaction *act); extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index fe40c658f86f..453a8a0f4804 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1690,34 +1690,6 @@ out_mput: return ret; } -/** - * setup_irq - setup an interrupt - * @irq: Interrupt line to setup - * @act: irqaction for the interrupt - * - * Used to statically setup interrupts in the early boot process. - */ -int setup_irq(unsigned int irq, struct irqaction *act) -{ - int retval; - struct irq_desc *desc = irq_to_desc(irq); - - if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) - return -EINVAL; - - retval = irq_chip_pm_get(&desc->irq_data); - if (retval < 0) - return retval; - - retval = __setup_irq(irq, desc, act); - - if (retval) - irq_chip_pm_put(&desc->irq_data); - - return retval; -} -EXPORT_SYMBOL_GPL(setup_irq); - /* * Internal function to unregister an irqaction - used to free * regular and special interrupts that are part of the architecture. @@ -1858,22 +1830,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) return action; } -/** - * remove_irq - free an interrupt - * @irq: Interrupt line to free - * @act: irqaction for the interrupt - * - * Used to remove interrupts statically setup by the early boot process. - */ -void remove_irq(unsigned int irq, struct irqaction *act) -{ - struct irq_desc *desc = irq_to_desc(irq); - - if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) - __free_irq(desc, act->dev_id); -} -EXPORT_SYMBOL_GPL(remove_irq); - /** * free_irq - free an interrupt allocated with request_irq * @irq: Interrupt line to free -- cgit v1.2.3-58-ga151 From 3c1d1613be80c2e17f1ddf672df1d8a8caebfd0d Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 6 Apr 2020 14:25:31 +0200 Subject: i2c: remove i2c_new_probed_device API All in-tree users have been converted to the new i2c_new_scanned_device function, so remove this deprecated one. 
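As a minimal, hedged sketch of what each caller conversion looked like (the device name and address list here are made up): the only behavioral difference is the error convention, since the removed wrapper mapped the ERR_PTR from i2c_new_scanned_device() back to NULL, as the hunk below shows, so converted callers test with IS_ERR() instead of a NULL check.

#include <linux/err.h>
#include <linux/i2c.h>

static const unsigned short demo_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static int demo_instantiate(struct i2c_adapter *adap)
{
	struct i2c_board_info info = { .type = "demo-sensor" };
	struct i2c_client *client;

	/*
	 * Was: client = i2c_new_probed_device(adap, &info, demo_addrs, NULL);
	 *      if (!client) return -ENODEV;
	 */
	client = i2c_new_scanned_device(adap, &info, demo_addrs, NULL);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* e.g. -ENODEV if no address answered */

	return 0;
}

Passing a NULL probe callback keeps the core's default probing behavior in both the old and the new API.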
Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 13 ------------- include/linux/i2c.h | 6 ------ 2 files changed, 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 5cc0b0ec5570..a66912782064 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2273,19 +2273,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap, } EXPORT_SYMBOL_GPL(i2c_new_scanned_device); -struct i2c_client * -i2c_new_probed_device(struct i2c_adapter *adap, - struct i2c_board_info *info, - unsigned short const *addr_list, - int (*probe)(struct i2c_adapter *adap, unsigned short addr)) -{ - struct i2c_client *client; - - client = i2c_new_scanned_device(adap, info, addr_list, probe); - return IS_ERR(client) ? NULL : client; -} -EXPORT_SYMBOL_GPL(i2c_new_probed_device); - struct i2c_adapter *i2c_get_adapter(int nr) { struct i2c_adapter *adapter; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 456fc17ecb1c..45d36ba4826b 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -461,12 +461,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap, unsigned short const *addr_list, int (*probe)(struct i2c_adapter *adap, unsigned short addr)); -struct i2c_client * -i2c_new_probed_device(struct i2c_adapter *adap, - struct i2c_board_info *info, - unsigned short const *addr_list, - int (*probe)(struct i2c_adapter *adap, unsigned short addr)); - /* Common custom probe functions */ int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr); -- cgit v1.2.3-58-ga151 From d87f639258a6a5980183f11876c884931ad93da2 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 28 Feb 2020 16:14:11 -0800 Subject: ext4: use non-movable memory for superblock readahead Since commit a8ac900b8163 ("ext4: use non-movable memory for the superblock") buffers for the ext4 superblock were allocated using the sb_bread_unmovable() helper, which allocates buffer heads out of non-movable memory blocks. This was necessary to avoid blocking page migrations and causing CMA allocation failures. However commit 85c8f176a611 ("ext4: preload block group descriptors") broke this by introducing pre-reading of the ext4 superblock. The problem is that __breadahead() uses __getblk() underneath, which allocates buffer heads out of movable memory. This resulted in the page migration failures I've seen on a machine with an ext4 partition and a preallocated CMA area. Fix this by introducing the sb_breadahead_unmovable() and __breadahead_gfp() helpers, which use non-movable memory for buffer head allocations, and use them for the ext4 superblock readahead.
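A short, hedged sketch of how a filesystem uses the new helper (hypothetical block range, mirroring the ext4_fill_super() hunk below): the only moving part is the gfp mask, since sb_breadahead() ends up allocating with __GFP_MOVABLE while the _unmovable variant passes 0, keeping buffer-head pages out of movable, CMA-eligible pageblocks.

#include <linux/buffer_head.h>

/*
 * Hypothetical mount-time metadata preload. Each readahead goes through
 * __breadahead_gfp(..., 0) rather than __getblk()'s implicit
 * __GFP_MOVABLE, so page migration and CMA allocations stay unblocked.
 */
static void demo_preload_group_descriptors(struct super_block *sb,
					   sector_t first, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		sb_breadahead_unmovable(sb, first + i);
}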
Reviewed-by: Andreas Dilger Fixes: 85c8f176a611 ("ext4: preload block group descriptors") Signed-off-by: Roman Gushchin Link: https://lore.kernel.org/r/20200229001411.128010-1-guro@fb.com Signed-off-by: Theodore Ts'o --- fs/buffer.c | 11 +++++++++++ fs/ext4/inode.c | 2 +- fs/ext4/super.c | 2 +- include/linux/buffer_head.h | 8 ++++++++ 4 files changed, 21 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/buffer.c b/fs/buffer.c index f73276d746bb..599a0bf7257b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1371,6 +1371,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) } EXPORT_SYMBOL(__breadahead); +void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, + gfp_t gfp) +{ + struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); + if (likely(bh)) { + ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); + brelse(bh); + } +} +EXPORT_SYMBOL(__breadahead_gfp); + /** * __bread_gfp() - reads a specified block and returns the bh * @bdev: the block_device to read from diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 68f6c0af8e5d..2a4aae6acdcb 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4364,7 +4364,7 @@ make_io: if (end > table) end = table; while (b <= end) - sb_breadahead(sb, b++); + sb_breadahead_unmovable(sb, b++); } /* diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9728e7b0e84f..83413f0f1e28 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4340,7 +4340,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* Pre-read the descriptors into the buffer cache */ for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); - sb_breadahead(sb, block); + sb_breadahead_unmovable(sb, block); } for (i = 0; i < db_count; i++) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index e0b020eaf32e..15b765a181b8 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, + gfp_t gfp); struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); @@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } +static inline void +sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + static inline struct buffer_head * sb_getblk(struct super_block *sb, sector_t block) { -- cgit v1.2.3-58-ga151 From 96806229ca033f85310bc5c203410189f8a1d2ee Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 10 Apr 2020 11:13:26 +0100 Subject: irqchip/gic-v4.1: Add support for VPENDBASER's Dirty+Valid signaling When a vPE is made resident, the GIC starts parsing the virtual pending table to deliver pending interrupts. This takes place asynchronously, and can at times take a long while. Long enough that the vcpu enters the guest and hits WFI before any interrupt has been signaled yet. The vcpu then exits, blocks, and now gets a doorbell. Rinse, repeat.
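The fix, described next, is to poll until the GIC reports that parsing is complete. A condensed, hedged sketch of the polling idiom it uses (readq_relaxed_poll_timeout() from linux/iopoll.h, with the redistributor base assumed to be mapped already; the bounds mirror the helper added below):

#include <linux/iopoll.h>
#include <linux/irqchip/arm-gic-v3.h>

/*
 * Spin, polling every 10us with a 500us timeout, until the GIC clears
 * GICR_VPENDBASER.Dirty, i.e. until it has finished parsing the virtual
 * pending table of the vPE being scheduled.
 */
static void demo_wait_vpt_parse_complete(void __iomem *vlpi_base)
{
	u64 val;

	WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
						val,
						!(val & GICR_VPENDBASER_Dirty),
						10, 500));
}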
In order to avoid the above, an (optional on GICv4, mandatory on v4.1) feature allows the GIC to feed back to the hypervisor whether it is done parsing the VPT by clearing the GICR_VPENDBASER.Dirty bit. The hypervisor can then wait until the GIC is ready before actually running the vPE. Plug the detection code as well as polling on vPE schedule. While at it, tidy up the kernel message that displays the GICv4 optional features. Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier --- drivers/irqchip/irq-gic-v3-its.c | 19 +++++++++++++++++++ drivers/irqchip/irq-gic-v3.c | 11 +++++++---- include/linux/irqchip/arm-gic-v3.h | 2 ++ 3 files changed, 28 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 54d142ccc63a..affd325cc3d4 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -3672,6 +3673,20 @@ out: return IRQ_SET_MASK_OK_DONE; } +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 10, 500)); +} + static void its_vpe_schedule(struct its_vpe *vpe) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); @@ -3702,6 +3717,8 @@ static void its_vpe_schedule(struct its_vpe *vpe) val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; val |= GICR_VPENDBASER_Valid; gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + its_wait_vpt_parse_complete(); } static void its_vpe_deschedule(struct its_vpe *vpe) @@ -3910,6 +3927,8 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe, val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + its_wait_vpt_parse_complete(); } static void its_vpe_4_1_deschedule(struct its_vpe *vpe, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 9dbc81b6f62e..d7006ef18a0d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -873,6 +873,7 @@ static int __gic_update_rdist_properties(struct redist_region *region, gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | gic_data.rdists.has_rvpeid); + gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); /* Detect non-sensical configurations */ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { @@ -893,10 +894,11 @@ static void gic_update_rdist_properties(void) if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) gic_data.ppi_nr = 0; pr_info("%d PPIs implemented\n", gic_data.ppi_nr); - pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n", - !gic_data.rdists.has_vlpis ? "no " : "", - !gic_data.rdists.has_direct_lpi ? "no " : "", - !gic_data.rdists.has_rvpeid ? "no " : ""); + if (gic_data.rdists.has_vlpis) + pr_info("GICv4 features: %s%s%s\n", + gic_data.rdists.has_direct_lpi ? "DirectLPI " : "", + gic_data.rdists.has_rvpeid ? "RVPEID " : "", + gic_data.rdists.has_vpend_valid_dirty ?
"Valid+Dirty " : ""); } /* Check whether it's single security state view */ @@ -1620,6 +1622,7 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_data.rdists.has_rvpeid = true; gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { err = -ENOMEM; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 765d9b769b69..6c36b6cc3edf 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -243,6 +243,7 @@ #define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRTY (1U << 2) #define GICR_TYPER_DirectLPIS (1U << 3) #define GICR_TYPER_LAST (1U << 4) #define GICR_TYPER_RVPEID (1U << 7) @@ -686,6 +687,7 @@ struct rdists { bool has_vlpis; bool has_rvpeid; bool has_direct_lpi; + bool has_vpend_valid_dirty; }; struct irq_domain; -- cgit v1.2.3-58-ga151 From 0a368bf00e3a7c57a57efc1bf79b79facb97639c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 16:40:21 -0500 Subject: bio: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/bio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bio.h b/include/linux/bio.h index c1c0f9ea4e63..a0ee494a6329 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -319,7 +319,7 @@ struct bio_integrity_payload { struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; - struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ + struct bio_vec bip_inline_vecs[];/* embedded bvec array */ }; #if defined(CONFIG_BLK_DEV_INTEGRITY) -- cgit v1.2.3-58-ga151 From f36aaf8be421099103193c49796a14213d3be315 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 23 Mar 2020 16:43:39 -0500 Subject: blk-mq: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/blk-mq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f389d7c724bd..b45148ba3291 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -173,7 +173,7 @@ struct blk_mq_hw_ctx { * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also * blk_mq_hw_ctx_size(). */ - struct srcu_struct srcu[0]; + struct srcu_struct srcu[]; }; /** -- cgit v1.2.3-58-ga151 From 5a58ec8cfc8621f5bdbd610202f62f817e5da204 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 16:45:36 -0500 Subject: blk_types: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/blk_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 70254ae11769..31eb92876be7 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -198,7 +198,7 @@ struct bio { * double allocations for a small number of bio_vecs. This member * MUST obviously be kept at the very end of the bio. 
*/ - struct bio_vec bi_inline_vecs[0]; + struct bio_vec bi_inline_vecs[]; }; #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) -- cgit v1.2.3-58-ga151 From e76018cb604ace486de9cf85898c14bb2b47faff Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 16:48:10 -0500 Subject: can: dev: peak_canfd.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/can/dev/peak_canfd.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h index 511a37302fea..5fd627e9da19 100644 --- a/include/linux/can/dev/peak_canfd.h +++ b/include/linux/can/dev/peak_canfd.h @@ -189,7 +189,7 @@ struct __packed pucan_rx_msg { u8 client; __le16 flags; __le32 can_id; - u8 d[0]; + u8 d[]; }; /* uCAN error types */ @@ -266,7 +266,7 @@ struct __packed pucan_tx_msg { u8 client; __le16 flags; __le32 can_id; - u8 d[0]; + u8 d[]; }; /* build the cmd opcode_channel field with respect to the correct endianness */ -- cgit v1.2.3-58-ga151 From 1fa0949bede6de2b595da535c3ce69de8e130db2 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 17:03:49 -0500 Subject: digsig.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
Silva --- include/linux/digsig.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/digsig.h b/include/linux/digsig.h index 594fc66a395a..2ace69e41088 100644 --- a/include/linux/digsig.h +++ b/include/linux/digsig.h @@ -29,7 +29,7 @@ struct pubkey_hdr { uint32_t timestamp; /* key made, always 0 for now */ uint8_t algo; uint8_t nmpi; - char mpi[0]; + char mpi[]; } __packed; struct signature_hdr { @@ -39,7 +39,7 @@ struct signature_hdr { uint8_t hash; uint8_t keyid[8]; uint8_t nmpi; - char mpi[0]; + char mpi[]; } __packed; #if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE) -- cgit v1.2.3-58-ga151 From a2008395fe2ebd9cd82f220d034d36cc887f35fe Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 17:17:52 -0500 Subject: dirent.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/dirent.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dirent.h b/include/linux/dirent.h index fc61f3cff72f..99002220cd45 100644 --- a/include/linux/dirent.h +++ b/include/linux/dirent.h @@ -7,7 +7,7 @@ struct linux_dirent64 { s64 d_off; unsigned short d_reclen; unsigned char d_type; - char d_name[0]; + char d_name[]; }; #endif -- cgit v1.2.3-58-ga151 From 192199464d6cccb084356add54b3a48d6dde9f96 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 17:21:19 -0500 Subject: enclosure.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. 
[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/enclosure.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 564e96f625ff..1c630e2c2756 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -101,7 +101,7 @@ struct enclosure_device { struct device edev; struct enclosure_component_callbacks *cb; int components; - struct enclosure_component component[0]; + struct enclosure_component component[]; }; static inline struct enclosure_device * -- cgit v1.2.3-58-ga151 From beb69f15a095245c5cc62389eea93002b41d2eb9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 17:23:01 -0500 Subject: energy_model.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/energy_model.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index d249b88a4d5a..ade6486a3382 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -36,7 +36,7 @@ struct em_cap_state { struct em_perf_domain { struct em_cap_state *table; int nr_cap_states; - unsigned long cpus[0]; + unsigned long cpus[]; }; #ifdef CONFIG_ENERGY_MODEL -- cgit v1.2.3-58-ga151 From 5299a11a9378e8c68e3b8e2040f7aa7e401d50b7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 17:24:53 -0500 Subject: ethtool.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. 
As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/ethtool.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1d379bf6ee1..a23b26eab479 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -35,7 +35,7 @@ struct compat_ethtool_rxnfc { compat_u64 data; struct compat_ethtool_rx_flow_spec fs; u32 rule_cnt; - u32 rule_locs[0]; + u32 rule_locs[]; }; #endif /* CONFIG_COMPAT */ @@ -462,7 +462,7 @@ int ethtool_check_ops(const struct ethtool_ops *ops); struct ethtool_rx_flow_rule { struct flow_rule *rule; - unsigned long priv[0]; + unsigned long priv[]; }; struct ethtool_rx_flow_spec_input { -- cgit v1.2.3-58-ga151 From 89f60a5d9bf5a6b9b16dfdd56a91c4a2d7b8830d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 17:43:59 -0500 Subject: genalloc.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/genalloc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 5b14a0f38124..0bd581003cd5 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -76,7 +76,7 @@ struct gen_pool_chunk { void *owner; /* private data to retrieve at alloc time */ unsigned long start_addr; /* start address of memory chunk */ unsigned long end_addr; /* end address of memory chunk (inclusive) */ - unsigned long bits[0]; /* bitmap for allocating memory chunk */ + unsigned long bits[]; /* bitmap for allocating memory chunk */ }; /* -- cgit v1.2.3-58-ga151 From 0ead33642f1df89699f2e4dda8eea59c326b68f6 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 23 Mar 2020 17:59:00 -0500 Subject: igmp.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/igmp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 463047d0190b..faa6586a5783 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -38,7 +38,7 @@ struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - __be32 sl_addr[0]; + __be32 sl_addr[]; }; #define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ -- cgit v1.2.3-58-ga151 From 1d9e13e8ef05029c61d52ad9a6f48f14771d14b7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 18:00:04 -0500 Subject: ihex.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/ihex.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 98cb5ce0b0a0..b824877e6d1b 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -18,7 +18,7 @@ struct ihex_binrec { __be32 addr; __be16 len; - uint8_t data[0]; + uint8_t data[]; } __attribute__((packed)); static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) -- cgit v1.2.3-58-ga151 From 7856e9f12f1f59cc6abb25f92b336528d0660ebb Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 23 Mar 2020 18:01:11 -0500 Subject: irq.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/irq.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 9315fbb87db3..fa8ad93029ad 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1043,7 +1043,7 @@ struct irq_chip_generic { unsigned long unused; struct irq_domain *domain; struct list_head list; - struct irq_chip_type chip_types[0]; + struct irq_chip_type chip_types[]; }; /** @@ -1079,7 +1079,7 @@ struct irq_domain_chip_generic { unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; - struct irq_chip_generic *gc[0]; + struct irq_chip_generic *gc[]; }; /* Generic chip callback functions */ -- cgit v1.2.3-58-ga151 From 312322722872324939f0d0347a6e41807c2d4c56 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 16:58:49 -0500 Subject: lib: cpu_rmap: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
Silva --- include/linux/cpu_rmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index 02edeafcb2bf..be8aea04d023 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -28,7 +28,7 @@ struct cpu_rmap { struct { u16 index; u16 dist; - } near[0]; + } near[]; }; #define CPU_RMAP_DIST_INF 0xffff -- cgit v1.2.3-58-ga151 From 859b494111b196853fd8c1852c6b57ef33738b50 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 18:32:01 -0500 Subject: list_lru.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/list_lru.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index d5ceb2839a2d..9dcaa3e582c9 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -34,7 +34,7 @@ struct list_lru_one { struct list_lru_memcg { struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_one *lru[0]; + struct list_lru_one *lru[]; }; struct list_lru_node { -- cgit v1.2.3-58-ga151 From 307ed94c37f842676d336cf5f2162022f4d7cdc4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 18:36:10 -0500 Subject: memcontrol.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
Silva --- include/linux/memcontrol.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1b4150ff64be..d275c72c4f8e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -106,7 +106,7 @@ struct lruvec_stat { */ struct memcg_shrinker_map { struct rcu_head rcu; - unsigned long map[0]; + unsigned long map[]; }; /* @@ -148,7 +148,7 @@ struct mem_cgroup_threshold_ary { /* Size of entries[] */ unsigned int size; /* Array of thresholds */ - struct mem_cgroup_threshold entries[0]; + struct mem_cgroup_threshold entries[]; }; struct mem_cgroup_thresholds { -- cgit v1.2.3-58-ga151 From 1223f3db71ba7bbcf2e77c7a5d4f440c2a2fa9c3 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 19:07:49 -0500 Subject: platform_data: wilco-ec.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/platform_data/wilco-ec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h index 25f46a939637..3e268e636b5b 100644 --- a/include/linux/platform_data/wilco-ec.h +++ b/include/linux/platform_data/wilco-ec.h @@ -83,7 +83,7 @@ struct wilco_ec_response { u16 result; u16 data_size; u8 reserved[2]; - u8 data[0]; + u8 data[]; } __packed; /** -- cgit v1.2.3-58-ga151 From 70f1451ec98ee43d2c66d2caa5ae6935ee97f90a Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 19:08:58 -0500 Subject: posix_acl.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. 
As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/posix_acl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 540595a321a7..90797f1b421d 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -28,7 +28,7 @@ struct posix_acl { refcount_t a_refcount; struct rcu_head a_rcu; unsigned int a_count; - struct posix_acl_entry a_entries[0]; + struct posix_acl_entry a_entries[]; }; #define FOREACH_ACL_ENTRY(pa, acl, pe) \ -- cgit v1.2.3-58-ga151 From a1c4b9247ddfb62fe3a23eb53d250382e82fae77 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 19:12:17 -0500 Subject: rio.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/rio.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rio.h b/include/linux/rio.h index 317bace5ac64..2cd637268b4f 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -100,7 +100,7 @@ struct rio_switch { u32 port_ok; struct rio_switch_ops *ops; spinlock_t lock; - struct rio_dev *nextdev[0]; + struct rio_dev *nextdev[]; }; /** @@ -201,7 +201,7 @@ struct rio_dev { u8 hopcount; struct rio_dev *prev; atomic_t state; - struct rio_switch rswitch[0]; /* RIO switch info */ + struct rio_switch rswitch[]; /* RIO switch info */ }; #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) -- cgit v1.2.3-58-ga151 From 9dd8bb5f8c449e87cc0084a118673c6d4182bab2 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 23 Mar 2020 19:13:20 -0500 Subject: rslib.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/rslib.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rslib.h b/include/linux/rslib.h index 5974cedd008c..238bb85243d3 100644 --- a/include/linux/rslib.h +++ b/include/linux/rslib.h @@ -54,7 +54,7 @@ struct rs_codec { */ struct rs_control { struct rs_codec *codec; - uint16_t buffers[0]; + uint16_t buffers[]; }; /* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */ -- cgit v1.2.3-58-ga151 From fe946db6ca851a0cd8c2f9c9dd96ef74e051cf2f Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 19:14:37 -0500 Subject: sched: topology.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
From fe946db6ca851a0cd8c2f9c9dd96ef74e051cf2f Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva"
Date: Mon, 23 Mar 2020 19:14:37 -0500
Subject: sched: topology.h: Replace zero-length array with flexible-array member

The current codebase makes use of the zero-length array language
extension to the C90 standard, but the preferred mechanism to declare
variable-length types such as these ones is a flexible array
member[1][2], introduced in C99:

struct foo {
        int stuff;
        struct boo array[];
};

By making use of the mechanism above, we will get a compiler warning
in case the flexible array does not occur last in the structure, which
will help us prevent some kinds of undefined behavior bugs from being
inadvertently introduced[3] to the codebase from now on.

Also, notice that dynamic memory allocations won't be affected by this
change:

"Flexible array members have incomplete type, and so the sizeof
operator may not be applied. As a quirk of the original implementation
of zero-length arrays, sizeof evaluates to zero."[1]

This issue was found with the help of Coccinelle.

[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
[2] https://github.com/KSPP/linux/issues/21
[3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour")

Signed-off-by: Gustavo A. R. Silva
---
 include/linux/sched/topology.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index af9319e4cfb9..95253ad792b0 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -142,7 +142,7 @@ struct sched_domain {
         * by attaching extra space to the end of the structure,
         * depending on how many CPUs the kernel has booted up with)
         */
-       unsigned long span[0];
+       unsigned long span[];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
--
cgit v1.2.3-58-ga151

From 5c91aa1df00ec4fa283c35e92736392df3137d81 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva"
Date: Mon, 23 Mar 2020 19:22:24 -0500
Subject: skbuff.h: Replace zero-length array with flexible-array member

The current codebase makes use of the zero-length array language
extension to the C90 standard, but the preferred mechanism to declare
variable-length types such as these ones is a flexible array
member[1][2], introduced in C99:

struct foo {
        int stuff;
        struct boo array[];
};

By making use of the mechanism above, we will get a compiler warning
in case the flexible array does not occur last in the structure, which
will help us prevent some kinds of undefined behavior bugs from being
inadvertently introduced[3] to the codebase from now on.

Also, notice that dynamic memory allocations won't be affected by this
change:

"Flexible array members have incomplete type, and so the sizeof
operator may not be applied. As a quirk of the original implementation
of zero-length arrays, sizeof evaluates to zero."[1]

This issue was found with the help of Coccinelle.

[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
[2] https://github.com/KSPP/linux/issues/21
[3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour")

Signed-off-by: Gustavo A. R. Silva
---
 include/linux/skbuff.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3a2ac7072dbb..3000c526f552 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4162,7 +4162,7 @@ struct skb_ext {
        refcount_t refcnt;
        u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
        u8 chunks;              /* same */
-       char data[0] __aligned(8);
+       char data[] __aligned(8);
 };
 
 struct skb_ext *__skb_ext_alloc(void);
--
cgit v1.2.3-58-ga151
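In-kernel callers usually avoid writing the sizeof arithmetic by hand:
struct_size() from <linux/overflow.h> computes the allocation size for
a struct with a trailing flexible array, and saturates rather than
wraps on overflow. A kernel-style sketch with a hypothetical record
type (struct record and record_alloc() are not from these patches):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* hypothetical variable-length record */
    struct record {
            unsigned int nr;
            unsigned long map[];    /* nr elements, must be last */
    };

    static struct record *record_alloc(unsigned int nr)
    {
            struct record *r;

            /* struct_size(r, map, nr) == sizeof(*r) + nr * sizeof(r->map[0]),
             * saturating to SIZE_MAX if the multiplication overflows */
            r = kzalloc(struct_size(r, map, nr), GFP_KERNEL);
            if (!r)
                    return NULL;

            r->nr = nr;
            return r;
    }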
From 16c3380f8c2e7ed3d75a30776a89aabf5512027a Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva"
Date: Mon, 23 Mar 2020 19:23:10 -0500
Subject: swap.h: Replace zero-length array with flexible-array member

The current codebase makes use of the zero-length array language
extension to the C90 standard, but the preferred mechanism to declare
variable-length types such as these ones is a flexible array
member[1][2], introduced in C99:

struct foo {
        int stuff;
        struct boo array[];
};

By making use of the mechanism above, we will get a compiler warning
in case the flexible array does not occur last in the structure, which
will help us prevent some kinds of undefined behavior bugs from being
inadvertently introduced[3] to the codebase from now on.

Also, notice that dynamic memory allocations won't be affected by this
change:

"Flexible array members have incomplete type, and so the sizeof
operator may not be applied. As a quirk of the original implementation
of zero-length arrays, sizeof evaluates to zero."[1]

This issue was found with the help of Coccinelle.

[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
[2] https://github.com/KSPP/linux/issues/21
[3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour")

Signed-off-by: Gustavo A. R. Silva
---
 include/linux/swap.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/swap.h b/include/linux/swap.h
index b835d8dbea0e..e1bbf7a16b27 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -275,7 +275,7 @@ struct swap_info_struct {
         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
-       struct plist_node avail_lists[0]; /*
+       struct plist_node avail_lists[]; /*
                                           * entries in swap_avail_heads, one
                                           * entry per node.
                                           * Must be last as the number of the
--
cgit v1.2.3-58-ga151

From 4ea19ecf322c2f98ef87fc980b3851625b082ac2 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva"
Date: Mon, 23 Mar 2020 19:25:06 -0500
Subject: ti_wilink_st.h: Replace zero-length array with flexible-array member

The current codebase makes use of the zero-length array language
extension to the C90 standard, but the preferred mechanism to declare
variable-length types such as these ones is a flexible array
member[1][2], introduced in C99:

struct foo {
        int stuff;
        struct boo array[];
};

By making use of the mechanism above, we will get a compiler warning
in case the flexible array does not occur last in the structure, which
will help us prevent some kinds of undefined behavior bugs from being
inadvertently introduced[3] to the codebase from now on.

Also, notice that dynamic memory allocations won't be affected by this
change:

"Flexible array members have incomplete type, and so the sizeof
operator may not be applied. As a quirk of the original implementation
of zero-length arrays, sizeof evaluates to zero."[1]

This issue was found with the help of Coccinelle.

[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
[2] https://github.com/KSPP/linux/issues/21
[3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour")

Signed-off-by: Gustavo A. R. Silva
---
 include/linux/ti_wilink_st.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index eb6cbdf10e50..44a7f9169ac6 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -295,7 +295,7 @@ struct bts_header {
        u32 magic;
        u32 version;
        u8 future[24];
-       u8 actions[0];
+       u8 actions[];
 } __attribute__ ((packed));
 
 /**
@@ -305,7 +305,7 @@ struct bts_header {
 struct bts_action {
        u16 type;
        u16 size;
-       u8 data[0];
+       u8 data[];
 } __attribute__ ((packed));
 
 struct bts_action_send {
@@ -315,7 +315,7 @@ struct bts_action_send {
 struct bts_action_wait {
        u32 msec;
        u32 size;
-       u8 data[0];
+       u8 data[];
 } __attribute__ ((packed));
 
 struct bts_action_delay {
--
cgit v1.2.3-58-ga151
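The bts_action conversion above is the classic length-prefixed-record
case: each record announces its own payload size, and a parser steps
through the stream using the fixed-part sizeof, which excludes the
flexible member. A userspace sketch (struct action and walk_actions()
are hypothetical stand-ins mirroring the bts_action layout;
little-endian byte order is assumed in the sample buffer):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct action {
            uint16_t type;
            uint16_t size;          /* bytes in data[] */
            uint8_t data[];
    } __attribute__((packed));

    static void walk_actions(const uint8_t *buf, size_t len)
    {
            size_t off = 0;

            while (off + sizeof(struct action) <= len) {
                    const struct action *a = (const void *)(buf + off);

                    if (off + sizeof(*a) + a->size > len)
                            break;  /* truncated record, stop */
                    printf("type=%u size=%u\n",
                           (unsigned)a->type, (unsigned)a->size);
                    /* sizeof(*a) excludes data[], so add the payload */
                    off += sizeof(*a) + a->size;
            }
    }

    int main(void)
    {
            /* two records: type 1 with 2 payload bytes, type 2 with 1 */
            static const uint8_t buf[] = {
                    1, 0, 2, 0, 0xaa, 0xbb,
                    2, 0, 1, 0, 0xcc,
            };

            walk_actions(buf, sizeof(buf));
            return 0;
    }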
Silva" Date: Mon, 23 Mar 2020 19:38:18 -0500 Subject: tpm_eventlog.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva --- include/linux/tpm_eventlog.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 131ea1bad458..c253461b1c4e 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -28,7 +28,7 @@ struct tcpa_event { u32 event_type; u8 pcr_value[20]; /* SHA1 */ u32 event_size; - u8 event_data[0]; + u8 event_data[]; }; enum tcpa_event_types { @@ -55,7 +55,7 @@ enum tcpa_event_types { struct tcpa_pc_event { u32 event_id; u32 event_size; - u8 event_data[0]; + u8 event_data[]; }; enum tcpa_pc_event_ids { @@ -102,7 +102,7 @@ struct tcg_pcr_event { struct tcg_event_field { u32 event_size; - u8 event[0]; + u8 event[]; } __packed; struct tcg_pcr_event2_head { -- cgit v1.2.3-58-ga151 From 43951585e1308b322c8ee31a4aafd08213f5c5d7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 23 Mar 2020 19:41:14 -0500 Subject: xattr.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
From 43951585e1308b322c8ee31a4aafd08213f5c5d7 Mon Sep 17 00:00:00 2001
From: "Gustavo A. R. Silva"
Date: Mon, 23 Mar 2020 19:41:14 -0500
Subject: xattr.h: Replace zero-length array with flexible-array member

The current codebase makes use of the zero-length array language
extension to the C90 standard, but the preferred mechanism to declare
variable-length types such as these ones is a flexible array
member[1][2], introduced in C99:

struct foo {
        int stuff;
        struct boo array[];
};

By making use of the mechanism above, we will get a compiler warning
in case the flexible array does not occur last in the structure, which
will help us prevent some kinds of undefined behavior bugs from being
inadvertently introduced[3] to the codebase from now on.

Also, notice that dynamic memory allocations won't be affected by this
change:

"Flexible array members have incomplete type, and so the sizeof
operator may not be applied. As a quirk of the original implementation
of zero-length arrays, sizeof evaluates to zero."[1]

This issue was found with the help of Coccinelle.

[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
[2] https://github.com/KSPP/linux/issues/21
[3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour")

Signed-off-by: Gustavo A. R. Silva
---
 include/linux/xattr.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 4cf6e11f4a3c..47eaa34f8761 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -73,7 +73,7 @@ struct simple_xattr {
        struct list_head list;
        char *name;
        size_t size;
-       char value[0];
+       char value[];
 };
 
 /*
--
cgit v1.2.3-58-ga151