From 22a9e57fccfea0cf9a066935dc6b81b866aaca55 Mon Sep 17 00:00:00 2001 From: Zheng Yongjun Date: Thu, 24 Dec 2020 21:24:46 +0800 Subject: misc: ocxl: use DEFINE_MUTEX() for mutex lock mutex lock can be initialized automatically with DEFINE_MUTEX() rather than explicitly calling mutex_init(). Acked-by: Frederic Barrat Signed-off-by: Zheng Yongjun Link: https://lore.kernel.org/r/20201224132446.31286-1-zhengyongjun3@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/ocxl/file.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 4d1b44de1492..e70525eedaae 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -15,7 +15,7 @@ static dev_t ocxl_dev; static struct class *ocxl_class; -static struct mutex minors_idr_lock; +static DEFINE_MUTEX(minors_idr_lock); static struct idr minors_idr; static struct ocxl_file_info *find_and_get_file_info(dev_t devno) @@ -588,7 +588,6 @@ int ocxl_file_init(void) { int rc; - mutex_init(&minors_idr_lock); idr_init(&minors_idr); rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl"); -- cgit v1.2.3-58-ga151 From 157576d552339edc7988c79f37f4a335eaa0eaf2 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 28 Dec 2020 21:44:13 +0100 Subject: misc: remove atmel_tclib There is no driver depending on atmel_tclib anymore. Remove this driver. Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20201228204413.2677762-1-alexandre.belloni@bootlin.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/Kconfig | 8 -- drivers/misc/Makefile | 1 - drivers/misc/atmel_tclib.c | 200 --------------------------------------------- 3 files changed, 209 deletions(-) delete mode 100644 drivers/misc/atmel_tclib.c (limited to 'drivers/misc') diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index fafa8b0d8099..e90c2524e46c 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -50,14 +50,6 @@ config AD525X_DPOT_SPI To compile this driver as a module, choose M here: the module will be called ad525x_dpot-spi. -config ATMEL_TCLIB - bool "Atmel AT32/AT91 Timer/Counter Library" - depends on ARCH_AT91 - help - Select this if you want a library to allocate the Timer/Counter - blocks found on many Atmel processors. This facilitates using - these blocks by different drivers despite processor differences. - config DUMMY_IRQ tristate "Dummy IRQ handler" help diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index d23231e73330..f65e8b18ecd8 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o obj-$(CONFIG_INTEL_MID_PTI) += pti.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o -obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o obj-$(CONFIG_ICS932S401) += ics932s401.o obj-$(CONFIG_LKDTM) += lkdtm/ diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c deleted file mode 100644 index 7de7840f613c..000000000000 --- a/drivers/misc/atmel_tclib.c +++ /dev/null @@ -1,200 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This is a thin library to solve the problem of how to portably allocate - * one of the TC blocks. For simplicity, it doesn't currently expect to - * share individual timers between different drivers. 
- */ - -#if defined(CONFIG_AVR32) -/* AVR32 has these divide PBB */ -const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, }; -EXPORT_SYMBOL(atmel_tc_divisors); - -#elif defined(CONFIG_ARCH_AT91) -/* AT91 has these divide MCK */ -const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, }; -EXPORT_SYMBOL(atmel_tc_divisors); - -#endif - -static DEFINE_SPINLOCK(tc_list_lock); -static LIST_HEAD(tc_list); - -/** - * atmel_tc_alloc - allocate a specified TC block - * @block: which block to allocate - * - * Caller allocates a block. If it is available, a pointer to a - * pre-initialized struct atmel_tc is returned. The caller can access - * the registers directly through the "regs" field. - */ -struct atmel_tc *atmel_tc_alloc(unsigned block) -{ - struct atmel_tc *tc; - struct platform_device *pdev = NULL; - - spin_lock(&tc_list_lock); - list_for_each_entry(tc, &tc_list, node) { - if (tc->allocated) - continue; - - if ((tc->pdev->dev.of_node && tc->id == block) || - (tc->pdev->id == block)) { - pdev = tc->pdev; - tc->allocated = true; - break; - } - } - spin_unlock(&tc_list_lock); - - return pdev ? tc : NULL; -} -EXPORT_SYMBOL_GPL(atmel_tc_alloc); - -/** - * atmel_tc_free - release a specified TC block - * @tc: Timer/counter block that was returned by atmel_tc_alloc() - * - * This reverses the effect of atmel_tc_alloc(), invalidating the resource - * returned by that routine and making the TC available to other drivers. - */ -void atmel_tc_free(struct atmel_tc *tc) -{ - spin_lock(&tc_list_lock); - if (tc->allocated) - tc->allocated = false; - spin_unlock(&tc_list_lock); -} -EXPORT_SYMBOL_GPL(atmel_tc_free); - -#if defined(CONFIG_OF) -static struct atmel_tcb_config tcb_rm9200_config = { - .counter_width = 16, -}; - -static struct atmel_tcb_config tcb_sam9x5_config = { - .counter_width = 32, -}; - -static const struct of_device_id atmel_tcb_dt_ids[] = { - { - .compatible = "atmel,at91rm9200-tcb", - .data = &tcb_rm9200_config, - }, { - .compatible = "atmel,at91sam9x5-tcb", - .data = &tcb_sam9x5_config, - }, { - /* sentinel */ - } -}; - -MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids); -#endif - -static int __init tc_probe(struct platform_device *pdev) -{ - struct atmel_tc *tc; - struct clk *clk; - int irq; - unsigned int i; - - if (of_get_child_count(pdev->dev.of_node)) - return -EBUSY; - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return -EINVAL; - - tc = devm_kzalloc(&pdev->dev, sizeof(struct atmel_tc), GFP_KERNEL); - if (!tc) - return -ENOMEM; - - tc->pdev = pdev; - - clk = devm_clk_get(&pdev->dev, "t0_clk"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - tc->slow_clk = devm_clk_get(&pdev->dev, "slow_clk"); - if (IS_ERR(tc->slow_clk)) - return PTR_ERR(tc->slow_clk); - - tc->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(tc->regs)) - return PTR_ERR(tc->regs); - - /* Now take SoC information if available */ - if (pdev->dev.of_node) { - const struct of_device_id *match; - match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node); - if (match) - tc->tcb_config = match->data; - - tc->id = of_alias_get_id(tc->pdev->dev.of_node, "tcb"); - } else { - tc->id = pdev->id; - } - - tc->clk[0] = clk; - tc->clk[1] = devm_clk_get(&pdev->dev, "t1_clk"); - if (IS_ERR(tc->clk[1])) - tc->clk[1] = clk; - tc->clk[2] = devm_clk_get(&pdev->dev, "t2_clk"); - if (IS_ERR(tc->clk[2])) - tc->clk[2] = clk; - - tc->irq[0] = irq; - tc->irq[1] = platform_get_irq(pdev, 1); - if (tc->irq[1] < 0) - tc->irq[1] = irq; - tc->irq[2] = platform_get_irq(pdev, 2); - if (tc->irq[2] < 0) - tc->irq[2] = irq; - - for (i = 0; i < 3; 
i++) - writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR)); - - spin_lock(&tc_list_lock); - list_add_tail(&tc->node, &tc_list); - spin_unlock(&tc_list_lock); - - platform_set_drvdata(pdev, tc); - - return 0; -} - -static void tc_shutdown(struct platform_device *pdev) -{ - int i; - struct atmel_tc *tc = platform_get_drvdata(pdev); - - for (i = 0; i < 3; i++) - writel(ATMEL_TC_ALL_IRQ, tc->regs + ATMEL_TC_REG(i, IDR)); -} - -static struct platform_driver tc_driver = { - .driver = { - .name = "atmel_tcb", - .of_match_table = of_match_ptr(atmel_tcb_dt_ids), - }, - .shutdown = tc_shutdown, -}; - -static int __init tc_init(void) -{ - return platform_driver_probe(&tc_driver, tc_probe); -} -arch_initcall(tc_init); -- cgit v1.2.3-58-ga151 From 38d98d73be9f5f90b63d56d73afffa6c9de59c02 Mon Sep 17 00:00:00 2001 From: Ricky Wu Date: Wed, 30 Dec 2020 14:39:53 +0800 Subject: misc: rtsx: remove unused function removed unused function 'rtsx_pci_disable_aspm' Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/20201230063953.10972-1-ricky_wu@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtsx_pcr.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 2aa6648fa41f..d782754fa346 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -59,12 +59,6 @@ static const struct pci_device_id rtsx_pci_ids[] = { MODULE_DEVICE_TABLE(pci, rtsx_pci_ids); -static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr) -{ - pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPMC, 0); -} - static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) { rtsx_pci_write_register(pcr, MSGTXDATA0, -- cgit v1.2.3-58-ga151 From 13613a2246bf531f5fc04e8e62e8f21a3d39bf1c Mon Sep 17 00:00:00 2001 From: Aswath Govindraju Date: Thu, 7 Jan 2021 22:09:53 +0530 Subject: misc: eeprom_93xx46: Fix module alias to enable module autoprobe Fix module autoprobe by correcting module alias to match the string from /sys/class/.../spi1.0/modalias content. Fixes: 06b4501e88ad ("misc/eeprom: add driver for microwire 93xx46 EEPROMs") Signed-off-by: Aswath Govindraju Link: https://lore.kernel.org/r/20210107163957.28664-2-a-govindraju@ti.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/eeprom_93xx46.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 7c45f82b4302..206d920dc92f 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -511,4 +511,4 @@ module_spi_driver(eeprom_93xx46_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); MODULE_AUTHOR("Anatolij Gustschin "); -MODULE_ALIAS("spi:93xx46"); +MODULE_ALIAS("spi:eeprom-93xx46"); -- cgit v1.2.3-58-ga151 From 47771f1715bf05b69b040bf784a2607a5dc13c62 Mon Sep 17 00:00:00 2001 From: Aswath Govindraju Date: Thu, 7 Jan 2021 22:09:54 +0530 Subject: misc: eeprom_93xx46: Enable module autoprobe for microchip 93LC46B eeprom Add module alias to enable autoprobe for microchip 93LC46B eeprom by using /sys/class/.../spi1.0/modalias content. 
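As an aside, a minimal sketch (not from this patch) of the matching that autoprobe relies on: the SPI core reports each device to udev as "spi:" followed by the device's modalias string, and modprobe then loads whichever module declares a matching MODULE_ALIAS(). Roughly:

#include <linux/device.h>
#include <linux/spi/spi.h>

/*
 * Illustrative only; the real logic lives in the SPI core (spi_uevent()).
 * It shows why the MODULE_ALIAS() string must match the sysfs modalias
 * content exactly.
 */
static int spi_uevent_sketch(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);

	/* emits e.g. "MODALIAS=spi:93lc46b" for a 93LC46B node */
	return add_uevent_var(env, "MODALIAS=spi:%s", spi->modalias);
}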
Signed-off-by: Aswath Govindraju
Link: https://lore.kernel.org/r/20210107163957.28664-3-a-govindraju@ti.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/eeprom/eeprom_93xx46.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 206d920dc92f..849cfea9e294 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -512,3 +512,4 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
 MODULE_AUTHOR("Anatolij Gustschin ");
 MODULE_ALIAS("spi:eeprom-93xx46");
+MODULE_ALIAS("spi:93lc46b");
--
cgit v1.2.3-58-ga151

From 4540b9fbd8ebb21bb3735796d300a1589ee5fbf2 Mon Sep 17 00:00:00 2001
From: Aswath Govindraju
Date: Wed, 13 Jan 2021 10:42:52 +0530
Subject: misc: eeprom_93xx46: Add module alias to avoid breaking support for
 non device tree users

Module alias "spi:93xx46" is used by non device tree users like
drivers/misc/eeprom/digsy_mtc_eeprom.c and removing it will break
support for them.

Fix this by adding back the module alias "spi:93xx46".

Fixes: 13613a2246bf ("misc: eeprom_93xx46: Fix module alias to enable module autoprobe")
Signed-off-by: Aswath Govindraju
Link: https://lore.kernel.org/r/20210113051253.15061-1-a-govindraju@ti.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/eeprom/eeprom_93xx46.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 849cfea9e294..e130a0d858e9 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -511,5 +511,6 @@ module_spi_driver(eeprom_93xx46_driver);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
 MODULE_AUTHOR("Anatolij Gustschin ");
+MODULE_ALIAS("spi:93xx46");
 MODULE_ALIAS("spi:eeprom-93xx46");
 MODULE_ALIAS("spi:93lc46b");
--
cgit v1.2.3-58-ga151

From f6f1f8e6e3eea25f539105d48166e91f0ab46dd1 Mon Sep 17 00:00:00 2001
From: Aswath Govindraju
Date: Tue, 5 Jan 2021 16:28:12 +0530
Subject: misc: eeprom_93xx46: Add quirk to support Microchip 93LC46B eeprom

A dummy zero bit is sent preceding the data during a read transfer by
the Microchip 93LC46B eeprom (section 2.7 of [1]). This results in right
shift of data during a read. In order to ignore this bit a quirk can be
added to send an extra zero bit after the read address.

Add a quirk to ignore the zero bit sent before data by adding a zero bit
after the read address.
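For clarity, a worked example of the misalignment this quirk corrects (illustrative, assuming MSB-first framing; the datasheet [1] below describes the dummy bit):

/*
 * Illustrative sketch, not part of the patch:
 *
 *   stored byte:      1 0 1 0 1 0 1 1         (0xab)
 *   wire, no quirk:   0 1 0 1 0 1 0 1 | 1...  host samples 0x55, every
 *                     byte right-shifted by the leading dummy 0 bit
 *   wire, with quirk: one extra zero clock after the address consumes
 *                     the dummy bit, so the host samples 0xab
 *
 * In the driver change below this is the "cmd_addr <<= 1; bits += 1;"
 * step.
 */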
[1] - https://www.mouser.com/datasheet/2/268/20001749K-277859.pdf

Signed-off-by: Aswath Govindraju
Link: https://lore.kernel.org/r/20210105105817.17644-3-a-govindraju@ti.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/eeprom/eeprom_93xx46.c | 15 +++++++++++++++
 include/linux/eeprom_93xx46.h       |  2 ++
 2 files changed, 17 insertions(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index e130a0d858e9..80114f4c80ad 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
 		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
 };
 
+static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
+	.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
+};
+
 struct eeprom_93xx46_dev {
 	struct spi_device *spi;
 	struct eeprom_93xx46_platform_data *pdata;
@@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
 	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
 }
 
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
+{
+	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
+}
+
 static int eeprom_93xx46_read(void *priv, unsigned int off,
 			      void *val, size_t count)
 {
@@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
 		dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
 			cmd_addr, edev->spi->max_speed_hz);
 
+		if (has_quirk_extra_read_cycle(edev)) {
+			cmd_addr <<= 1;
+			bits += 1;
+		}
+
 		spi_message_init(&m);
 
 		t[0].tx_buf = (char *)&cmd_addr;
@@ -363,6 +377,7 @@ static void select_deassert(void *context)
 static const struct of_device_id eeprom_93xx46_of_table[] = {
 	{ .compatible = "eeprom-93xx46", },
 	{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+	{ .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
 	{}
 };
 MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index eec7928ff8fe..99580c22f91a 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
 #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ		(1 << 0)
 /* Instructions such as EWEN are (addrlen + 2) in length. */
 #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH		(1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE		BIT(2)
 
 /*
  * optional hooks to control additional logic
--
cgit v1.2.3-58-ga151

From 522f692686a73e51300073c08e4107a8cd0b9854 Mon Sep 17 00:00:00 2001
From: Scott Branden
Date: Wed, 20 Jan 2021 09:58:16 -0800
Subject: misc: bcm-vk: add Broadcom VK driver

Add initial version of Broadcom VK driver to enumerate PCI device IDs
of Valkyrie and Viper device IDs.

VK based cards provide real-time high performance, high throughput, low
latency offload compute engine operations. They are used for multiple
parallel offload tasks as: audio, video and image processing and crypto
operations.

Further commits add additional features to driver beyond probe/remove.
Acked-by: Olof Johansson Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-3-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/Kconfig | 1 + drivers/misc/Makefile | 1 + drivers/misc/bcm-vk/Kconfig | 17 +++++ drivers/misc/bcm-vk/Makefile | 8 +++ drivers/misc/bcm-vk/bcm_vk.h | 29 ++++++++ drivers/misc/bcm-vk/bcm_vk_dev.c | 141 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 197 insertions(+) create mode 100644 drivers/misc/bcm-vk/Kconfig create mode 100644 drivers/misc/bcm-vk/Makefile create mode 100644 drivers/misc/bcm-vk/bcm_vk.h create mode 100644 drivers/misc/bcm-vk/bcm_vk_dev.c (limited to 'drivers/misc') diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index e90c2524e46c..8085213913cc 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -470,6 +470,7 @@ source "drivers/misc/genwqe/Kconfig" source "drivers/misc/echo/Kconfig" source "drivers/misc/cxl/Kconfig" source "drivers/misc/ocxl/Kconfig" +source "drivers/misc/bcm-vk/Kconfig" source "drivers/misc/cardreader/Kconfig" source "drivers/misc/habanalabs/Kconfig" source "drivers/misc/uacce/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f65e8b18ecd8..456aa8a4c896 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_ECHO) += echo/ obj-$(CONFIG_CXL_BASE) += cxl/ obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o obj-$(CONFIG_OCXL) += ocxl/ +obj-$(CONFIG_BCM_VK) += bcm-vk/ obj-y += cardreader/ obj-$(CONFIG_PVPANIC) += pvpanic.o obj-$(CONFIG_HABANA_AI) += habanalabs/ diff --git a/drivers/misc/bcm-vk/Kconfig b/drivers/misc/bcm-vk/Kconfig new file mode 100644 index 000000000000..052f6f28b540 --- /dev/null +++ b/drivers/misc/bcm-vk/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Broadcom VK device +# +config BCM_VK + tristate "Support for Broadcom VK Accelerators" + depends on PCI_MSI + help + Select this option to enable support for Broadcom + VK Accelerators. VK is used for performing + multiple specific offload processing tasks in parallel. + Such offload tasks assist in such operations as video + transcoding, compression, and crypto tasks. + This driver enables userspace programs to access these + accelerators via /dev/bcm-vk.N devices. + + If unsure, say N. diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile new file mode 100644 index 000000000000..f8a7ac4c242f --- /dev/null +++ b/drivers/misc/bcm-vk/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Broadcom VK driver +# + +obj-$(CONFIG_BCM_VK) += bcm_vk.o +bcm_vk-objs := \ + bcm_vk_dev.o diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h new file mode 100644 index 000000000000..9152785199ab --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2018-2020 Broadcom. + */ + +#ifndef BCM_VK_H +#define BCM_VK_H + +#include + +#define DRV_MODULE_NAME "bcm-vk" + +/* VK device supports a maximum of 3 bars */ +#define MAX_BAR 3 + +enum pci_barno { + BAR_0 = 0, + BAR_1, + BAR_2 +}; + +#define BCM_VK_NUM_TTY 2 + +struct bcm_vk { + struct pci_dev *pdev; + void __iomem *bar[MAX_BAR]; +}; + +#endif diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c new file mode 100644 index 000000000000..14afe2477b97 --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018-2020 Broadcom. 
+ */ + +#include +#include +#include +#include + +#include "bcm_vk.h" + +#define PCI_DEVICE_ID_VALKYRIE 0x5e87 +#define PCI_DEVICE_ID_VIPER 0x5e88 + +/* MSIX usages */ +#define VK_MSIX_MSGQ_MAX 3 +#define VK_MSIX_NOTF_MAX 1 +#define VK_MSIX_TTY_MAX BCM_VK_NUM_TTY +#define VK_MSIX_IRQ_MAX (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX + \ + VK_MSIX_TTY_MAX) +#define VK_MSIX_IRQ_MIN_REQ (VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX) + +/* Number of bits set in DMA mask*/ +#define BCM_VK_DMA_BITS 64 + +static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err; + int i; + int irq; + struct bcm_vk *vk; + struct device *dev = &pdev->dev; + + vk = kzalloc(sizeof(*vk), GFP_KERNEL); + if (!vk) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device\n"); + goto err_free_exit; + } + vk->pdev = pci_dev_get(pdev); + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(dev, "Cannot obtain PCI resources\n"); + goto err_disable_pdev; + } + + /* make sure DMA is good */ + err = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(BCM_VK_DMA_BITS)); + if (err) { + dev_err(dev, "failed to set DMA mask\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, vk); + + irq = pci_alloc_irq_vectors(pdev, + 1, + VK_MSIX_IRQ_MAX, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + + if (irq < VK_MSIX_IRQ_MIN_REQ) { + dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n", + VK_MSIX_IRQ_MIN_REQ, irq); + err = (irq >= 0) ? -EINVAL : irq; + goto err_disable_pdev; + } + + if (irq != VK_MSIX_IRQ_MAX) + dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n", + irq, VK_MSIX_IRQ_MAX); + + for (i = 0; i < MAX_BAR; i++) { + /* multiple by 2 for 64 bit BAR mapping */ + vk->bar[i] = pci_ioremap_bar(pdev, i * 2); + if (!vk->bar[i]) { + dev_err(dev, "failed to remap BAR%d\n", i); + goto err_iounmap; + } + } + + return 0; + +err_iounmap: + for (i = 0; i < MAX_BAR; i++) { + if (vk->bar[i]) + pci_iounmap(pdev, vk->bar[i]); + } + pci_release_regions(pdev); + +err_disable_pdev: + pci_free_irq_vectors(pdev); + pci_disable_device(pdev); + pci_dev_put(pdev); + +err_free_exit: + kfree(vk); + + return err; +} + +static void bcm_vk_remove(struct pci_dev *pdev) +{ + int i; + struct bcm_vk *vk = pci_get_drvdata(pdev); + + for (i = 0; i < MAX_BAR; i++) { + if (vk->bar[i]) + pci_iounmap(pdev, vk->bar[i]); + } + + pci_release_regions(pdev); + pci_free_irq_vectors(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id bcm_vk_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VIPER), }, + { } +}; +MODULE_DEVICE_TABLE(pci, bcm_vk_ids); + +static struct pci_driver pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bcm_vk_ids, + .probe = bcm_vk_probe, + .remove = bcm_vk_remove, +}; +module_pci_driver(pci_driver); + +MODULE_DESCRIPTION("Broadcom VK Host Driver"); +MODULE_AUTHOR("Scott Branden "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("1.0"); -- cgit v1.2.3-58-ga151 From 064ffc7c3939ebc9c152e17bd9f4496d7b318688 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:17 -0800 Subject: misc: bcm-vk: add autoload support Add support to load and boot images on card automatically. The kernel module parameter auto_load can be passed in as false to disable such support on probe. As well, nr_scratch_pages can be specified to allocate more or less scratch memory on init as needed for desired card operation. 
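For example, both parameters can be set at load time through a modprobe configuration line (the values here are purely illustrative):

options bcm_vk auto_load=0 nr_scratch_pages=64

Because both are declared with 0444 permissions (see the diff below), the active values can also be read back at runtime from /sys/module/bcm_vk/parameters/.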
Co-developed-by: Desmond Yan Co-developed-by: James Hu Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: James Hu Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-4-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk.h | 250 ++++++++++++++ drivers/misc/bcm-vk/bcm_vk_dev.c | 723 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 973 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 9152785199ab..c4fb61a84e41 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -6,10 +6,187 @@ #ifndef BCM_VK_H #define BCM_VK_H +#include #include +#include #define DRV_MODULE_NAME "bcm-vk" +/* + * Load Image is completed in two stages: + * + * 1) When the VK device boot-up, M7 CPU runs and executes the BootROM. + * The Secure Boot Loader (SBL) as part of the BootROM will run + * to open up ITCM for host to push BOOT1 image. + * SBL will authenticate the image before jumping to BOOT1 image. + * + * 2) Because BOOT1 image is a secured image, we also called it the + * Secure Boot Image (SBI). At second stage, SBI will initialize DDR + * and wait for host to push BOOT2 image to DDR. + * SBI will authenticate the image before jumping to BOOT2 image. + * + */ +/* Location of registers of interest in BAR0 */ + +/* Request register for Secure Boot Loader (SBL) download */ +#define BAR_CODEPUSH_SBL 0x400 +/* Start of ITCM */ +#define CODEPUSH_BOOT1_ENTRY 0x00400000 +#define CODEPUSH_MASK 0xfffff000 +#define CODEPUSH_BOOTSTART BIT(0) + +/* Boot Status register */ +#define BAR_BOOT_STATUS 0x404 + +#define SRAM_OPEN BIT(16) +#define DDR_OPEN BIT(17) + +/* Firmware loader progress status definitions */ +#define FW_LOADER_ACK_SEND_MORE_DATA BIT(18) +#define FW_LOADER_ACK_IN_PROGRESS BIT(19) +#define FW_LOADER_ACK_RCVD_ALL_DATA BIT(20) + +/* Boot1/2 is running in standalone mode */ +#define BOOT_STDALONE_RUNNING BIT(21) + +/* definitions for boot status register */ +#define BOOT_STATE_MASK (0xffffffff & \ + ~(FW_LOADER_ACK_SEND_MORE_DATA | \ + FW_LOADER_ACK_IN_PROGRESS | \ + BOOT_STDALONE_RUNNING)) + +#define BOOT_ERR_SHIFT 4 +#define BOOT_ERR_MASK (0xf << BOOT_ERR_SHIFT) +#define BOOT_PROG_MASK 0xf + +#define BROM_STATUS_NOT_RUN 0x2 +#define BROM_NOT_RUN (SRAM_OPEN | BROM_STATUS_NOT_RUN) +#define BROM_STATUS_COMPLETE 0x6 +#define BROM_RUNNING (SRAM_OPEN | BROM_STATUS_COMPLETE) +#define BOOT1_STATUS_COMPLETE 0x6 +#define BOOT1_RUNNING (DDR_OPEN | BOOT1_STATUS_COMPLETE) +#define BOOT2_STATUS_COMPLETE 0x6 +#define BOOT2_RUNNING (FW_LOADER_ACK_RCVD_ALL_DATA | \ + BOOT2_STATUS_COMPLETE) + +/* Boot request for Secure Boot Image (SBI) */ +#define BAR_CODEPUSH_SBI 0x408 +/* 64M mapped to BAR2 */ +#define CODEPUSH_BOOT2_ENTRY 0x60000000 + +#define BAR_CARD_STATUS 0x410 + +#define BAR_BOOT1_STDALONE_PROGRESS 0x420 +#define BOOT1_STDALONE_SUCCESS (BIT(13) | BIT(14)) +#define BOOT1_STDALONE_PROGRESS_MASK BOOT1_STDALONE_SUCCESS + +#define BAR_METADATA_VERSION 0x440 +#define BAR_OS_UPTIME 0x444 +#define BAR_CHIP_ID 0x448 +#define MAJOR_SOC_REV(_chip_id) (((_chip_id) >> 20) & 0xf) + +#define BAR_CARD_TEMPERATURE 0x45c + +#define BAR_CARD_VOLTAGE 0x460 + +#define BAR_CARD_ERR_LOG 0x464 + +#define BAR_CARD_ERR_MEM 0x468 + +#define BAR_CARD_PWR_AND_THRE 0x46c + +#define BAR_CARD_STATIC_INFO 0x470 + +#define BAR_INTF_VER 0x47c +#define BAR_INTF_VER_MAJOR_SHIFT 16 +#define BAR_INTF_VER_MASK 0xffff +/* + * major and minor semantic version numbers 
supported + * Please update as required on interface changes + */ +#define SEMANTIC_MAJOR 1 +#define SEMANTIC_MINOR 0 + +/* + * first door bell reg, ie for queue = 0. Only need the first one, as + * we will use the queue number to derive the others + */ +#define VK_BAR0_REGSEG_DB_BASE 0x484 +#define VK_BAR0_REGSEG_DB_REG_GAP 8 /* + * DB register gap, + * DB1 at 0x48c and DB2 at 0x494 + */ + +/* reset register and specific values */ +#define VK_BAR0_RESET_DB_NUM 3 +#define VK_BAR0_RESET_DB_SOFT 0xffffffff +#define VK_BAR0_RESET_DB_HARD 0xfffffffd +#define VK_BAR0_RESET_RAMPDUMP 0xa0000000 + +#define VK_BAR0_Q_DB_BASE(q_num) (VK_BAR0_REGSEG_DB_BASE + \ + ((q_num) * VK_BAR0_REGSEG_DB_REG_GAP)) +#define VK_BAR0_RESET_DB_BASE (VK_BAR0_REGSEG_DB_BASE + \ + (VK_BAR0_RESET_DB_NUM * VK_BAR0_REGSEG_DB_REG_GAP)) + +#define BAR_BOOTSRC_SELECT 0xc78 +/* BOOTSRC definitions */ +#define BOOTSRC_SOFT_ENABLE BIT(14) + +/* Card OS Firmware version size */ +#define BAR_FIRMWARE_TAG_SIZE 50 +#define FIRMWARE_STATUS_PRE_INIT_DONE 0x1f + +/* + * BAR1 + */ + +/* BAR1 message q definition */ + +/* indicate if msgq ctrl in BAR1 is populated */ +#define VK_BAR1_MSGQ_DEF_RDY 0x60c0 +/* ready marker value for the above location, normal boot2 */ +#define VK_BAR1_MSGQ_RDY_MARKER 0xbeefcafe +/* ready marker value for the above location, normal boot2 */ +#define VK_BAR1_DIAG_RDY_MARKER 0xdeadcafe +/* number of msgqs in BAR1 */ +#define VK_BAR1_MSGQ_NR 0x60c4 +/* BAR1 queue control structure offset */ +#define VK_BAR1_MSGQ_CTRL_OFF 0x60c8 + +/* BAR1 ucode and boot1 version tag */ +#define VK_BAR1_UCODE_VER_TAG 0x6170 +#define VK_BAR1_BOOT1_VER_TAG 0x61b0 +#define VK_BAR1_VER_TAG_SIZE 64 + +/* Memory to hold the DMA buffer memory address allocated for boot2 download */ +#define VK_BAR1_DMA_BUF_OFF_HI 0x61e0 +#define VK_BAR1_DMA_BUF_OFF_LO (VK_BAR1_DMA_BUF_OFF_HI + 4) +#define VK_BAR1_DMA_BUF_SZ (VK_BAR1_DMA_BUF_OFF_HI + 8) + +/* Scratch memory allocated on host for VK */ +#define VK_BAR1_SCRATCH_OFF_HI 0x61f0 +#define VK_BAR1_SCRATCH_OFF_LO (VK_BAR1_SCRATCH_OFF_HI + 4) +#define VK_BAR1_SCRATCH_SZ_ADDR (VK_BAR1_SCRATCH_OFF_HI + 8) +#define VK_BAR1_SCRATCH_DEF_NR_PAGES 32 + +/* BAR1 DAUTH info */ +#define VK_BAR1_DAUTH_BASE_ADDR 0x6200 +#define VK_BAR1_DAUTH_STORE_SIZE 0x48 +#define VK_BAR1_DAUTH_VALID_SIZE 0x8 +#define VK_BAR1_DAUTH_MAX 4 +#define VK_BAR1_DAUTH_STORE_ADDR(x) \ + (VK_BAR1_DAUTH_BASE_ADDR + \ + (x) * (VK_BAR1_DAUTH_STORE_SIZE + VK_BAR1_DAUTH_VALID_SIZE)) +#define VK_BAR1_DAUTH_VALID_ADDR(x) \ + (VK_BAR1_DAUTH_STORE_ADDR(x) + VK_BAR1_DAUTH_STORE_SIZE) + +/* BAR1 SOTP AUTH and REVID info */ +#define VK_BAR1_SOTP_REVID_BASE_ADDR 0x6340 +#define VK_BAR1_SOTP_REVID_SIZE 0x10 +#define VK_BAR1_SOTP_REVID_MAX 2 +#define VK_BAR1_SOTP_REVID_ADDR(x) \ + (VK_BAR1_SOTP_REVID_BASE_ADDR + (x) * VK_BAR1_SOTP_REVID_SIZE) + /* VK device supports a maximum of 3 bars */ #define MAX_BAR 3 @@ -21,9 +198,82 @@ enum pci_barno { #define BCM_VK_NUM_TTY 2 +/* DAUTH related info */ +struct bcm_vk_dauth_key { + char store[VK_BAR1_DAUTH_STORE_SIZE]; + char valid[VK_BAR1_DAUTH_VALID_SIZE]; +}; + +struct bcm_vk_dauth_info { + struct bcm_vk_dauth_key keys[VK_BAR1_DAUTH_MAX]; +}; + struct bcm_vk { struct pci_dev *pdev; void __iomem *bar[MAX_BAR]; + + struct bcm_vk_dauth_info dauth_info; + + int devid; /* dev id allocated */ + + struct workqueue_struct *wq_thread; + struct work_struct wq_work; /* work queue for deferred job */ + unsigned long wq_offload[1]; /* various flags on wq requested */ + void *tdma_vaddr; /* test dma segment virtual 
addr */ + dma_addr_t tdma_addr; /* test dma segment bus addr */ +}; + +/* wq offload work items bits definitions */ +enum bcm_vk_wq_offload_flags { + BCM_VK_WQ_DWNLD_PEND = 0, + BCM_VK_WQ_DWNLD_AUTO = 1, }; +/* + * check if PCIe interface is down on read. Use it when it is + * certain that _val should never be all ones. + */ +#define BCM_VK_INTF_IS_DOWN(val) ((val) == 0xffffffff) + +static inline u32 vkread32(struct bcm_vk *vk, enum pci_barno bar, u64 offset) +{ + return readl(vk->bar[bar] + offset); +} + +static inline void vkwrite32(struct bcm_vk *vk, + u32 value, + enum pci_barno bar, + u64 offset) +{ + writel(value, vk->bar[bar] + offset); +} + +static inline u8 vkread8(struct bcm_vk *vk, enum pci_barno bar, u64 offset) +{ + return readb(vk->bar[bar] + offset); +} + +static inline void vkwrite8(struct bcm_vk *vk, + u8 value, + enum pci_barno bar, + u64 offset) +{ + writeb(value, vk->bar[bar] + offset); +} + +static inline bool bcm_vk_msgq_marker_valid(struct bcm_vk *vk) +{ + u32 rdy_marker = 0; + u32 fw_status; + + fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS); + + if ((fw_status & VK_FWSTS_READY) == VK_FWSTS_READY) + rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY); + + return (rdy_marker == VK_BAR1_MSGQ_RDY_MARKER); +} + +int bcm_vk_auto_load_all_images(struct bcm_vk *vk); + #endif diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index 14afe2477b97..adc3103c7012 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -3,16 +3,72 @@ * Copyright 2018-2020 Broadcom. */ +#include #include +#include +#include #include #include #include +#include #include "bcm_vk.h" #define PCI_DEVICE_ID_VALKYRIE 0x5e87 #define PCI_DEVICE_ID_VIPER 0x5e88 +static DEFINE_IDA(bcm_vk_ida); + +enum soc_idx { + VALKYRIE_A0 = 0, + VALKYRIE_B0, + VIPER, + VK_IDX_INVALID +}; + +enum img_idx { + IMG_PRI = 0, + IMG_SEC, + IMG_PER_TYPE_MAX +}; + +struct load_image_entry { + const u32 image_type; + const char *image_name[IMG_PER_TYPE_MAX]; +}; + +#define NUM_BOOT_STAGES 2 +/* default firmware images names */ +static const struct load_image_entry image_tab[][NUM_BOOT_STAGES] = { + [VALKYRIE_A0] = { + {VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}}, + {VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}} + }, + [VALKYRIE_B0] = { + {VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}}, + {VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}} + }, + + [VIPER] = { + {VK_IMAGE_TYPE_BOOT1, {"vp-boot1.bin", ""}}, + {VK_IMAGE_TYPE_BOOT2, {"vp-boot2.bin", ""}} + }, +}; + +/* Location of memory base addresses of interest in BAR1 */ +/* Load Boot1 to start of ITCM */ +#define BAR1_CODEPUSH_BASE_BOOT1 0x100000 + +/* Allow minimum 1s for Load Image timeout responses */ +#define LOAD_IMAGE_TIMEOUT_MS (1 * MSEC_PER_SEC) + +/* Image startup timeouts */ +#define BOOT1_STARTUP_TIMEOUT_MS (5 * MSEC_PER_SEC) +#define BOOT2_STARTUP_TIMEOUT_MS (10 * MSEC_PER_SEC) + +/* 1ms wait for checking the transfer complete status */ +#define TXFR_COMPLETE_TIMEOUT_MS 1 + /* MSIX usages */ #define VK_MSIX_MSGQ_MAX 3 #define VK_MSIX_NOTF_MAX 1 @@ -24,13 +80,570 @@ /* Number of bits set in DMA mask*/ #define BCM_VK_DMA_BITS 64 +/* Ucode boot wait time */ +#define BCM_VK_UCODE_BOOT_US (100 * USEC_PER_MSEC) +/* 50% margin */ +#define BCM_VK_UCODE_BOOT_MAX_US ((BCM_VK_UCODE_BOOT_US * 3) >> 1) + +/* deinit time for the card os after receiving doorbell */ +#define BCM_VK_DEINIT_TIME_MS (2 * MSEC_PER_SEC) + +/* + * module parameters + */ +static bool auto_load = true; 
+module_param(auto_load, bool, 0444); +MODULE_PARM_DESC(auto_load, + "Load images automatically at PCIe probe time.\n"); +static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES; +module_param(nr_scratch_pages, uint, 0444); +MODULE_PARM_DESC(nr_scratch_pages, + "Number of pre allocated DMAable coherent pages.\n"); + +static int bcm_vk_intf_ver_chk(struct bcm_vk *vk) +{ + struct device *dev = &vk->pdev->dev; + u32 reg; + u16 major, minor; + int ret = 0; + + /* read interface register */ + reg = vkread32(vk, BAR_0, BAR_INTF_VER); + major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK; + minor = reg & BAR_INTF_VER_MASK; + + /* + * if major number is 0, it is pre-release and it would be allowed + * to continue, else, check versions accordingly + */ + if (!major) { + dev_warn(dev, "Pre-release major.minor=%d.%d - drv %d.%d\n", + major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR); + } else if (major != SEMANTIC_MAJOR) { + dev_err(dev, + "Intf major.minor=%d.%d rejected - drv %d.%d\n", + major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR); + ret = -EPFNOSUPPORT; + } else { + dev_dbg(dev, + "Intf major.minor=%d.%d passed - drv %d.%d\n", + major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR); + } + return ret; +} + +static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar, + u64 offset, u32 mask, u32 value, + unsigned long timeout_ms) +{ + struct device *dev = &vk->pdev->dev; + unsigned long start_time; + unsigned long timeout; + u32 rd_val, boot_status; + + start_time = jiffies; + timeout = start_time + msecs_to_jiffies(timeout_ms); + + do { + rd_val = vkread32(vk, bar, offset); + dev_dbg(dev, "BAR%d Offset=0x%llx: 0x%x\n", + bar, offset, rd_val); + + /* check for any boot err condition */ + boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); + if (boot_status & BOOT_ERR_MASK) { + dev_err(dev, "Boot Err 0x%x, progress 0x%x after %d ms\n", + (boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT, + boot_status & BOOT_PROG_MASK, + jiffies_to_msecs(jiffies - start_time)); + return -EFAULT; + } + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + cpu_relax(); + cond_resched(); + } while ((rd_val & mask) != value); + + return 0; +} + +static int bcm_vk_sync_card_info(struct bcm_vk *vk) +{ + u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY); + + /* check for marker, but allow diags mode to skip sync */ + if (!bcm_vk_msgq_marker_valid(vk)) + return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL); + + /* + * Write down scratch addr which is used for DMA. 
For + * signed part, BAR1 is accessible only after boot2 has come + * up + */ + if (vk->tdma_addr) { + vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1, + VK_BAR1_SCRATCH_OFF_HI); + vkwrite32(vk, (u32)vk->tdma_addr, BAR_1, + VK_BAR1_SCRATCH_OFF_LO); + vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1, + VK_BAR1_SCRATCH_SZ_ADDR); + } + return 0; +} + +static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp, + dma_addr_t host_buf_addr, u32 buf_size) +{ + /* update the dma address to the card */ + vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1, + VK_BAR1_DMA_BUF_OFF_HI); + vkwrite32(vk, (u32)host_buf_addr, BAR_1, + VK_BAR1_DMA_BUF_OFF_LO); + vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ); +} + +static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type, + const char *filename) +{ + struct device *dev = &vk->pdev->dev; + const struct firmware *fw = NULL; + void *bufp = NULL; + size_t max_buf, offset; + int ret; + u64 offset_codepush; + u32 codepush; + u32 value; + dma_addr_t boot_dma_addr; + bool is_stdalone; + + if (load_type == VK_IMAGE_TYPE_BOOT1) { + /* + * After POR, enable VK soft BOOTSRC so bootrom do not clear + * the pushed image (the TCM memories). + */ + value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT); + value |= BOOTSRC_SOFT_ENABLE; + vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT); + + codepush = CODEPUSH_BOOTSTART + CODEPUSH_BOOT1_ENTRY; + offset_codepush = BAR_CODEPUSH_SBL; + + /* Write a 1 to request SRAM open bit */ + vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush); + + /* Wait for VK to respond */ + ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN, + SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS); + if (ret < 0) { + dev_err(dev, "boot1 wait SRAM err - ret(%d)\n", ret); + goto err_buf_out; + } + + max_buf = SZ_256K; + bufp = dma_alloc_coherent(dev, + max_buf, + &boot_dma_addr, GFP_KERNEL); + if (!bufp) { + dev_err(dev, "Error allocating 0x%zx\n", max_buf); + ret = -ENOMEM; + goto err_buf_out; + } + } else if (load_type == VK_IMAGE_TYPE_BOOT2) { + codepush = CODEPUSH_BOOT2_ENTRY; + offset_codepush = BAR_CODEPUSH_SBI; + + /* Wait for VK to respond */ + ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN, + DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS); + if (ret < 0) { + dev_err(dev, "boot2 wait DDR open error - ret(%d)\n", + ret); + goto err_buf_out; + } + + max_buf = SZ_4M; + bufp = dma_alloc_coherent(dev, + max_buf, + &boot_dma_addr, GFP_KERNEL); + if (!bufp) { + dev_err(dev, "Error allocating 0x%zx\n", max_buf); + ret = -ENOMEM; + goto err_buf_out; + } + + bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf); + } else { + dev_err(dev, "Error invalid image type 0x%x\n", load_type); + ret = -EINVAL; + goto err_buf_out; + } + + offset = 0; + ret = request_partial_firmware_into_buf(&fw, filename, dev, + bufp, max_buf, offset); + if (ret) { + dev_err(dev, "Error %d requesting firmware file: %s\n", + ret, filename); + goto err_firmware_out; + } + dev_dbg(dev, "size=0x%zx\n", fw->size); + if (load_type == VK_IMAGE_TYPE_BOOT1) + memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1, + bufp, + fw->size); + + dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", codepush, offset_codepush); + vkwrite32(vk, codepush, BAR_0, offset_codepush); + + if (load_type == VK_IMAGE_TYPE_BOOT1) { + u32 boot_status; + + /* wait until done */ + ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, + BOOT1_RUNNING, + BOOT1_RUNNING, + BOOT1_STARTUP_TIMEOUT_MS); + + boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); + is_stdalone = !BCM_VK_INTF_IS_DOWN(boot_status) && + (boot_status & BOOT_STDALONE_RUNNING); + if 
(ret && !is_stdalone) { + dev_err(dev, + "Timeout %ld ms waiting for boot1 to come up - ret(%d)\n", + BOOT1_STARTUP_TIMEOUT_MS, ret); + goto err_firmware_out; + } else if (is_stdalone) { + u32 reg; + + reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS); + if ((reg & BOOT1_STDALONE_PROGRESS_MASK) == + BOOT1_STDALONE_SUCCESS) { + dev_info(dev, "Boot1 standalone success\n"); + ret = 0; + } else { + dev_err(dev, "Timeout %ld ms - Boot1 standalone failure\n", + BOOT1_STARTUP_TIMEOUT_MS); + ret = -EINVAL; + goto err_firmware_out; + } + } + } else if (load_type == VK_IMAGE_TYPE_BOOT2) { + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS); + + /* To send more data to VK than max_buf allowed at a time */ + do { + /* + * Check for ack from card. when Ack is received, + * it means all the data is received by card. + * Exit the loop after ack is received. + */ + ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, + FW_LOADER_ACK_RCVD_ALL_DATA, + FW_LOADER_ACK_RCVD_ALL_DATA, + TXFR_COMPLETE_TIMEOUT_MS); + if (ret == 0) { + dev_dbg(dev, "Exit boot2 download\n"); + break; + } else if (ret == -EFAULT) { + dev_err(dev, "Error detected during ACK waiting"); + goto err_firmware_out; + } + + /* exit the loop, if there is no response from card */ + if (time_after(jiffies, timeout)) { + dev_err(dev, "Error. No reply from card\n"); + ret = -ETIMEDOUT; + goto err_firmware_out; + } + + /* Wait for VK to open BAR space to copy new data */ + ret = bcm_vk_wait(vk, BAR_0, offset_codepush, + codepush, 0, + TXFR_COMPLETE_TIMEOUT_MS); + if (ret == 0) { + offset += max_buf; + ret = request_partial_firmware_into_buf + (&fw, + filename, + dev, bufp, + max_buf, + offset); + if (ret) { + dev_err(dev, + "Error %d requesting firmware file: %s offset: 0x%zx\n", + ret, filename, offset); + goto err_firmware_out; + } + dev_dbg(dev, "size=0x%zx\n", fw->size); + dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", + codepush, offset_codepush); + vkwrite32(vk, codepush, BAR_0, offset_codepush); + /* reload timeout after every codepush */ + timeout = jiffies + + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS); + } else if (ret == -EFAULT) { + dev_err(dev, "Error detected waiting for transfer\n"); + goto err_firmware_out; + } + } while (1); + + /* wait for fw status bits to indicate app ready */ + ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS, + VK_FWSTS_READY, + VK_FWSTS_READY, + BOOT2_STARTUP_TIMEOUT_MS); + if (ret < 0) { + dev_err(dev, "Boot2 not ready - ret(%d)\n", ret); + goto err_firmware_out; + } + + is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) & + BOOT_STDALONE_RUNNING; + if (!is_stdalone) { + ret = bcm_vk_intf_ver_chk(vk); + if (ret) { + dev_err(dev, "failure in intf version check\n"); + goto err_firmware_out; + } + + /* sync & channel other info */ + ret = bcm_vk_sync_card_info(vk); + if (ret) { + dev_err(dev, "Syncing Card Info failure\n"); + goto err_firmware_out; + } + } + } + +err_firmware_out: + release_firmware(fw); + +err_buf_out: + if (bufp) + dma_free_coherent(dev, max_buf, bufp, boot_dma_addr); + + return ret; +} + +static u32 bcm_vk_next_boot_image(struct bcm_vk *vk) +{ + u32 boot_status; + u32 fw_status; + u32 load_type = 0; /* default for unknown */ + + boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); + fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS); + + if (!BCM_VK_INTF_IS_DOWN(boot_status) && (boot_status & SRAM_OPEN)) + load_type = VK_IMAGE_TYPE_BOOT1; + else if (boot_status == BOOT1_RUNNING) + load_type = VK_IMAGE_TYPE_BOOT2; + + /* Log status so that we know different stages */ + 
dev_info(&vk->pdev->dev, + "boot-status value for next image: 0x%x : fw-status 0x%x\n", + boot_status, fw_status); + + return load_type; +} + +static enum soc_idx get_soc_idx(struct bcm_vk *vk) +{ + struct pci_dev *pdev = vk->pdev; + enum soc_idx idx = VK_IDX_INVALID; + u32 rev; + static enum soc_idx const vk_soc_tab[] = { VALKYRIE_A0, VALKYRIE_B0 }; + + switch (pdev->device) { + case PCI_DEVICE_ID_VALKYRIE: + /* get the chip id to decide sub-class */ + rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID)); + if (rev < ARRAY_SIZE(vk_soc_tab)) { + idx = vk_soc_tab[rev]; + } else { + /* Default to A0 firmware for all other chip revs */ + idx = VALKYRIE_A0; + dev_warn(&pdev->dev, + "Rev %d not in image lookup table, default to idx=%d\n", + rev, idx); + } + break; + + case PCI_DEVICE_ID_VIPER: + idx = VIPER; + break; + + default: + dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device); + } + return idx; +} + +static const char *get_load_fw_name(struct bcm_vk *vk, + const struct load_image_entry *entry) +{ + const struct firmware *fw; + struct device *dev = &vk->pdev->dev; + int ret; + unsigned long dummy; + int i; + + for (i = 0; i < IMG_PER_TYPE_MAX; i++) { + fw = NULL; + ret = request_partial_firmware_into_buf(&fw, + entry->image_name[i], + dev, &dummy, + sizeof(dummy), + 0); + release_firmware(fw); + if (!ret) + return entry->image_name[i]; + } + return NULL; +} + +int bcm_vk_auto_load_all_images(struct bcm_vk *vk) +{ + int i, ret = -1; + enum soc_idx idx; + struct device *dev = &vk->pdev->dev; + u32 curr_type; + const char *curr_name; + + idx = get_soc_idx(vk); + if (idx == VK_IDX_INVALID) + goto auto_load_all_exit; + + /* log a message to know the relative loading order */ + dev_dbg(dev, "Load All for device %d\n", vk->devid); + + for (i = 0; i < NUM_BOOT_STAGES; i++) { + curr_type = image_tab[idx][i].image_type; + if (bcm_vk_next_boot_image(vk) == curr_type) { + curr_name = get_load_fw_name(vk, &image_tab[idx][i]); + if (!curr_name) { + dev_err(dev, "No suitable firmware exists for type %d", + curr_type); + ret = -ENOENT; + goto auto_load_all_exit; + } + ret = bcm_vk_load_image_by_type(vk, curr_type, + curr_name); + dev_info(dev, "Auto load %s, ret %d\n", + curr_name, ret); + + if (ret) { + dev_err(dev, "Error loading default %s\n", + curr_name); + goto auto_load_all_exit; + } + } + } + +auto_load_all_exit: + return ret; +} + +static int bcm_vk_trigger_autoload(struct bcm_vk *vk) +{ + if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) + return -EPERM; + + set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload); + queue_work(vk->wq_thread, &vk->wq_work); + + return 0; +} + +/* + * deferred work queue for auto download. 
+ */ +static void bcm_vk_wq_handler(struct work_struct *work) +{ + struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work); + + if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) { + bcm_vk_auto_load_all_images(vk); + + /* + * at the end of operation, clear AUTO bit and pending + * bit + */ + clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload); + clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); + } +} + +static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val) +{ + vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE); +} + +static int bcm_vk_trigger_reset(struct bcm_vk *vk) +{ + u32 i; + u32 value, boot_status; + static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME, + BAR_INTF_VER, + BAR_CARD_VOLTAGE, + BAR_CARD_TEMPERATURE, + BAR_CARD_PWR_AND_THRE }; + + /* make tag '\0' terminated */ + vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG); + + for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) { + vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i)); + vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i)); + } + for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++) + vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i)); + + /* + * When boot request fails, the CODE_PUSH_OFFSET stays persistent. + * Allowing us to debug the failure. When we call reset, + * we should clear CODE_PUSH_OFFSET so ROM does not execute + * boot again (and fails again) and instead waits for a new + * codepush. And, if previous boot has encountered error, need + * to clear the entry values + */ + boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); + if (boot_status & BOOT_ERR_MASK) { + dev_info(&vk->pdev->dev, + "Card in boot error 0x%x, clear CODEPUSH val\n", + boot_status); + value = 0; + } else { + value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL); + value &= CODEPUSH_MASK; + } + vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL); + + /* reset fw_status with proper reason, and press db */ + vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS); + bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT); + + /* clear other necessary registers records */ + for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++) + vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]); + + return 0; +} + static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; int i; + int id; int irq; + char name[20]; struct bcm_vk *vk; struct device *dev = &pdev->dev; + u32 boot_status; vk = kzalloc(sizeof(*vk), GFP_KERNEL); if (!vk) @@ -57,6 +670,18 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_pdev; } + /* The tdma is a scratch area for some DMA testings. */ + if (nr_scratch_pages) { + vk->tdma_vaddr = dma_alloc_coherent + (dev, + nr_scratch_pages * PAGE_SIZE, + &vk->tdma_addr, GFP_KERNEL); + if (!vk->tdma_vaddr) { + err = -ENOMEM; + goto err_disable_pdev; + } + } + pci_set_master(pdev); pci_set_drvdata(pdev, vk); @@ -85,8 +710,53 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL); + if (id < 0) { + err = id; + dev_err(dev, "unable to get id\n"); + goto err_iounmap; + } + + vk->devid = id; + snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); + + INIT_WORK(&vk->wq_work, bcm_vk_wq_handler); + + /* create dedicated workqueue */ + vk->wq_thread = create_singlethread_workqueue(name); + if (!vk->wq_thread) { + dev_err(dev, "Fail to create workqueue thread\n"); + err = -ENOMEM; + goto err_ida_remove; + } + + /* sync other info */ + bcm_vk_sync_card_info(vk); + + /* + * lets trigger an auto download. 
We don't want to do it serially here + * because at probing time, it is not supposed to block for a long time. + */ + boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); + if (auto_load) { + if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) { + if (bcm_vk_trigger_autoload(vk)) + goto err_destroy_workqueue; + } else { + dev_err(dev, + "Auto-load skipped - BROM not in proper state (0x%x)\n", + boot_status); + } + } + return 0; +err_destroy_workqueue: + destroy_workqueue(vk->wq_thread); + +err_ida_remove: + ida_simple_remove(&bcm_vk_ida, id); + err_iounmap: for (i = 0; i < MAX_BAR; i++) { if (vk->bar[i]) @@ -95,6 +765,10 @@ err_iounmap: pci_release_regions(pdev); err_disable_pdev: + if (vk->tdma_vaddr) + dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, + vk->tdma_vaddr, vk->tdma_addr); + pci_free_irq_vectors(pdev); pci_disable_device(pdev); pci_dev_put(pdev); @@ -110,6 +784,22 @@ static void bcm_vk_remove(struct pci_dev *pdev) int i; struct bcm_vk *vk = pci_get_drvdata(pdev); + /* + * Trigger a reset to card and wait enough time for UCODE to rerun, + * which re-initialize the card into its default state. + * This ensures when driver is re-enumerated it will start from + * a completely clean state. + */ + bcm_vk_trigger_reset(vk); + usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US); + + if (vk->tdma_vaddr) + dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, + vk->tdma_vaddr, vk->tdma_addr); + + cancel_work_sync(&vk->wq_work); + destroy_workqueue(vk->wq_thread); + for (i = 0; i < MAX_BAR; i++) { if (vk->bar[i]) pci_iounmap(pdev, vk->bar[i]); @@ -120,6 +810,38 @@ static void bcm_vk_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static void bcm_vk_shutdown(struct pci_dev *pdev) +{ + struct bcm_vk *vk = pci_get_drvdata(pdev); + u32 reg, boot_stat; + + reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS); + boot_stat = reg & BOOT_STATE_MASK; + + if (boot_stat == BOOT1_RUNNING) { + /* simply trigger a reset interrupt to park it */ + bcm_vk_trigger_reset(vk); + } else if (boot_stat == BROM_NOT_RUN) { + int err; + u16 lnksta; + + /* + * The boot status only reflects boot condition since last reset + * As ucode will run only once to configure pcie, if multiple + * resets happen, we lost track if ucode has run or not. + * Here, read the current link speed and use that to + * sync up the bootstatus properly so that on reboot-back-up, + * it has the proper state to start with autoload + */ + err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + if (!err && + (lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) { + reg |= BROM_STATUS_COMPLETE; + vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS); + } + } +} + static const struct pci_device_id bcm_vk_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VIPER), }, @@ -132,6 +854,7 @@ static struct pci_driver pci_driver = { .id_table = bcm_vk_ids, .probe = bcm_vk_probe, .remove = bcm_vk_remove, + .shutdown = bcm_vk_shutdown, }; module_pci_driver(pci_driver); -- cgit v1.2.3-58-ga151 From bfc53e01d221e7068d66a0a051762f8b1edcbf63 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:18 -0800 Subject: misc: bcm-vk: add misc device to Broadcom VK driver Add misc device base support to create and remove devnode. Additional misc functions for open/read/write/release/ioctl/sysfs, etc will be added in follow on commits to allow for individual review. 
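For context, a minimal userspace sketch of what the new devnode enables (illustrative; the name follows the DRV_MODULE_NAME pattern, e.g. /dev/bcm-vk.0, and read/write/ioctl support arrives in the follow-on commits):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* misc devnode created by this patch */
	int fd = open("/dev/bcm-vk.0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/bcm-vk.0");
		return 1;
	}
	/* later commits add per-open contexts and message handling */
	close(fd);
	return 0;
}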
Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-5-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk.h | 2 ++ drivers/misc/bcm-vk/bcm_vk_dev.c | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index c4fb61a84e41..0a366db693c8 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -7,6 +7,7 @@ #define BCM_VK_H #include +#include #include #include @@ -214,6 +215,7 @@ struct bcm_vk { struct bcm_vk_dauth_info dauth_info; + struct miscdevice miscdev; int devid; /* dev id allocated */ struct workqueue_struct *wq_thread; diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index adc3103c7012..4ecd5b5f80d3 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -643,6 +644,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) char name[20]; struct bcm_vk *vk; struct device *dev = &pdev->dev; + struct miscdevice *misc_device; u32 boot_status; vk = kzalloc(sizeof(*vk), GFP_KERNEL); @@ -719,6 +721,19 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vk->devid = id; snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); + misc_device = &vk->miscdev; + misc_device->minor = MISC_DYNAMIC_MINOR; + misc_device->name = kstrdup(name, GFP_KERNEL); + if (!misc_device->name) { + err = -ENOMEM; + goto err_ida_remove; + } + + err = misc_register(misc_device); + if (err) { + dev_err(dev, "failed to register device\n"); + goto err_kfree_name; + } INIT_WORK(&vk->wq_work, bcm_vk_wq_handler); @@ -727,7 +742,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vk->wq_thread) { dev_err(dev, "Fail to create workqueue thread\n"); err = -ENOMEM; - goto err_ida_remove; + goto err_misc_deregister; } /* sync other info */ @@ -749,11 +764,20 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + dev_dbg(dev, "BCM-VK:%u created\n", id); + return 0; err_destroy_workqueue: destroy_workqueue(vk->wq_thread); +err_misc_deregister: + misc_deregister(misc_device); + +err_kfree_name: + kfree(misc_device->name); + misc_device->name = NULL; + err_ida_remove: ida_simple_remove(&bcm_vk_ida, id); @@ -783,6 +807,7 @@ static void bcm_vk_remove(struct pci_dev *pdev) { int i; struct bcm_vk *vk = pci_get_drvdata(pdev); + struct miscdevice *misc_device = &vk->miscdev; /* * Trigger a reset to card and wait enough time for UCODE to rerun, @@ -797,6 +822,13 @@ static void bcm_vk_remove(struct pci_dev *pdev) dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, vk->tdma_vaddr, vk->tdma_addr); + /* remove if name is set which means misc dev registered */ + if (misc_device->name) { + misc_deregister(misc_device); + kfree(misc_device->name); + ida_simple_remove(&bcm_vk_ida, vk->devid); + } + cancel_work_sync(&vk->wq_work); destroy_workqueue(vk->wq_thread); @@ -805,6 +837,8 @@ static void bcm_vk_remove(struct pci_dev *pdev) pci_iounmap(pdev, vk->bar[i]); } + dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid); + pci_release_regions(pdev); pci_free_irq_vectors(pdev); pci_disable_device(pdev); -- cgit v1.2.3-58-ga151 From af22527e82d12f9d0b5afb39f25926a91d5fa7e7 
Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:19 -0800 Subject: misc: bcm-vk: add triggers when host panic or reboots to notify card Pass down an interrupt to card in case of panic or reboot so that card can take appropriate action to perform a clean reset. Uses kernel notifier block either directly (register on panic list), or implicitly (add shutdown method for PCI device). Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-6-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk.h | 2 ++ drivers/misc/bcm-vk/bcm_vk_dev.c | 29 ++++++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 0a366db693c8..f428ad9a0c3d 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -223,6 +223,8 @@ struct bcm_vk { unsigned long wq_offload[1]; /* various flags on wq requested */ void *tdma_vaddr; /* test dma segment virtual addr */ dma_addr_t tdma_addr; /* test dma segment bus addr */ + + struct notifier_block panic_nb; }; /* wq offload work items bits definitions */ diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index 4ecd5b5f80d3..09d99bd36e8a 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -635,6 +635,16 @@ static int bcm_vk_trigger_reset(struct bcm_vk *vk) return 0; } +static int bcm_vk_on_panic(struct notifier_block *nb, + unsigned long e, void *p) +{ + struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb); + + bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD); + + return 0; +} + static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; @@ -748,6 +758,15 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* sync other info */ bcm_vk_sync_card_info(vk); + /* register for panic notifier */ + vk->panic_nb.notifier_call = bcm_vk_on_panic; + err = atomic_notifier_chain_register(&panic_notifier_list, + &vk->panic_nb); + if (err) { + dev_err(dev, "Fail to register panic notifier\n"); + goto err_destroy_workqueue; + } + /* * lets trigger an auto download. We don't want to do it serially here * because at probing time, it is not supposed to block for a long time. 
@@ -756,7 +775,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (auto_load) { if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) { if (bcm_vk_trigger_autoload(vk)) - goto err_destroy_workqueue; + goto err_unregister_panic_notifier; } else { dev_err(dev, "Auto-load skipped - BROM not in proper state (0x%x)\n", @@ -768,6 +787,10 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_unregister_panic_notifier: + atomic_notifier_chain_unregister(&panic_notifier_list, + &vk->panic_nb); + err_destroy_workqueue: destroy_workqueue(vk->wq_thread); @@ -818,6 +841,10 @@ static void bcm_vk_remove(struct pci_dev *pdev) bcm_vk_trigger_reset(vk); usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US); + /* unregister panic notifier */ + atomic_notifier_chain_unregister(&panic_notifier_list, + &vk->panic_nb); + if (vk->tdma_vaddr) dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, vk->tdma_vaddr, vk->tdma_addr); -- cgit v1.2.3-58-ga151 From 22c30607d1e0c604b2450a7aa5bc90a63e24f088 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:20 -0800 Subject: misc: bcm-vk: add open/release Add open/release to replace private data with context for other methods to use. Reason for the context is because it is allowed for multiple sessions to open sysfs. For each file open, when upper layer queries the response, only those that are tied to a specified open should be returned. Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-7-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/Makefile | 4 +- drivers/misc/bcm-vk/bcm_vk.h | 15 +++++ drivers/misc/bcm-vk/bcm_vk_dev.c | 23 +++++++ drivers/misc/bcm-vk/bcm_vk_msg.c | 127 +++++++++++++++++++++++++++++++++++++++ drivers/misc/bcm-vk/bcm_vk_msg.h | 31 ++++++++++ 5 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 drivers/misc/bcm-vk/bcm_vk_msg.c create mode 100644 drivers/misc/bcm-vk/bcm_vk_msg.h (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile index f8a7ac4c242f..a2ae79858409 100644 --- a/drivers/misc/bcm-vk/Makefile +++ b/drivers/misc/bcm-vk/Makefile @@ -5,4 +5,6 @@ obj-$(CONFIG_BCM_VK) += bcm_vk.o bcm_vk-objs := \ - bcm_vk_dev.o + bcm_vk_dev.o \ + bcm_vk_msg.o + diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index f428ad9a0c3d..5f0fcfdaf265 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -7,9 +7,14 @@ #define BCM_VK_H #include +#include #include +#include #include #include +#include + +#include "bcm_vk_msg.h" #define DRV_MODULE_NAME "bcm-vk" @@ -218,6 +223,13 @@ struct bcm_vk { struct miscdevice miscdev; int devid; /* dev id allocated */ + /* Reference-counting to handle file operations */ + struct kref kref; + + spinlock_t ctx_lock; /* Spinlock for component context */ + struct bcm_vk_ctx ctx[VK_CMPT_CTX_MAX]; + struct bcm_vk_ht_entry pid_ht[VK_PID_HT_SZ]; + struct workqueue_struct *wq_thread; struct work_struct wq_work; /* work queue for deferred job */ unsigned long wq_offload[1]; /* various flags on wq requested */ @@ -278,6 +290,9 @@ static inline bool bcm_vk_msgq_marker_valid(struct bcm_vk *vk) return (rdy_marker == VK_BAR1_MSGQ_RDY_MARKER); } +int bcm_vk_open(struct inode *inode, struct file *p_file); +int bcm_vk_release(struct inode *inode, struct file *p_file); +void 
bcm_vk_release_data(struct kref *kref); int bcm_vk_auto_load_all_images(struct bcm_vk *vk); #endif diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index 09d99bd36e8a..79fffb1e6f84 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -635,6 +636,12 @@ static int bcm_vk_trigger_reset(struct bcm_vk *vk) return 0; } +static const struct file_operations bcm_vk_fops = { + .owner = THIS_MODULE, + .open = bcm_vk_open, + .release = bcm_vk_release, +}; + static int bcm_vk_on_panic(struct notifier_block *nb, unsigned long e, void *p) { @@ -657,10 +664,13 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct miscdevice *misc_device; u32 boot_status; + /* allocate vk structure which is tied to kref for freeing */ vk = kzalloc(sizeof(*vk), GFP_KERNEL); if (!vk) return -ENOMEM; + kref_init(&vk->kref); + err = pci_enable_device(pdev); if (err) { dev_err(dev, "Cannot enable PCI device\n"); @@ -738,6 +748,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_ida_remove; } + misc_device->fops = &bcm_vk_fops, err = misc_register(misc_device); if (err) { @@ -826,6 +837,16 @@ err_free_exit: return err; } +void bcm_vk_release_data(struct kref *kref) +{ + struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref); + struct pci_dev *pdev = vk->pdev; + + dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk); + pci_dev_put(pdev); + kfree(vk); +} + static void bcm_vk_remove(struct pci_dev *pdev) { int i; @@ -869,6 +890,8 @@ static void bcm_vk_remove(struct pci_dev *pdev) pci_release_regions(pdev); pci_free_irq_vectors(pdev); pci_disable_device(pdev); + + kref_put(&vk->kref, bcm_vk_release_data); } static void bcm_vk_shutdown(struct pci_dev *pdev) diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c new file mode 100644 index 000000000000..2d9a6b4e5f61 --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018-2020 Broadcom. + */ + +#include "bcm_vk.h" +#include "bcm_vk_msg.h" + +/* + * allocate a ctx per file struct + */ +static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid) +{ + u32 i; + struct bcm_vk_ctx *ctx = NULL; + u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT); + + spin_lock(&vk->ctx_lock); + + for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) { + if (!vk->ctx[i].in_use) { + vk->ctx[i].in_use = true; + ctx = &vk->ctx[i]; + break; + } + } + + if (!ctx) { + dev_err(&vk->pdev->dev, "All context in use\n"); + + goto all_in_use_exit; + } + + /* set the pid and insert it to hash table */ + ctx->pid = pid; + ctx->hash_idx = hash_idx; + list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head); + + /* increase kref */ + kref_get(&vk->kref); + +all_in_use_exit: + spin_unlock(&vk->ctx_lock); + + return ctx; +} + +static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx) +{ + u32 idx; + u32 hash_idx; + pid_t pid; + struct bcm_vk_ctx *entry; + int count = 0; + + if (!ctx) { + dev_err(&vk->pdev->dev, "NULL context detected\n"); + return -EINVAL; + } + idx = ctx->idx; + pid = ctx->pid; + + spin_lock(&vk->ctx_lock); + + if (!vk->ctx[idx].in_use) { + dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx); + } else { + vk->ctx[idx].in_use = false; + vk->ctx[idx].miscdev = NULL; + + /* Remove it from hash list and see if it is the last one. 
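+ * The count of contexts still tied to this pid is returned, so the
+ * caller can tell when the last open handle for the process goes away.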
*/ + list_del(&ctx->node); + hash_idx = ctx->hash_idx; + list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) { + if (entry->pid == pid) + count++; + } + } + + spin_unlock(&vk->ctx_lock); + + return count; +} + +int bcm_vk_open(struct inode *inode, struct file *p_file) +{ + struct bcm_vk_ctx *ctx; + struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data; + struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev); + struct device *dev = &vk->pdev->dev; + int rc = 0; + + /* get a context and set it up for file */ + ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current)); + if (!ctx) { + dev_err(dev, "Error allocating context\n"); + rc = -ENOMEM; + } else { + /* + * set up context and replace private data with context for + * other methods to use. Reason for the context is because + * it is allowed for multiple sessions to open the sysfs, and + * for each file open, when upper layer query the response, + * only those that are tied to a specific open should be + * returned. The context->idx will be used for such binding + */ + ctx->miscdev = miscdev; + p_file->private_data = ctx; + dev_dbg(dev, "ctx_returned with idx %d, pid %d\n", + ctx->idx, ctx->pid); + } + return rc; +} + +int bcm_vk_release(struct inode *inode, struct file *p_file) +{ + int ret; + struct bcm_vk_ctx *ctx = p_file->private_data; + struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); + + ret = bcm_vk_free_ctx(vk, ctx); + + kref_put(&vk->kref, bcm_vk_release_data); + + return ret; +} + diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h new file mode 100644 index 000000000000..32516abcaf89 --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk_msg.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2018-2020 Broadcom. + */ + +#ifndef BCM_VK_MSG_H +#define BCM_VK_MSG_H + +/* context per session opening of sysfs */ +struct bcm_vk_ctx { + struct list_head node; /* use for linkage in Hash Table */ + unsigned int idx; + bool in_use; + pid_t pid; + u32 hash_idx; + struct miscdevice *miscdev; +}; + +/* pid hash table entry */ +struct bcm_vk_ht_entry { + struct list_head head; +}; + +/* total number of supported ctx, 32 ctx each for 5 components */ +#define VK_CMPT_CTX_MAX (32 * 5) + +/* hash table defines to store the opened FDs */ +#define VK_PID_HT_SHIFT_BIT 7 /* 128 */ +#define VK_PID_HT_SZ BIT(VK_PID_HT_SHIFT_BIT) + +#endif -- cgit v1.2.3-58-ga151 From 7367e0ad77d21ff851404ffd71bd1f626bc93ef2 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:21 -0800 Subject: misc: bcm-vk: add ioctl load_image Add ioctl support to issue load_image operation to VK card. 
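For reference, user space reaches this through the misc node registered earlier in the series (/dev/bcm-vk.N). A minimal sketch, assuming struct vk_image and VK_IOCTL_LOAD_IMAGE come from the series' uapi header, which sits outside this drivers/misc-limited listing:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/misc/bcm_vk.h>  /* struct vk_image, VK_IOCTL_LOAD_IMAGE */

    /*
     * Load the first-stage image; the driver enforces the order
     * (BOOT1 before BOOT2) via bcm_vk_next_boot_image().
     */
    static int load_boot1(const char *devnode)  /* e.g. "/dev/bcm-vk.0" */
    {
        struct vk_image img;
        int fd, ret;

        fd = open(devnode, O_RDWR);
        if (fd < 0) {
            perror("open");
            return -1;
        }

        memset(&img, 0, sizeof(img));
        img.type = VK_IMAGE_TYPE_BOOT1;
        /* leaving img.filename empty selects the driver's default image */

        ret = ioctl(fd, VK_IOCTL_LOAD_IMAGE, &img);
        if (ret)
            perror("VK_IOCTL_LOAD_IMAGE");

        close(fd);
        return ret;
    }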
Co-developed-by: Desmond Yan Co-developed-by: James Hu Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: James Hu Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-8-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk.h | 3 ++ drivers/misc/bcm-vk/bcm_vk_dev.c | 95 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 5f0fcfdaf265..726aab71bb6b 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "bcm_vk_msg.h" @@ -220,6 +221,8 @@ struct bcm_vk { struct bcm_vk_dauth_info dauth_info; + /* mutex to protect the ioctls */ + struct mutex mutex; struct miscdevice miscdev; int devid; /* dev id allocated */ diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index 79fffb1e6f84..203a1cf2bae3 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -580,6 +581,71 @@ static void bcm_vk_wq_handler(struct work_struct *work) } } +static long bcm_vk_load_image(struct bcm_vk *vk, + const struct vk_image __user *arg) +{ + struct device *dev = &vk->pdev->dev; + const char *image_name; + struct vk_image image; + u32 next_loadable; + enum soc_idx idx; + int image_idx; + int ret = -EPERM; + + if (copy_from_user(&image, arg, sizeof(image))) + return -EACCES; + + if ((image.type != VK_IMAGE_TYPE_BOOT1) && + (image.type != VK_IMAGE_TYPE_BOOT2)) { + dev_err(dev, "invalid image.type %u\n", image.type); + return ret; + } + + next_loadable = bcm_vk_next_boot_image(vk); + if (next_loadable != image.type) { + dev_err(dev, "Next expected image %u, Loading %u\n", + next_loadable, image.type); + return ret; + } + + /* + * if something is pending download already. This could only happen + * for now when the driver is being loaded, or if someone has issued + * another download command in another shell. 
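+ * The BCM_VK_WQ_DWNLD_PEND bit below serializes downloads:
+ * test_and_set_bit() makes the loser fail fast with -EPERM rather
+ * than block behind the download already in flight.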
+ */ + if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) { + dev_err(dev, "Download operation already pending.\n"); + return ret; + } + + image_name = image.filename; + if (image_name[0] == '\0') { + /* Use default image name if NULL */ + idx = get_soc_idx(vk); + if (idx == VK_IDX_INVALID) + goto err_idx; + + /* Image idx starts with boot1 */ + image_idx = image.type - VK_IMAGE_TYPE_BOOT1; + image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]); + if (!image_name) { + dev_err(dev, "No suitable image found for type %d", + image.type); + ret = -ENOENT; + goto err_idx; + } + } else { + /* Ensure filename is NULL terminated */ + image.filename[sizeof(image.filename) - 1] = '\0'; + } + ret = bcm_vk_load_image_by_type(vk, image.type, image_name); + dev_info(dev, "Load %s, ret %d\n", image_name, ret); +err_idx: + clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); + + return ret; +} + static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val) { vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE); @@ -636,10 +702,38 @@ static int bcm_vk_trigger_reset(struct bcm_vk *vk) return 0; } +static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret = -EINVAL; + struct bcm_vk_ctx *ctx = file->private_data; + struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); + void __user *argp = (void __user *)arg; + + dev_dbg(&vk->pdev->dev, + "ioctl, cmd=0x%02x, arg=0x%02lx\n", + cmd, arg); + + mutex_lock(&vk->mutex); + + switch (cmd) { + case VK_IOCTL_LOAD_IMAGE: + ret = bcm_vk_load_image(vk, argp); + break; + + default: + break; + } + + mutex_unlock(&vk->mutex); + + return ret; +} + static const struct file_operations bcm_vk_fops = { .owner = THIS_MODULE, .open = bcm_vk_open, .release = bcm_vk_release, + .unlocked_ioctl = bcm_vk_ioctl, }; static int bcm_vk_on_panic(struct notifier_block *nb, @@ -670,6 +764,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; kref_init(&vk->kref); + mutex_init(&vk->mutex); err = pci_enable_device(pdev); if (err) { -- cgit v1.2.3-58-ga151 From ff428d052b3b6fb22242d17c213c4898e5136323 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:22 -0800 Subject: misc: bcm-vk: add get_card_info, peerlog_info, and proc_mon_info Add support to get card_info (details about card), peerlog_info (to get details of peerlog on card), and proc_mon_info (process monitoring on card). This info is used for collection of logs via direct read of BAR space and by sysfs access (in a follow on commit). 
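The peer-log control block read by this patch follows a simple ring convention: the card's buffer size is a power of two (capped at BCM_VK_PEER_LOG_BUF_MAX), mask must equal buf_size - 1, and both indexes stay within the mask, which is exactly what the range check enforces before any dump is allowed. A host-side sketch of reading such a ring, with hypothetical names standing in for bcm_vk_peer_log:

    #include <stdint.h>
    #include <stdio.h>

    struct peer_log_hdr {
        uint32_t rd_idx;
        uint32_t wr_idx;
        uint32_t buf_size;  /* power of two */
        uint32_t mask;      /* buf_size - 1, so '&' replaces '%' */
    };

    /* Drain every pending byte; the caller writes the result back to rd_idx. */
    static uint32_t drain_ring(const struct peer_log_hdr *hdr, const char *data)
    {
        uint32_t rd = hdr->rd_idx;

        while (rd != hdr->wr_idx) {
            putchar(data[rd]);
            rd = (rd + 1) & hdr->mask;  /* wraps correctly only for 2^n sizes */
        }
        return rd;
    }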
Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-9-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk.h | 60 ++++++++++++++++++++++ drivers/misc/bcm-vk/bcm_vk_dev.c | 105 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 165 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 726aab71bb6b..50f2a0cd6e13 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -205,6 +205,21 @@ enum pci_barno { #define BCM_VK_NUM_TTY 2 +/* VK device max power state, supports 3, full, reduced and low */ +#define MAX_OPP 3 +#define MAX_CARD_INFO_TAG_SIZE 64 + +struct bcm_vk_card_info { + u32 version; + char os_tag[MAX_CARD_INFO_TAG_SIZE]; + char cmpt_tag[MAX_CARD_INFO_TAG_SIZE]; + u32 cpu_freq_mhz; + u32 cpu_scale[MAX_OPP]; + u32 ddr_freq_mhz; + u32 ddr_size_MB; + u32 video_core_freq_mhz; +}; + /* DAUTH related info */ struct bcm_vk_dauth_key { char store[VK_BAR1_DAUTH_STORE_SIZE]; @@ -215,10 +230,49 @@ struct bcm_vk_dauth_info { struct bcm_vk_dauth_key keys[VK_BAR1_DAUTH_MAX]; }; +/* + * Control structure of logging messages from the card. This + * buffer is for logmsg that comes from vk + */ +struct bcm_vk_peer_log { + u32 rd_idx; + u32 wr_idx; + u32 buf_size; + u32 mask; + char data[0]; +}; + +/* max buf size allowed */ +#define BCM_VK_PEER_LOG_BUF_MAX SZ_16K +/* max size per line of peer log */ +#define BCM_VK_PEER_LOG_LINE_MAX 256 + +/* + * single entry for processing type + utilization + */ +#define BCM_VK_PROC_TYPE_TAG_LEN 8 +struct bcm_vk_proc_mon_entry_t { + char tag[BCM_VK_PROC_TYPE_TAG_LEN]; + u32 used; + u32 max; /**< max capacity */ +}; + +/** + * Structure for run time utilization + */ +#define BCM_VK_PROC_MON_MAX 8 /* max entries supported */ +struct bcm_vk_proc_mon_info { + u32 num; /**< no of entries */ + u32 entry_size; /**< per entry size */ + struct bcm_vk_proc_mon_entry_t entries[BCM_VK_PROC_MON_MAX]; +}; + struct bcm_vk { struct pci_dev *pdev; void __iomem *bar[MAX_BAR]; + struct bcm_vk_card_info card_info; + struct bcm_vk_proc_mon_info proc_mon_info; struct bcm_vk_dauth_info dauth_info; /* mutex to protect the ioctls */ @@ -240,6 +294,12 @@ struct bcm_vk { dma_addr_t tdma_addr; /* test dma segment bus addr */ struct notifier_block panic_nb; + + /* offset of the peer log control in BAR2 */ + u32 peerlog_off; + struct bcm_vk_peer_log peerlog_info; /* record of peer log info */ + /* offset of processing monitoring info in BAR2 */ + u32 proc_mon_off; }; /* wq offload work items bits definitions */ diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index 203a1cf2bae3..a63208513c64 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -172,6 +172,104 @@ static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar, return 0; } +static void bcm_vk_get_card_info(struct bcm_vk *vk) +{ + struct device *dev = &vk->pdev->dev; + u32 offset; + int i; + u8 *dst; + struct bcm_vk_card_info *info = &vk->card_info; + + /* first read the offset from spare register */ + offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO); + offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1); + + /* based on the offset, read info to internal card info structure */ + dst = (u8 *)info; + for (i = 0; i < sizeof(*info); i++) + *dst++ = vkread8(vk, BAR_2, offset++); + +#define CARD_INFO_LOG_FMT "version : %x\n" 
\ + "os_tag : %s\n" \ + "cmpt_tag : %s\n" \ + "cpu_freq : %d MHz\n" \ + "cpu_scale : %d full, %d lowest\n" \ + "ddr_freq : %d MHz\n" \ + "ddr_size : %d MB\n" \ + "video_freq: %d MHz\n" + dev_dbg(dev, CARD_INFO_LOG_FMT, info->version, info->os_tag, + info->cmpt_tag, info->cpu_freq_mhz, info->cpu_scale[0], + info->cpu_scale[MAX_OPP - 1], info->ddr_freq_mhz, + info->ddr_size_MB, info->video_core_freq_mhz); + + /* + * get the peer log pointer, only need the offset, and get record + * of the log buffer information which would be used for checking + * before dump, in case the BAR2 memory has been corrupted. + */ + vk->peerlog_off = offset; + memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off, + sizeof(vk->peerlog_info)); + + /* + * Do a range checking and if out of bound, the record will be zeroed + * which guarantees that nothing would be dumped. In other words, + * peer dump is disabled. + */ + if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) || + (vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) || + (vk->peerlog_info.rd_idx > vk->peerlog_info.mask) || + (vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) { + dev_err(dev, "Peer log disabled - range error: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n", + vk->peerlog_info.buf_size, + vk->peerlog_info.mask, + vk->peerlog_info.rd_idx, + vk->peerlog_info.wr_idx); + memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info)); + } else { + dev_dbg(dev, "Peer log: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n", + vk->peerlog_info.buf_size, + vk->peerlog_info.mask, + vk->peerlog_info.rd_idx, + vk->peerlog_info.wr_idx); + } +} + +static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk) +{ + struct device *dev = &vk->pdev->dev; + struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info; + u32 num, entry_size, offset, buf_size; + u8 *dst; + + /* calculate offset which is based on peerlog offset */ + buf_size = vkread32(vk, BAR_2, + vk->peerlog_off + + offsetof(struct bcm_vk_peer_log, buf_size)); + offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log) + + buf_size; + + /* first read the num and entry size */ + num = vkread32(vk, BAR_2, offset); + entry_size = vkread32(vk, BAR_2, offset + sizeof(num)); + + /* check for max allowed */ + if (num > BCM_VK_PROC_MON_MAX) { + dev_err(dev, "Processing monitoring entry %d exceeds max %d\n", + num, BCM_VK_PROC_MON_MAX); + return; + } + mon->num = num; + mon->entry_size = entry_size; + + vk->proc_mon_off = offset; + + /* read it once that will capture those static info */ + dst = (u8 *)&mon->entries[0]; + offset += sizeof(num) + sizeof(entry_size); + memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size); +} + static int bcm_vk_sync_card_info(struct bcm_vk *vk) { u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY); @@ -193,6 +291,13 @@ static int bcm_vk_sync_card_info(struct bcm_vk *vk) vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1, VK_BAR1_SCRATCH_SZ_ADDR); } + + /* get static card info, only need to read once */ + bcm_vk_get_card_info(vk); + + /* get the proc mon info once */ + bcm_vk_get_proc_mon_info(vk); + return 0; } -- cgit v1.2.3-58-ga151 From 111d746bb4767ad476f80fe49067e3df3d9a9375 Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:23 -0800 Subject: misc: bcm-vk: add VK messaging support Add message support in order to be able to communicate to VK card via message queues. This info is used for debug purposes via collection of logs via direct read of BAR space and by sysfs access (in a follow on commit). 
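The message queues introduced here use the same power-of-two ring arithmetic as the peer log: free-running read/write indexes are reduced with a mask, and one block is sacrificed so that a full queue never aliases an empty one. A condensed sketch of the math behind msgq_occupied()/msgq_avail_space() in this patch, in hypothetical standalone form:

    #include <stdint.h>

    /* q_size message blocks, q_size a power of two, mask == q_size - 1 */
    static uint32_t occupied(uint32_t wr, uint32_t rd, uint32_t q_mask)
    {
        return (wr - rd) & q_mask;      /* unsigned wrap-around is defined */
    }

    static uint32_t avail_space(uint32_t wr, uint32_t rd, uint32_t q_size)
    {
        /* keep one block free so full (q_size - 1) is distinct from empty (0) */
        return q_size - occupied(wr, rd, q_size - 1) - 1;
    }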
Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-10-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/Makefile | 3 +- drivers/misc/bcm-vk/bcm_vk.h | 123 ++++ drivers/misc/bcm-vk/bcm_vk_dev.c | 309 +++++++++- drivers/misc/bcm-vk/bcm_vk_msg.c | 1187 ++++++++++++++++++++++++++++++++++++++ drivers/misc/bcm-vk/bcm_vk_msg.h | 132 +++++ drivers/misc/bcm-vk/bcm_vk_sg.c | 275 +++++++++ drivers/misc/bcm-vk/bcm_vk_sg.h | 61 ++ 7 files changed, 2087 insertions(+), 3 deletions(-) create mode 100644 drivers/misc/bcm-vk/bcm_vk_sg.c create mode 100644 drivers/misc/bcm-vk/bcm_vk_sg.h (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile index a2ae79858409..79b4e365c9e6 100644 --- a/drivers/misc/bcm-vk/Makefile +++ b/drivers/misc/bcm-vk/Makefile @@ -6,5 +6,6 @@ obj-$(CONFIG_BCM_VK) += bcm_vk.o bcm_vk-objs := \ bcm_vk_dev.o \ - bcm_vk_msg.o + bcm_vk_msg.o \ + bcm_vk_sg.o diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 50f2a0cd6e13..d847a512d0ed 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -6,11 +6,13 @@ #ifndef BCM_VK_H #define BCM_VK_H +#include #include #include #include #include #include +#include #include #include #include @@ -93,14 +95,53 @@ #define MAJOR_SOC_REV(_chip_id) (((_chip_id) >> 20) & 0xf) #define BAR_CARD_TEMPERATURE 0x45c +/* defines for all temperature sensor */ +#define BCM_VK_TEMP_FIELD_MASK 0xff +#define BCM_VK_CPU_TEMP_SHIFT 0 +#define BCM_VK_DDR0_TEMP_SHIFT 8 +#define BCM_VK_DDR1_TEMP_SHIFT 16 #define BAR_CARD_VOLTAGE 0x460 +/* defines for voltage rail conversion */ +#define BCM_VK_VOLT_RAIL_MASK 0xffff +#define BCM_VK_3P3_VOLT_REG_SHIFT 16 #define BAR_CARD_ERR_LOG 0x464 +/* Error log register bit definition - register for error alerts */ +#define ERR_LOG_UECC BIT(0) +#define ERR_LOG_SSIM_BUSY BIT(1) +#define ERR_LOG_AFBC_BUSY BIT(2) +#define ERR_LOG_HIGH_TEMP_ERR BIT(3) +#define ERR_LOG_WDOG_TIMEOUT BIT(4) +#define ERR_LOG_SYS_FAULT BIT(5) +#define ERR_LOG_RAMDUMP BIT(6) +#define ERR_LOG_COP_WDOG_TIMEOUT BIT(7) +/* warnings */ +#define ERR_LOG_MEM_ALLOC_FAIL BIT(8) +#define ERR_LOG_LOW_TEMP_WARN BIT(9) +#define ERR_LOG_ECC BIT(10) +#define ERR_LOG_IPC_DWN BIT(11) + +/* Alert bit definitions detectd on host */ +#define ERR_LOG_HOST_INTF_V_FAIL BIT(13) +#define ERR_LOG_HOST_HB_FAIL BIT(14) +#define ERR_LOG_HOST_PCIE_DWN BIT(15) #define BAR_CARD_ERR_MEM 0x468 +/* defines for mem err, all fields have same width */ +#define BCM_VK_MEM_ERR_FIELD_MASK 0xff +#define BCM_VK_ECC_MEM_ERR_SHIFT 0 +#define BCM_VK_UECC_MEM_ERR_SHIFT 8 +/* threshold of event occurrence and logs start to come out */ +#define BCM_VK_ECC_THRESHOLD 10 +#define BCM_VK_UECC_THRESHOLD 1 #define BAR_CARD_PWR_AND_THRE 0x46c +/* defines for power and temp threshold, all fields have same width */ +#define BCM_VK_PWR_AND_THRE_FIELD_MASK 0xff +#define BCM_VK_LOW_TEMP_THRE_SHIFT 0 +#define BCM_VK_HIGH_TEMP_THRE_SHIFT 8 +#define BCM_VK_PWR_STATE_SHIFT 16 #define BAR_CARD_STATIC_INFO 0x470 @@ -143,6 +184,11 @@ #define BAR_FIRMWARE_TAG_SIZE 50 #define FIRMWARE_STATUS_PRE_INIT_DONE 0x1f +/* VK MSG_ID defines */ +#define VK_MSG_ID_BITMAP_SIZE 4096 +#define VK_MSG_ID_BITMAP_MASK (VK_MSG_ID_BITMAP_SIZE - 1) +#define VK_MSG_ID_OVERFLOW 0xffff + /* * BAR1 */ @@ -197,6 +243,10 @@ /* VK device supports a maximum of 3 bars */ #define MAX_BAR 3 +/* default number of msg blk for inband 
SGL */ +#define BCM_VK_DEF_IB_SGL_BLK_LEN 16 +#define BCM_VK_IB_SGL_BLK_MAX 24 + enum pci_barno { BAR_0 = 0, BAR_1, @@ -267,9 +317,27 @@ struct bcm_vk_proc_mon_info { struct bcm_vk_proc_mon_entry_t entries[BCM_VK_PROC_MON_MAX]; }; +struct bcm_vk_hb_ctrl { + struct timer_list timer; + u32 last_uptime; + u32 lost_cnt; +}; + +struct bcm_vk_alert { + u16 flags; + u16 notfs; +}; + +/* some alert counters that the driver will keep track */ +struct bcm_vk_alert_cnts { + u16 ecc; + u16 uecc; +}; + struct bcm_vk { struct pci_dev *pdev; void __iomem *bar[MAX_BAR]; + int num_irqs; struct bcm_vk_card_info card_info; struct bcm_vk_proc_mon_info proc_mon_info; @@ -283,9 +351,17 @@ struct bcm_vk { /* Reference-counting to handle file operations */ struct kref kref; + spinlock_t msg_id_lock; /* Spinlock for msg_id */ + u16 msg_id; + DECLARE_BITMAP(bmap, VK_MSG_ID_BITMAP_SIZE); spinlock_t ctx_lock; /* Spinlock for component context */ struct bcm_vk_ctx ctx[VK_CMPT_CTX_MAX]; struct bcm_vk_ht_entry pid_ht[VK_PID_HT_SZ]; + pid_t reset_pid; /* process that issue reset */ + + atomic_t msgq_inited; /* indicate if info has been synced with vk */ + struct bcm_vk_msg_chan to_v_msg_chan; + struct bcm_vk_msg_chan to_h_msg_chan; struct workqueue_struct *wq_thread; struct work_struct wq_work; /* work queue for deferred job */ @@ -294,6 +370,15 @@ struct bcm_vk { dma_addr_t tdma_addr; /* test dma segment bus addr */ struct notifier_block panic_nb; + u32 ib_sgl_size; /* size allocated for inband sgl insertion */ + + /* heart beat mechanism control structure */ + struct bcm_vk_hb_ctrl hb_ctrl; + /* house-keeping variable of error logs */ + spinlock_t host_alert_lock; /* protection to access host_alert struct */ + struct bcm_vk_alert host_alert; + struct bcm_vk_alert peer_alert; /* bits set by the card */ + struct bcm_vk_alert_cnts alert_cnts; /* offset of the peer log control in BAR2 */ u32 peerlog_off; @@ -306,8 +391,26 @@ struct bcm_vk { enum bcm_vk_wq_offload_flags { BCM_VK_WQ_DWNLD_PEND = 0, BCM_VK_WQ_DWNLD_AUTO = 1, + BCM_VK_WQ_NOTF_PEND = 2, }; +/* a macro to get an individual field with mask and shift */ +#define BCM_VK_EXTRACT_FIELD(_field, _reg, _mask, _shift) \ + (_field = (((_reg) >> (_shift)) & (_mask))) + +struct bcm_vk_entry { + const u32 mask; + const u32 exp_val; + const char *str; +}; + +/* alerts that could be generated from peer */ +#define BCM_VK_PEER_ERR_NUM 12 +extern struct bcm_vk_entry const bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM]; +/* alerts detected by the host */ +#define BCM_VK_HOST_ERR_NUM 3 +extern struct bcm_vk_entry const bcm_vk_host_err[BCM_VK_HOST_ERR_NUM]; + /* * check if PCIe interface is down on read. Use it when it is * certain that _val should never be all ones. 
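 */
/* (macro body reconstructed from the comment above and from its uses below) */
#define BCM_VK_INTF_IS_DOWN(val) ((val) == 0xffffffff)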
@@ -354,8 +457,28 @@ static inline bool bcm_vk_msgq_marker_valid(struct bcm_vk *vk) } int bcm_vk_open(struct inode *inode, struct file *p_file); +ssize_t bcm_vk_read(struct file *p_file, char __user *buf, size_t count, + loff_t *f_pos); +ssize_t bcm_vk_write(struct file *p_file, const char __user *buf, + size_t count, loff_t *f_pos); +__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait); int bcm_vk_release(struct inode *inode, struct file *p_file); void bcm_vk_release_data(struct kref *kref); +irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id); +irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id); +int bcm_vk_msg_init(struct bcm_vk *vk); +void bcm_vk_msg_remove(struct bcm_vk *vk); +int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync); +void bcm_vk_blk_drv_access(struct bcm_vk *vk); +s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk); +int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type, + const pid_t pid, const u32 q_num); +void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val); int bcm_vk_auto_load_all_images(struct bcm_vk *vk); +void bcm_vk_hb_init(struct bcm_vk *vk); +void bcm_vk_hb_deinit(struct bcm_vk *vk); +void bcm_vk_handle_notf(struct bcm_vk *vk); +bool bcm_vk_drv_access_ok(struct bcm_vk *vk); +void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask); #endif diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index a63208513c64..5d82f02c0f27 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -102,6 +103,54 @@ static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES; module_param(nr_scratch_pages, uint, 0444); MODULE_PARM_DESC(nr_scratch_pages, "Number of pre allocated DMAable coherent pages.\n"); +static uint nr_ib_sgl_blk = BCM_VK_DEF_IB_SGL_BLK_LEN; +module_param(nr_ib_sgl_blk, uint, 0444); +MODULE_PARM_DESC(nr_ib_sgl_blk, + "Number of in-band msg blks for short SGL.\n"); + +/* + * alerts that could be generated from peer + */ +const struct bcm_vk_entry bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM] = { + {ERR_LOG_UECC, ERR_LOG_UECC, "uecc"}, + {ERR_LOG_SSIM_BUSY, ERR_LOG_SSIM_BUSY, "ssim_busy"}, + {ERR_LOG_AFBC_BUSY, ERR_LOG_AFBC_BUSY, "afbc_busy"}, + {ERR_LOG_HIGH_TEMP_ERR, ERR_LOG_HIGH_TEMP_ERR, "high_temp"}, + {ERR_LOG_WDOG_TIMEOUT, ERR_LOG_WDOG_TIMEOUT, "wdog_timeout"}, + {ERR_LOG_SYS_FAULT, ERR_LOG_SYS_FAULT, "sys_fault"}, + {ERR_LOG_RAMDUMP, ERR_LOG_RAMDUMP, "ramdump"}, + {ERR_LOG_COP_WDOG_TIMEOUT, ERR_LOG_COP_WDOG_TIMEOUT, + "cop_wdog_timeout"}, + {ERR_LOG_MEM_ALLOC_FAIL, ERR_LOG_MEM_ALLOC_FAIL, "malloc_fail warn"}, + {ERR_LOG_LOW_TEMP_WARN, ERR_LOG_LOW_TEMP_WARN, "low_temp warn"}, + {ERR_LOG_ECC, ERR_LOG_ECC, "ecc"}, + {ERR_LOG_IPC_DWN, ERR_LOG_IPC_DWN, "ipc_down"}, +}; + +/* alerts detected by the host */ +const struct bcm_vk_entry bcm_vk_host_err[BCM_VK_HOST_ERR_NUM] = { + {ERR_LOG_HOST_PCIE_DWN, ERR_LOG_HOST_PCIE_DWN, "PCIe_down"}, + {ERR_LOG_HOST_HB_FAIL, ERR_LOG_HOST_HB_FAIL, "hb_fail"}, + {ERR_LOG_HOST_INTF_V_FAIL, ERR_LOG_HOST_INTF_V_FAIL, "intf_ver_fail"}, +}; + +irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id) +{ + struct bcm_vk *vk = dev_id; + + if (!bcm_vk_drv_access_ok(vk)) { + dev_err(&vk->pdev->dev, + "Interrupt %d received when msgq not inited\n", irq); + goto skip_schedule_work; + } + + /* if notification is not pending, set bit and schedule work */ + if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0) + queue_work(vk->wq_thread, 
&vk->wq_work); + +skip_schedule_work: + return IRQ_HANDLED; +} static int bcm_vk_intf_ver_chk(struct bcm_vk *vk) { @@ -126,6 +175,7 @@ static int bcm_vk_intf_ver_chk(struct bcm_vk *vk) dev_err(dev, "Intf major.minor=%d.%d rejected - drv %d.%d\n", major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR); + bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL); ret = -EPFNOSUPPORT; } else { dev_dbg(dev, @@ -135,6 +185,154 @@ static int bcm_vk_intf_ver_chk(struct bcm_vk *vk) return ret; } +static void bcm_vk_log_notf(struct bcm_vk *vk, + struct bcm_vk_alert *alert, + struct bcm_vk_entry const *entry_tab, + const u32 table_size) +{ + u32 i; + u32 masked_val, latched_val; + struct bcm_vk_entry const *entry; + u32 reg; + u16 ecc_mem_err, uecc_mem_err; + struct device *dev = &vk->pdev->dev; + + for (i = 0; i < table_size; i++) { + entry = &entry_tab[i]; + masked_val = entry->mask & alert->notfs; + latched_val = entry->mask & alert->flags; + + if (masked_val == ERR_LOG_UECC) { + /* + * if there is difference between stored cnt and it + * is greater than threshold, log it. + */ + reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM); + BCM_VK_EXTRACT_FIELD(uecc_mem_err, reg, + BCM_VK_MEM_ERR_FIELD_MASK, + BCM_VK_UECC_MEM_ERR_SHIFT); + if ((uecc_mem_err != vk->alert_cnts.uecc) && + (uecc_mem_err >= BCM_VK_UECC_THRESHOLD)) + dev_info(dev, + "ALERT! %s.%d uecc RAISED - ErrCnt %d\n", + DRV_MODULE_NAME, vk->devid, + uecc_mem_err); + vk->alert_cnts.uecc = uecc_mem_err; + } else if (masked_val == ERR_LOG_ECC) { + reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM); + BCM_VK_EXTRACT_FIELD(ecc_mem_err, reg, + BCM_VK_MEM_ERR_FIELD_MASK, + BCM_VK_ECC_MEM_ERR_SHIFT); + if ((ecc_mem_err != vk->alert_cnts.ecc) && + (ecc_mem_err >= BCM_VK_ECC_THRESHOLD)) + dev_info(dev, "ALERT! %s.%d ecc RAISED - ErrCnt %d\n", + DRV_MODULE_NAME, vk->devid, + ecc_mem_err); + vk->alert_cnts.ecc = ecc_mem_err; + } else if (masked_val != latched_val) { + /* print a log as info */ + dev_info(dev, "ALERT! %s.%d %s %s\n", + DRV_MODULE_NAME, vk->devid, entry->str, + masked_val ? 
"RAISED" : "CLEARED"); + } + } +} + +static void bcm_vk_dump_peer_log(struct bcm_vk *vk) +{ + struct bcm_vk_peer_log log; + struct bcm_vk_peer_log *log_info = &vk->peerlog_info; + char loc_buf[BCM_VK_PEER_LOG_LINE_MAX]; + int cnt; + struct device *dev = &vk->pdev->dev; + unsigned int data_offset; + + memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log)); + + dev_dbg(dev, "Peer PANIC: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n", + log.buf_size, log.mask, log.rd_idx, log.wr_idx); + + if (!log_info->buf_size) { + dev_err(dev, "Peer log dump disabled - skipped!\n"); + return; + } + + /* perform range checking for rd/wr idx */ + if ((log.rd_idx > log_info->mask) || + (log.wr_idx > log_info->mask) || + (log.buf_size != log_info->buf_size) || + (log.mask != log_info->mask)) { + dev_err(dev, + "Corrupted Ptrs: Size 0x%x(0x%x) Mask 0x%x(0x%x) [Rd Wr] = [%d %d], skip log dump.\n", + log_info->buf_size, log.buf_size, + log_info->mask, log.mask, + log.rd_idx, log.wr_idx); + return; + } + + cnt = 0; + data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log); + loc_buf[BCM_VK_PEER_LOG_LINE_MAX - 1] = '\0'; + while (log.rd_idx != log.wr_idx) { + loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx); + + if ((loc_buf[cnt] == '\0') || + (cnt == (BCM_VK_PEER_LOG_LINE_MAX - 1))) { + dev_err(dev, "%s", loc_buf); + cnt = 0; + } else { + cnt++; + } + log.rd_idx = (log.rd_idx + 1) & log.mask; + } + /* update rd idx at the end */ + vkwrite32(vk, log.rd_idx, BAR_2, + vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx)); +} + +void bcm_vk_handle_notf(struct bcm_vk *vk) +{ + u32 reg; + struct bcm_vk_alert alert; + bool intf_down; + unsigned long flags; + + /* handle peer alerts and then locally detected ones */ + reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG); + intf_down = BCM_VK_INTF_IS_DOWN(reg); + if (!intf_down) { + vk->peer_alert.notfs = reg; + bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err, + ARRAY_SIZE(bcm_vk_peer_err)); + vk->peer_alert.flags = vk->peer_alert.notfs; + } else { + /* turn off access */ + bcm_vk_blk_drv_access(vk); + } + + /* check and make copy of alert with lock and then free lock */ + spin_lock_irqsave(&vk->host_alert_lock, flags); + if (intf_down) + vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN; + + alert = vk->host_alert; + vk->host_alert.flags = vk->host_alert.notfs; + spin_unlock_irqrestore(&vk->host_alert_lock, flags); + + /* call display with copy */ + bcm_vk_log_notf(vk, &alert, bcm_vk_host_err, + ARRAY_SIZE(bcm_vk_host_err)); + + /* + * If it is a sys fault or heartbeat timeout, we would like extract + * log msg from the card so that we would know what is the last fault + */ + if (!intf_down && + ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) || + (vk->peer_alert.flags & ERR_LOG_SYS_FAULT))) + bcm_vk_dump_peer_log(vk); +} + static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar, u64 offset, u32 mask, u32 value, unsigned long timeout_ms) @@ -301,6 +499,31 @@ static int bcm_vk_sync_card_info(struct bcm_vk *vk) return 0; } +void bcm_vk_blk_drv_access(struct bcm_vk *vk) +{ + int i; + + /* + * kill all the apps + */ + spin_lock(&vk->ctx_lock); + + /* set msgq_inited to 0 so that all rd/wr will be blocked */ + atomic_set(&vk->msgq_inited, 0); + + for (i = 0; i < VK_PID_HT_SZ; i++) { + struct bcm_vk_ctx *ctx; + + list_for_each_entry(ctx, &vk->pid_ht[i].head, node) { + dev_dbg(&vk->pdev->dev, + "Send kill signal to pid %d\n", + ctx->pid); + kill_pid(find_vpid(ctx->pid), SIGKILL, 1); + } + } + spin_unlock(&vk->ctx_lock); +} + static void 
bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp, dma_addr_t host_buf_addr, u32 buf_size) { @@ -518,6 +741,17 @@ static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type, goto err_firmware_out; } + /* + * Next, initialize Message Q if we are loading boot2. + * Do a force sync + */ + ret = bcm_vk_sync_msgq(vk, true); + if (ret) { + dev_err(dev, "Boot2 Error reading comm msg Q info\n"); + ret = -EIO; + goto err_firmware_out; + } + /* sync & channel other info */ ret = bcm_vk_sync_card_info(vk); if (ret) { @@ -668,12 +902,20 @@ static int bcm_vk_trigger_autoload(struct bcm_vk *vk) } /* - * deferred work queue for auto download. + * deferred work queue for draining and auto download. */ static void bcm_vk_wq_handler(struct work_struct *work) { struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work); + struct device *dev = &vk->pdev->dev; + s32 ret; + /* check wq offload bit map to perform various operations */ + if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) { + /* clear bit right the way for notification */ + clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload); + bcm_vk_handle_notf(vk); + } if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) { bcm_vk_auto_load_all_images(vk); @@ -684,6 +926,14 @@ static void bcm_vk_wq_handler(struct work_struct *work) clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload); clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); } + + /* next, try to drain */ + ret = bcm_to_h_msg_dequeue(vk); + + if (ret == 0) + dev_dbg(dev, "Spurious trigger for workqueue\n"); + else if (ret < 0) + bcm_vk_blk_drv_access(vk); } static long bcm_vk_load_image(struct bcm_vk *vk, @@ -837,6 +1087,9 @@ static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static const struct file_operations bcm_vk_fops = { .owner = THIS_MODULE, .open = bcm_vk_open, + .read = bcm_vk_read, + .write = bcm_vk_write, + .poll = bcm_vk_poll, .release = bcm_vk_release, .unlocked_ioctl = bcm_vk_ioctl, }; @@ -869,6 +1122,12 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; kref_init(&vk->kref); + if (nr_ib_sgl_blk > BCM_VK_IB_SGL_BLK_MAX) { + dev_warn(dev, "Inband SGL blk %d limited to max %d\n", + nr_ib_sgl_blk, BCM_VK_IB_SGL_BLK_MAX); + nr_ib_sgl_blk = BCM_VK_IB_SGL_BLK_MAX; + } + vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE; mutex_init(&vk->mutex); err = pci_enable_device(pdev); @@ -932,11 +1191,34 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + for (vk->num_irqs = 0; + vk->num_irqs < VK_MSIX_MSGQ_MAX; + vk->num_irqs++) { + err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs), + bcm_vk_msgq_irqhandler, + IRQF_SHARED, DRV_MODULE_NAME, vk); + if (err) { + dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n", + pdev->irq + vk->num_irqs, vk->num_irqs + 1); + goto err_irq; + } + } + /* one irq for notification from VK */ + err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs), + bcm_vk_notf_irqhandler, + IRQF_SHARED, DRV_MODULE_NAME, vk); + if (err) { + dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n", + pdev->irq + vk->num_irqs, vk->num_irqs + 1); + goto err_irq; + } + vk->num_irqs++; + id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL); if (id < 0) { err = id; dev_err(dev, "unable to get id\n"); - goto err_iounmap; + goto err_irq; } vk->devid = id; @@ -966,6 +1248,12 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_misc_deregister; } + err = bcm_vk_msg_init(vk); + if (err) { + dev_err(dev, "failed to init msg 
queue info\n"); + goto err_destroy_workqueue; + } + /* sync other info */ bcm_vk_sync_card_info(vk); @@ -994,6 +1282,9 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + /* enable hb */ + bcm_vk_hb_init(vk); + dev_dbg(dev, "BCM-VK:%u created\n", id); return 0; @@ -1015,6 +1306,13 @@ err_kfree_name: err_ida_remove: ida_simple_remove(&bcm_vk_ida, id); +err_irq: + for (i = 0; i < vk->num_irqs; i++) + devm_free_irq(dev, pci_irq_vector(pdev, i), vk); + + pci_disable_msix(pdev); + pci_disable_msi(pdev); + err_iounmap: for (i = 0; i < MAX_BAR; i++) { if (vk->bar[i]) @@ -1053,6 +1351,8 @@ static void bcm_vk_remove(struct pci_dev *pdev) struct bcm_vk *vk = pci_get_drvdata(pdev); struct miscdevice *misc_device = &vk->miscdev; + bcm_vk_hb_deinit(vk); + /* * Trigger a reset to card and wait enough time for UCODE to rerun, * which re-initialize the card into its default state. @@ -1076,6 +1376,11 @@ static void bcm_vk_remove(struct pci_dev *pdev) kfree(misc_device->name); ida_simple_remove(&bcm_vk_ida, vk->devid); } + for (i = 0; i < vk->num_irqs; i++) + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk); + + pci_disable_msix(pdev); + pci_disable_msi(pdev); cancel_work_sync(&vk->wq_work); destroy_workqueue(vk->wq_thread); diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c index 2d9a6b4e5f61..b05e20a72a8b 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.c +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -3,8 +3,200 @@ * Copyright 2018-2020 Broadcom. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "bcm_vk.h" #include "bcm_vk_msg.h" +#include "bcm_vk_sg.h" + +/* functions to manipulate the transport id in msg block */ +#define BCM_VK_MSG_Q_SHIFT 4 +#define BCM_VK_MSG_Q_MASK 0xF +#define BCM_VK_MSG_ID_MASK 0xFFF + +#define BCM_VK_DMA_DRAIN_MAX_MS 2000 + +/* number x q_size will be the max number of msg processed per loop */ +#define BCM_VK_MSG_PROC_MAX_LOOP 2 + +/* module parameter */ +static bool hb_mon = true; +module_param(hb_mon, bool, 0444); +MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n"); +static int batch_log = 1; +module_param(batch_log, int, 0444); +MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n"); + +static bool hb_mon_is_on(void) +{ + return hb_mon; +} + +static u32 get_q_num(const struct vk_msg_blk *msg) +{ + u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK; + + if (q_num >= VK_MSGQ_PER_CHAN_MAX) + q_num = VK_MSGQ_NUM_DEFAULT; + return q_num; +} + +static void set_q_num(struct vk_msg_blk *msg, u32 q_num) +{ + msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | + (q_num >= VK_MSGQ_PER_CHAN_MAX) ? 
VK_MSGQ_NUM_DEFAULT : q_num; +} + +static u32 get_msg_id(const struct vk_msg_blk *msg) +{ + return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK); +} + +static void set_msg_id(struct vk_msg_blk *msg, u32 val) +{ + msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg); +} + +static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc) +{ + return ((idx + inc) & qinfo->q_mask); +} + +static +struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo, + u32 idx) +{ + return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx); +} + +static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq, + const struct bcm_vk_sync_qinfo *qinfo) +{ + u32 wr_idx, rd_idx; + + wr_idx = readl_relaxed(&msgq->wr_idx); + rd_idx = readl_relaxed(&msgq->rd_idx); + + return ((wr_idx - rd_idx) & qinfo->q_mask); +} + +static +u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq, + const struct bcm_vk_sync_qinfo *qinfo) +{ + return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1); +} + +/* number of retries when enqueue message fails before returning EAGAIN */ +#define BCM_VK_H2VK_ENQ_RETRY 10 +#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50 + +bool bcm_vk_drv_access_ok(struct bcm_vk *vk) +{ + return (!!atomic_read(&vk->msgq_inited)); +} + +void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask) +{ + struct bcm_vk_alert *alert = &vk->host_alert; + unsigned long flags; + + /* use irqsave version as this maybe called inside timer interrupt */ + spin_lock_irqsave(&vk->host_alert_lock, flags); + alert->notfs |= bit_mask; + spin_unlock_irqrestore(&vk->host_alert_lock, flags); + + if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0) + queue_work(vk->wq_thread, &vk->wq_work); +} + +/* + * Heartbeat related defines + * The heartbeat from host is a last resort. If stuck condition happens + * on the card, firmware is supposed to detect it. Therefore, the heartbeat + * values used will be more relaxed on the driver, which need to be bigger + * than the watchdog timeout on the card. The watchdog timeout on the card + * is 20s, with a jitter of 2s => 22s. We use a value of 27s here. + */ +#define BCM_VK_HB_TIMER_S 3 +#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ) +#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S) + +static void bcm_vk_hb_poll(struct timer_list *t) +{ + u32 uptime_s; + struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl, + timer); + struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl); + + if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) { + /* read uptime from register and compare */ + uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME); + + if (uptime_s == hb->last_uptime) + hb->lost_cnt++; + else /* reset to avoid accumulation */ + hb->lost_cnt = 0; + + dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n", + hb->last_uptime, uptime_s, hb->lost_cnt); + + /* + * if the interface goes down without any activity, a value + * of 0xFFFFFFFF will be continuously read, and the detection + * will be happened eventually. 
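+ * (any frozen reading, the all-ones PCIe-down value included, keeps
+ * lost_cnt climbing until the BCM_VK_HB_LOST_MAX check below fires)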
+ */ + hb->last_uptime = uptime_s; + } else { + /* reset heart beat lost cnt */ + hb->lost_cnt = 0; + } + + /* next, check if heartbeat exceeds limit */ + if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) { + dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n", + BCM_VK_HB_LOST_MAX, + BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S); + + bcm_vk_blk_drv_access(vk); + bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL); + } + /* re-arm timer */ + mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE); +} + +void bcm_vk_hb_init(struct bcm_vk *vk) +{ + struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; + + timer_setup(&hb->timer, bcm_vk_hb_poll, 0); + mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE); +} + +void bcm_vk_hb_deinit(struct bcm_vk *vk) +{ + struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; + + del_timer(&hb->timer); +} + +static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk, + unsigned int start, + unsigned int nbits) +{ + spin_lock(&vk->msg_id_lock); + bitmap_clear(vk->bmap, start, nbits); + spin_unlock(&vk->msg_id_lock); +} /* * allocate a ctx per file struct @@ -39,12 +231,47 @@ static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid) /* increase kref */ kref_get(&vk->kref); + /* clear counter */ + atomic_set(&ctx->pend_cnt, 0); + atomic_set(&ctx->dma_cnt, 0); + init_waitqueue_head(&ctx->rd_wq); + all_in_use_exit: spin_unlock(&vk->ctx_lock); return ctx; } +static u16 bcm_vk_get_msg_id(struct bcm_vk *vk) +{ + u16 rc = VK_MSG_ID_OVERFLOW; + u16 test_bit_count = 0; + + spin_lock(&vk->msg_id_lock); + while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) { + /* + * first time come in this loop, msg_id will be 0 + * and the first one tested will be 1. We skip + * VK_SIMPLEX_MSG_ID (0) for one way host2vk + * communication + */ + vk->msg_id++; + if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE) + vk->msg_id = 1; + + if (test_bit(vk->msg_id, vk->bmap)) { + test_bit_count++; + continue; + } + rc = vk->msg_id; + bitmap_set(vk->bmap, vk->msg_id, 1); + break; + } + spin_unlock(&vk->msg_id_lock); + + return rc; +} + static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx) { u32 idx; @@ -82,6 +309,636 @@ static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx) return count; } +static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry) +{ + int proc_cnt; + + bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt); + if (proc_cnt) + atomic_dec(&entry->ctx->dma_cnt); + + kfree(entry->to_h_msg); + kfree(entry); +} + +static void bcm_vk_drain_all_pend(struct device *dev, + struct bcm_vk_msg_chan *chan, + struct bcm_vk_ctx *ctx) +{ + u32 num; + struct bcm_vk_wkent *entry, *tmp; + struct bcm_vk *vk; + struct list_head del_q; + + if (ctx) + vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); + + INIT_LIST_HEAD(&del_q); + spin_lock(&chan->pendq_lock); + for (num = 0; num < chan->q_nr; num++) { + list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) { + if ((!ctx) || (entry->ctx->idx == ctx->idx)) { + list_del(&entry->node); + list_add_tail(&entry->node, &del_q); + } + } + } + spin_unlock(&chan->pendq_lock); + + /* batch clean up */ + num = 0; + list_for_each_entry_safe(entry, tmp, &del_q, node) { + list_del(&entry->node); + num++; + if (ctx) { + struct vk_msg_blk *msg; + int bit_set; + bool responded; + u32 msg_id; + + /* if it is specific ctx, log for any stuck */ + msg = entry->to_v_msg; + msg_id = get_msg_id(msg); + bit_set = test_bit(msg_id, vk->bmap); + responded = entry->to_h_msg ? 
true : false; + if (num <= batch_log) + dev_info(dev, + "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n", + msg->function_id, msg->size, + msg_id, entry->seq_num, + msg->context_id, entry->ctx->idx, + msg->cmd, msg->arg, + responded ? "T" : "F", bit_set); + if (responded) + atomic_dec(&ctx->pend_cnt); + else if (bit_set) + bcm_vk_msgid_bitmap_clear(vk, msg_id, 1); + } + bcm_vk_free_wkent(dev, entry); + } + if (num && ctx) + dev_info(dev, "Total drained items %d [fd-%d]\n", + num, ctx->idx); +} + +/* + * Function to sync up the messages queue info that is provided by BAR1 + */ +int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync) +{ + struct bcm_vk_msgq __iomem *msgq; + struct device *dev = &vk->pdev->dev; + u32 msgq_off; + u32 num_q; + struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan, + &vk->to_h_msg_chan}; + struct bcm_vk_msg_chan *chan; + int i, j; + int ret = 0; + + /* + * If the driver is loaded at startup where vk OS is not up yet, + * the msgq-info may not be available until a later time. In + * this case, we skip and the sync function is supposed to be + * called again. + */ + if (!bcm_vk_msgq_marker_valid(vk)) { + dev_info(dev, "BAR1 msgq marker not initialized.\n"); + return -EAGAIN; + } + + msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF); + + /* each side is always half the total */ + num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2; + if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) { + dev_err(dev, + "Advertised msgq %d error - max %d allowed\n", + num_q, VK_MSGQ_PER_CHAN_MAX); + return -EINVAL; + } + + vk->to_v_msg_chan.q_nr = num_q; + vk->to_h_msg_chan.q_nr = num_q; + + /* first msgq location */ + msgq = vk->bar[BAR_1] + msgq_off; + + /* + * if this function is called when it is already inited, + * something is wrong + */ + if (bcm_vk_drv_access_ok(vk) && !force_sync) { + dev_err(dev, "Msgq info already in sync\n"); + return -EPERM; + } + + for (i = 0; i < ARRAY_SIZE(chan_list); i++) { + chan = chan_list[i]; + memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo)); + + for (j = 0; j < num_q; j++) { + struct bcm_vk_sync_qinfo *qinfo; + u32 msgq_start; + u32 msgq_size; + u32 msgq_nxt; + u32 msgq_db_offset, q_db_offset; + + chan->msgq[j] = msgq; + msgq_start = readl_relaxed(&msgq->start); + msgq_size = readl_relaxed(&msgq->size); + msgq_nxt = readl_relaxed(&msgq->nxt); + msgq_db_offset = readl_relaxed(&msgq->db_offset); + q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1)); + if (q_db_offset == (~msgq_db_offset >> DB_SHIFT)) + msgq_db_offset = q_db_offset; + else + /* fall back to default */ + msgq_db_offset = VK_BAR0_Q_DB_BASE(j); + + dev_info(dev, + "MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n", + j, + readw_relaxed(&msgq->type), + readw_relaxed(&msgq->num), + msgq_start, + msgq_db_offset, + readl_relaxed(&msgq->rd_idx), + readl_relaxed(&msgq->wr_idx), + msgq_size, + msgq_nxt); + + qinfo = &chan->sync_qinfo[j]; + /* formulate and record static info */ + qinfo->q_start = vk->bar[BAR_1] + msgq_start; + qinfo->q_size = msgq_size; + /* set low threshold as 50% or 1/2 */ + qinfo->q_low = qinfo->q_size >> 1; + qinfo->q_mask = qinfo->q_size - 1; + qinfo->q_db_offset = msgq_db_offset; + + msgq++; + } + } + atomic_set(&vk->msgq_inited, 1); + + return ret; +} + +static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan) +{ + u32 i; + + mutex_init(&chan->msgq_mutex); + spin_lock_init(&chan->pendq_lock); + for (i = 0; i < VK_MSGQ_MAX_NR; i++) + INIT_LIST_HEAD(&chan->pendq[i]); + 
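+ /*
+  * pendq[i] holds the work entries posted on queue i that are still
+  * waiting for a matching response from the card.
+  */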
+ return 0; +} + +static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num, + struct bcm_vk_wkent *entry) +{ + struct bcm_vk_ctx *ctx; + + spin_lock(&chan->pendq_lock); + list_add_tail(&entry->node, &chan->pendq[q_num]); + if (entry->to_h_msg) { + ctx = entry->ctx; + atomic_inc(&ctx->pend_cnt); + wake_up_interruptible(&ctx->rd_wq); + } + spin_unlock(&chan->pendq_lock); +} + +static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk, + struct bcm_vk_wkent *entry, + struct _vk_data *data, + unsigned int num_planes) +{ + unsigned int i; + unsigned int item_cnt = 0; + struct device *dev = &vk->pdev->dev; + struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; + struct vk_msg_blk *msg = &entry->to_v_msg[0]; + struct bcm_vk_msgq __iomem *msgq; + struct bcm_vk_sync_qinfo *qinfo; + u32 ib_sgl_size = 0; + u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks]; + u32 avail; + u32 q_num; + + /* check if high watermark is hit, and if so, skip */ + q_num = get_q_num(msg); + msgq = chan->msgq[q_num]; + qinfo = &chan->sync_qinfo[q_num]; + avail = msgq_avail_space(msgq, qinfo); + if (avail < qinfo->q_low) { + dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n", + avail, qinfo->q_size); + return 0; + } + + for (i = 0; i < num_planes; i++) { + if (data[i].address && + (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) { + item_cnt++; + memcpy(buf, entry->dma[i].sglist, data[i].size); + ib_sgl_size += data[i].size; + buf += data[i].size; + } + } + + dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n", + item_cnt, ib_sgl_size, vk->ib_sgl_size); + + /* round up size */ + ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1) + >> VK_MSGQ_BLK_SZ_SHIFT; + + return ib_sgl_size; +} + +void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val) +{ + struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; + struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num]; + + vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset); +} + +static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry) +{ + static u32 seq_num; + struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; + struct device *dev = &vk->pdev->dev; + struct vk_msg_blk *src = &entry->to_v_msg[0]; + + struct vk_msg_blk __iomem *dst; + struct bcm_vk_msgq __iomem *msgq; + struct bcm_vk_sync_qinfo *qinfo; + u32 q_num = get_q_num(src); + u32 wr_idx; /* local copy */ + u32 i; + u32 avail; + u32 retry; + + if (entry->to_v_blks != src->size + 1) { + dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n", + entry->to_v_blks, + src->size + 1, + get_msg_id(src), + src->function_id, + src->context_id); + return -EMSGSIZE; + } + + msgq = chan->msgq[q_num]; + qinfo = &chan->sync_qinfo[q_num]; + + mutex_lock(&chan->msgq_mutex); + + avail = msgq_avail_space(msgq, qinfo); + + /* if not enough space, return EAGAIN and let app handles it */ + retry = 0; + while ((avail < entry->to_v_blks) && + (retry++ < BCM_VK_H2VK_ENQ_RETRY)) { + mutex_unlock(&chan->msgq_mutex); + + msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS); + mutex_lock(&chan->msgq_mutex); + avail = msgq_avail_space(msgq, qinfo); + } + if (retry > BCM_VK_H2VK_ENQ_RETRY) { + mutex_unlock(&chan->msgq_mutex); + return -EAGAIN; + } + + /* at this point, mutex is taken and there is enough space */ + entry->seq_num = seq_num++; /* update debug seq number */ + wr_idx = readl_relaxed(&msgq->wr_idx); + + if (wr_idx >= qinfo->q_size) { + dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!", + wr_idx, qinfo->q_size); + bcm_vk_blk_drv_access(vk); + bcm_vk_set_host_alert(vk, 
ERR_LOG_HOST_PCIE_DWN); + goto idx_err; + } + + dst = msgq_blk_addr(qinfo, wr_idx); + for (i = 0; i < entry->to_v_blks; i++) { + memcpy_toio(dst, src, sizeof(*dst)); + + src++; + wr_idx = msgq_inc(qinfo, wr_idx, 1); + dst = msgq_blk_addr(qinfo, wr_idx); + } + + /* flush the write pointer */ + writel(wr_idx, &msgq->wr_idx); + + /* log new info for debugging */ + dev_dbg(dev, + "MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n", + readl_relaxed(&msgq->num), + readl_relaxed(&msgq->rd_idx), + wr_idx, + entry->to_v_blks, + msgq_occupied(msgq, qinfo), + msgq_avail_space(msgq, qinfo), + readl_relaxed(&msgq->size)); + /* + * press door bell based on queue number. 1 is added to the wr_idx + * to avoid the value of 0 appearing on the VK side to distinguish + * from initial value. + */ + bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1); +idx_err: + mutex_unlock(&chan->msgq_mutex); + return 0; +} + +int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type, + const pid_t pid, const u32 q_num) +{ + int rc = 0; + struct bcm_vk_wkent *entry; + struct device *dev = &vk->pdev->dev; + + /* + * check if the marker is still good. Sometimes, the PCIe interface may + * have gone done, and if so and we ship down thing based on broken + * values, kernel may panic. + */ + if (!bcm_vk_msgq_marker_valid(vk)) { + dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n", + vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY)); + return -EINVAL; + } + + entry = kzalloc(sizeof(*entry) + + sizeof(struct vk_msg_blk), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + /* fill up necessary data */ + entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN; + set_q_num(&entry->to_v_msg[0], q_num); + set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID); + entry->to_v_blks = 1; /* always 1 block */ + + entry->to_v_msg[0].cmd = shut_type; + entry->to_v_msg[0].arg = pid; + + rc = bcm_to_v_msg_enqueue(vk, entry); + if (rc) + dev_err(dev, + "Sending shutdown message to q %d for pid %d fails.\n", + get_q_num(&entry->to_v_msg[0]), pid); + + kfree(entry); + + return rc; +} + +static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid, + const u32 q_num) +{ + int rc = 0; + struct device *dev = &vk->pdev->dev; + + /* + * don't send down or do anything if message queue is not initialized + */ + if (!bcm_vk_drv_access_ok(vk)) + return -EPERM; + + dev_dbg(dev, "No more sessions, shut down pid %d\n", pid); + + rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num); + + return rc; +} + +static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk, + struct bcm_vk_msg_chan *chan, + u16 q_num, + u16 msg_id) +{ + bool found = false; + struct bcm_vk_wkent *entry; + + spin_lock(&chan->pendq_lock); + list_for_each_entry(entry, &chan->pendq[q_num], node) { + if (get_msg_id(&entry->to_v_msg[0]) == msg_id) { + list_del(&entry->node); + found = true; + bcm_vk_msgid_bitmap_clear(vk, msg_id, 1); + break; + } + } + spin_unlock(&chan->pendq_lock); + return ((found) ? 
entry : NULL); +} + +s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk) +{ + struct device *dev = &vk->pdev->dev; + struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan; + struct vk_msg_blk *data; + struct vk_msg_blk __iomem *src; + struct vk_msg_blk *dst; + struct bcm_vk_msgq __iomem *msgq; + struct bcm_vk_sync_qinfo *qinfo; + struct bcm_vk_wkent *entry; + u32 rd_idx, wr_idx; + u32 q_num, msg_id, j; + u32 num_blks; + s32 total = 0; + int cnt = 0; + int msg_processed = 0; + int max_msg_to_process; + bool exit_loop; + + /* + * drain all the messages from the queues, and find its pending + * entry in the to_v queue, based on msg_id & q_num, and move the + * entry to the to_h pending queue, waiting for user space + * program to extract + */ + mutex_lock(&chan->msgq_mutex); + + for (q_num = 0; q_num < chan->q_nr; q_num++) { + msgq = chan->msgq[q_num]; + qinfo = &chan->sync_qinfo[q_num]; + max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size; + + rd_idx = readl_relaxed(&msgq->rd_idx); + wr_idx = readl_relaxed(&msgq->wr_idx); + msg_processed = 0; + exit_loop = false; + while ((rd_idx != wr_idx) && !exit_loop) { + u8 src_size; + + /* + * Make a local copy and get pointer to src blk + * The rd_idx is masked before getting the pointer to + * avoid out of bound access in case the interface goes + * down. It will end up pointing to the last block in + * the buffer, but subsequent src->size check would be + * able to catch this. + */ + src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask); + src_size = readb(&src->size); + + if ((rd_idx >= qinfo->q_size) || + (src_size > (qinfo->q_size - 1))) { + dev_crit(dev, + "Invalid rd_idx 0x%x or size 0x%x => max 0x%x!", + rd_idx, src_size, qinfo->q_size); + bcm_vk_blk_drv_access(vk); + bcm_vk_set_host_alert(vk, + ERR_LOG_HOST_PCIE_DWN); + goto idx_err; + } + + num_blks = src_size + 1; + data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL); + if (data) { + /* copy messages and linearize it */ + dst = data; + for (j = 0; j < num_blks; j++) { + memcpy_fromio(dst, src, sizeof(*dst)); + + dst++; + rd_idx = msgq_inc(qinfo, rd_idx, 1); + src = msgq_blk_addr(qinfo, rd_idx); + } + total++; + } else { + /* + * if we could not allocate memory in kernel, + * that is fatal. + */ + dev_crit(dev, "Kernel mem allocation failure.\n"); + return -ENOMEM; + } + + /* flush rd pointer after a message is dequeued */ + writel(rd_idx, &msgq->rd_idx); + + /* log new info for debugging */ + dev_dbg(dev, + "MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n", + readl_relaxed(&msgq->num), + rd_idx, + wr_idx, + num_blks, + msgq_occupied(msgq, qinfo), + msgq_avail_space(msgq, qinfo), + readl_relaxed(&msgq->size)); + + /* + * No need to search if it is an autonomous one-way + * message from driver, as these messages do not bear + * a to_v pending item. Currently, only the shutdown + * message falls into this category. 
+			 */
+			if (data->function_id == VK_FID_SHUTDOWN) {
+				kfree(data);
+				continue;
+			}
+
+			msg_id = get_msg_id(data);
+			/* lookup original message in to_v direction */
+			entry = bcm_vk_dequeue_pending(vk,
+						       &vk->to_v_msg_chan,
+						       q_num,
+						       msg_id);
+
+			/*
+			 * if a prior send is found, move the entry to the
+			 * to_h pending queue; a response without a matching
+			 * prior send is logged and dropped below
+			 */
+			if (entry) {
+				entry->to_h_blks = num_blks;
+				entry->to_h_msg = data;
+				bcm_vk_append_pendq(&vk->to_h_msg_chan,
+						    q_num, entry);
+
+			} else {
+				if (cnt++ < batch_log)
+					dev_info(dev,
+						 "Could not find MsgId[0x%x] for resp func %d bmap %d\n",
+						 msg_id, data->function_id,
+						 test_bit(msg_id, vk->bmap));
+				kfree(data);
+			}
+			/* Fetch wr_idx to handle more back-to-back events */
+			wr_idx = readl(&msgq->wr_idx);
+
+			/*
+			 * cap the number of messages handled per pass so that
+			 * back-to-back events won't hold the CPU for too long,
+			 * and so that corrupted rd/wr indexes cannot trigger
+			 * infinite looping.
+			 */
+			if (++msg_processed >= max_msg_to_process) {
+				dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
+					 q_num, max_msg_to_process);
+				exit_loop = true;
+			}
+		}
+	}
+idx_err:
+	mutex_unlock(&chan->msgq_mutex);
+	dev_dbg(dev, "total %d drained from queues\n", total);
+
+	return total;
+}
+
+/*
+ * init routine for all required data structures
+ */
+static int bcm_vk_data_init(struct bcm_vk *vk)
+{
+	int i;
+
+	spin_lock_init(&vk->ctx_lock);
+	for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
+		vk->ctx[i].in_use = false;
+		vk->ctx[i].idx = i;	/* self identity */
+		vk->ctx[i].miscdev = NULL;
+	}
+	spin_lock_init(&vk->msg_id_lock);
+	spin_lock_init(&vk->host_alert_lock);
+	vk->msg_id = 0;
+
+	/* initialize hash table */
+	for (i = 0; i < VK_PID_HT_SZ; i++)
+		INIT_LIST_HEAD(&vk->pid_ht[i].head);
+
+	return 0;
+}
+
+irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
+{
+	struct bcm_vk *vk = dev_id;
+
+	if (!bcm_vk_drv_access_ok(vk)) {
+		dev_err(&vk->pdev->dev,
+			"Interrupt %d received when msgq not inited\n", irq);
+		goto skip_schedule_work;
+	}
+
+	queue_work(vk->wq_thread, &vk->wq_work);
+
+skip_schedule_work:
+	return IRQ_HANDLED;
+}
+
 int bcm_vk_open(struct inode *inode, struct file *p_file)
 {
 	struct bcm_vk_ctx *ctx;
@@ -112,16 +969,346 @@ int bcm_vk_open(struct inode *inode, struct file *p_file)
 	return rc;
 }
 
+ssize_t bcm_vk_read(struct file *p_file,
+		    char __user *buf,
+		    size_t count,
+		    loff_t *f_pos)
+{
+	ssize_t rc = -ENOMSG;
+	struct bcm_vk_ctx *ctx = p_file->private_data;
+	struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
+					 miscdev);
+	struct device *dev = &vk->pdev->dev;
+	struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
+	struct bcm_vk_wkent *entry = NULL;
+	u32 q_num;
+	u32 rsp_length;
+	bool found = false;
+
+	if (!bcm_vk_drv_access_ok(vk))
+		return -EPERM;
+
+	dev_dbg(dev, "Buf count %zu\n", count);
+	found = false;
+
+	/*
+	 * search through the pendq on the to_h chan, and return only those
+	 * that belong to the same context.
Search is always from the high to + * the low priority queues + */ + spin_lock(&chan->pendq_lock); + for (q_num = 0; q_num < chan->q_nr; q_num++) { + list_for_each_entry(entry, &chan->pendq[q_num], node) { + if (entry->ctx->idx == ctx->idx) { + if (count >= + (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) { + list_del(&entry->node); + atomic_dec(&ctx->pend_cnt); + found = true; + } else { + /* buffer not big enough */ + rc = -EMSGSIZE; + } + goto read_loop_exit; + } + } + } +read_loop_exit: + spin_unlock(&chan->pendq_lock); + + if (found) { + /* retrieve the passed down msg_id */ + set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id); + rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE; + if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0) + rc = rsp_length; + + bcm_vk_free_wkent(dev, entry); + } else if (rc == -EMSGSIZE) { + struct vk_msg_blk tmp_msg = entry->to_h_msg[0]; + + /* + * in this case, return just the first block, so + * that app knows what size it is looking for. + */ + set_msg_id(&tmp_msg, entry->usr_msg_id); + tmp_msg.size = entry->to_h_blks - 1; + if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) { + dev_err(dev, "Error return 1st block in -EMSGSIZE\n"); + rc = -EFAULT; + } + } + return rc; +} + +ssize_t bcm_vk_write(struct file *p_file, + const char __user *buf, + size_t count, + loff_t *f_pos) +{ + ssize_t rc; + struct bcm_vk_ctx *ctx = p_file->private_data; + struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, + miscdev); + struct bcm_vk_msgq __iomem *msgq; + struct device *dev = &vk->pdev->dev; + struct bcm_vk_wkent *entry; + u32 sgl_extra_blks; + u32 q_num; + u32 msg_size; + u32 msgq_size; + + if (!bcm_vk_drv_access_ok(vk)) + return -EPERM; + + dev_dbg(dev, "Msg count %zu\n", count); + + /* first, do sanity check where count should be multiple of basic blk */ + if (count & (VK_MSGQ_BLK_SIZE - 1)) { + dev_err(dev, "Failure with size %zu not multiple of %zu\n", + count, VK_MSGQ_BLK_SIZE); + rc = -EINVAL; + goto write_err; + } + + /* allocate the work entry + buffer for size count and inband sgl */ + entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size, + GFP_KERNEL); + if (!entry) { + rc = -ENOMEM; + goto write_err; + } + + /* now copy msg from user space, and then formulate the work entry */ + if (copy_from_user(&entry->to_v_msg[0], buf, count)) { + rc = -EFAULT; + goto write_free_ent; + } + + entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT; + entry->ctx = ctx; + + /* do a check on the blk size which could not exceed queue space */ + q_num = get_q_num(&entry->to_v_msg[0]); + msgq = vk->to_v_msg_chan.msgq[q_num]; + msgq_size = readl_relaxed(&msgq->size); + if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT) + > (msgq_size - 1)) { + dev_err(dev, "Blk size %d exceed max queue size allowed %d\n", + entry->to_v_blks, msgq_size - 1); + rc = -EINVAL; + goto write_free_ent; + } + + /* Use internal message id */ + entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]); + rc = bcm_vk_get_msg_id(vk); + if (rc == VK_MSG_ID_OVERFLOW) { + dev_err(dev, "msg_id overflow\n"); + rc = -EOVERFLOW; + goto write_free_ent; + } + set_msg_id(&entry->to_v_msg[0], rc); + ctx->q_num = q_num; + + dev_dbg(dev, + "[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n", + ctx->q_num, ctx->idx, entry->usr_msg_id, + get_msg_id(&entry->to_v_msg[0])); + + if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) { + /* Convert any pointers to sg list */ + unsigned int num_planes; + int dir; + struct _vk_data *data; + + num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK; 
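+		/*
+		 * cmd packs the transfer direction (VK_CMD_MASK) and the
+		 * plane count (VK_CMD_PLANES_MASK); both masks are defined
+		 * in bcm_vk_msg.h
+		 */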
+ if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD) + dir = DMA_FROM_DEVICE; + else + dir = DMA_TO_DEVICE; + + /* Calculate vk_data location */ + /* Go to end of the message */ + msg_size = entry->to_v_msg[0].size; + if (msg_size > entry->to_v_blks) { + rc = -EMSGSIZE; + goto write_free_msgid; + } + + data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1]; + + /* Now back up to the start of the pointers */ + data -= num_planes; + + /* Convert user addresses to DMA SG List */ + rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes); + if (rc) + goto write_free_msgid; + + atomic_inc(&ctx->dma_cnt); + /* try to embed inband sgl */ + sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data, + num_planes); + entry->to_v_blks += sgl_extra_blks; + entry->to_v_msg[0].size += sgl_extra_blks; + } else if (entry->to_v_msg[0].function_id == VK_FID_INIT && + entry->to_v_msg[0].context_id == VK_NEW_CTX) { + /* + * Init happens in 2 stages, only the first stage contains the + * pid that needs translating. + */ + pid_t org_pid, pid; + + /* + * translate the pid into the unique host space as user + * may run sessions inside containers or process + * namespaces. + */ +#define VK_MSG_PID_MASK 0xffffff00 +#define VK_MSG_PID_SH 8 + org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK) + >> VK_MSG_PID_SH; + + pid = task_tgid_nr(current); + entry->to_v_msg[0].arg = + (entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) | + (pid << VK_MSG_PID_SH); + if (org_pid != pid) + dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n", + org_pid, org_pid, pid, pid); + } + + /* + * store work entry to pending queue until a response is received. + * This needs to be done before enqueuing the message + */ + bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry); + + rc = bcm_to_v_msg_enqueue(vk, entry); + if (rc) { + dev_err(dev, "Fail to enqueue msg to to_v queue\n"); + + /* remove message from pending list */ + entry = bcm_vk_dequeue_pending + (vk, + &vk->to_v_msg_chan, + q_num, + get_msg_id(&entry->to_v_msg[0])); + goto write_free_ent; + } + + return count; + +write_free_msgid: + bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1); +write_free_ent: + kfree(entry); +write_err: + return rc; +} + +__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait) +{ + __poll_t ret = 0; + int cnt; + struct bcm_vk_ctx *ctx = p_file->private_data; + struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); + struct device *dev = &vk->pdev->dev; + + poll_wait(p_file, &ctx->rd_wq, wait); + + cnt = atomic_read(&ctx->pend_cnt); + if (cnt) { + ret = (__force __poll_t)(POLLIN | POLLRDNORM); + if (cnt < 0) { + dev_err(dev, "Error cnt %d, setting back to 0", cnt); + atomic_set(&ctx->pend_cnt, 0); + } + } + + return ret; +} + int bcm_vk_release(struct inode *inode, struct file *p_file) { int ret; struct bcm_vk_ctx *ctx = p_file->private_data; struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); + struct device *dev = &vk->pdev->dev; + pid_t pid = ctx->pid; + int dma_cnt; + unsigned long timeout, start_time; + + /* + * if there are outstanding DMA transactions, need to delay long enough + * to ensure that the card side would have stopped touching the host buffer + * and its SGL list. A race condition could happen if the host app is killed + * abruptly, eg kill -9, while some DMA transfer orders are still inflight. + * Nothing could be done except for a delay as host side is running in a + * completely async fashion. 
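+	 * The polling loop below bounds that delay with
+	 * BCM_VK_DMA_DRAIN_MAX_MS, so a wedged card cannot stall release.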
+	 */
+	start_time = jiffies;
+	timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
+	do {
+		if (time_after(jiffies, timeout)) {
+			dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
+				 dma_cnt, ctx->idx, pid);
+			break;
+		}
+		dma_cnt = atomic_read(&ctx->dma_cnt);
+		cpu_relax();
+		cond_resched();
+	} while (dma_cnt);
+	dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
+		ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));
+
+	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
+	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);
 
 	ret = bcm_vk_free_ctx(vk, ctx);
+	if (ret == 0)
+		ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
+	else
+		ret = 0;
 
 	kref_put(&vk->kref, bcm_vk_release_data);
 
 	return ret;
 }
 
+int bcm_vk_msg_init(struct bcm_vk *vk)
+{
+	struct device *dev = &vk->pdev->dev;
+	int ret;
+
+	if (bcm_vk_data_init(vk)) {
+		dev_err(dev, "Error initializing internal data structures\n");
+		return -EINVAL;
+	}
+
+	if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
+	    bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
+		dev_err(dev, "Error initializing communication channel\n");
+		return -EIO;
+	}
+
+	/* read msgq info if ready */
+	ret = bcm_vk_sync_msgq(vk, false);
+	if (ret && (ret != -EAGAIN)) {
+		dev_err(dev, "Error reading comm msg Q info\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void bcm_vk_msg_remove(struct bcm_vk *vk)
+{
+	bcm_vk_blk_drv_access(vk);
+
+	/* drain all pending items */
+	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
+	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
+}
+
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.h b/drivers/misc/bcm-vk/bcm_vk_msg.h
index 32516abcaf89..4eaad84825d6 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.h
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.h
@@ -6,6 +6,78 @@
 #ifndef BCM_VK_MSG_H
 #define BCM_VK_MSG_H
 
+#include
+#include "bcm_vk_sg.h"
+
+/* Single message queue control structure */
+struct bcm_vk_msgq {
+	u16 type;	/* queue type */
+	u16 num;	/* queue number */
+	u32 start;	/* offset in BAR1 where the queue memory starts */
+
+	u32 rd_idx; /* read idx */
+	u32 wr_idx; /* write idx */
+
+	u32 size;	/*
+			 * size, which is in number of 16byte blocks,
+			 * to align with the message data structure.
+			 */
+	u32 nxt;	/*
+			 * nxt offset to the next msg queue struct.
+			 * This is to provide flexibility for alignment
+			 * purposes.
+			 */
+
+/* Least significant 16 bits in below field hold doorbell register offset */
+#define DB_SHIFT 16
+
+	u32 db_offset;	/* queue doorbell register offset in BAR0 */
+
+	u32 rsvd;
+};
+
+/*
+ * Structure to record static info from the msgq sync. We keep a local copy
+ * of some of these variables for both performance and checking purposes.
+ */
+struct bcm_vk_sync_qinfo {
+	void __iomem *q_start;
+	u32 q_size;
+	u32 q_mask;
+	u32 q_low;
+	u32 q_db_offset;
+};
+
+#define VK_MSGQ_MAX_NR 4 /* Maximum number of message queues */
+
+/*
+ * message block - basic unit in the message where a message's size is always
+ * N x sizeof(basic_block)
+ */
+struct vk_msg_blk {
+	u8 function_id;
+#define VK_FID_TRANS_BUF	5
+#define VK_FID_SHUTDOWN		8
+#define VK_FID_INIT		9
+	u8 size; /* size of the message in number of vk_msg_blk's */
+	u16 trans_id; /* transport id, queue & msg_id */
+	u32 context_id;
+#define VK_NEW_CTX		0
+	u32 cmd;
+#define VK_CMD_PLANES_MASK	0x000f /* number of planes to up/download */
+#define VK_CMD_UPLOAD		0x0400 /* memory transfer to vk */
+#define VK_CMD_DOWNLOAD		0x0500 /* memory transfer from vk */
+#define VK_CMD_MASK		0x0f00 /* command mask */
+	u32 arg;
+};
+
+/* vk_msg_blk is 16 bytes fixed */
+#define VK_MSGQ_BLK_SIZE	(sizeof(struct vk_msg_blk))
+/* shift for fast division of basic msg blk size */
+#define VK_MSGQ_BLK_SZ_SHIFT	4
+
+/* use msg_id 0 for any simplex host2vk communication */
+#define VK_SIMPLEX_MSG_ID	0
+
 /* context per session opening of sysfs */
 struct bcm_vk_ctx {
 	struct list_head node; /* use for linkage in Hash Table */
@@ -13,7 +85,11 @@ struct bcm_vk_ctx {
 	bool in_use;
 	pid_t pid;
 	u32 hash_idx;
+	u32 q_num; /* queue number used by the stream */
 	struct miscdevice *miscdev;
+	atomic_t pend_cnt; /* number of items pending to be read from host */
+	atomic_t dma_cnt; /* any dma transaction outstanding */
+	wait_queue_head_t rd_wq;
 };
 
 /* pid hash table entry */
@@ -21,6 +97,55 @@ struct bcm_vk_ht_entry {
 	struct list_head head;
 };
 
+#define VK_DMA_MAX_ADDRS 4 /* Max 4 DMA Addresses */
+/* structure for house keeping a single work entry */
+struct bcm_vk_wkent {
+	struct list_head node; /* for linking purpose */
+	struct bcm_vk_ctx *ctx;
+
+	/* Store up to 4 dma pointers */
+	struct bcm_vk_dma dma[VK_DMA_MAX_ADDRS];
+
+	u32 to_h_blks; /* response */
+	struct vk_msg_blk *to_h_msg;
+
+	/*
+	 * put the to_v_msg at the end so that we could simply append to_v msg
+	 * to the end of the allocated block
+	 */
+	u32 usr_msg_id;
+	u32 to_v_blks;
+	u32 seq_num;
+	struct vk_msg_blk to_v_msg[0];
+};
+
+/* queue stats counters */
+struct bcm_vk_qs_cnts {
+	u32 cnt; /* general counter, used to limit output */
+	u32 acc_sum;
+	u32 max_occ; /* max during a sampling period */
+	u32 max_abs; /* the abs max since reset */
+};
+
+/* control channel structure for either to_v or to_h communication */
+struct bcm_vk_msg_chan {
+	u32 q_nr;
+	/* Mutex to access msgq */
+	struct mutex msgq_mutex;
+	/* pointing to BAR locations */
+	struct bcm_vk_msgq __iomem *msgq[VK_MSGQ_MAX_NR];
+	/* Spinlock to access pending queue */
+	spinlock_t pendq_lock;
+	/* for temporarily storing pending items, one for each queue */
+	struct list_head pendq[VK_MSGQ_MAX_NR];
+	/* static queue info from the sync */
+	struct bcm_vk_sync_qinfo sync_qinfo[VK_MSGQ_MAX_NR];
+};
+
+/* total number of message queues allowed by the driver */
+#define VK_MSGQ_PER_CHAN_MAX	3
+#define VK_MSGQ_NUM_DEFAULT	(VK_MSGQ_PER_CHAN_MAX - 1)
+
 /* total number of supported ctx, 32 ctx each for 5 components */
 #define VK_CMPT_CTX_MAX	(32 * 5)
 
@@ -28,4 +153,11 @@ struct bcm_vk_ht_entry {
 #define VK_PID_HT_SHIFT_BIT	7 /* 128 */
 #define VK_PID_HT_SZ		BIT(VK_PID_HT_SHIFT_BIT)
 
+/* The following are offsets of DDR info provided by the vk card */
+#define VK_BAR0_SEG_SIZE	(4 * SZ_1K) /* segment size for BAR0 */
+
+/* shutdown types supported */
+#define VK_SHUTDOWN_PID		1
+#define
VK_SHUTDOWN_GRACEFUL 2 + #endif diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.c b/drivers/misc/bcm-vk/bcm_vk_sg.c new file mode 100644 index 000000000000..2e9daaf3e492 --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk_sg.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018-2020 Broadcom. + */ +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "bcm_vk.h" +#include "bcm_vk_msg.h" +#include "bcm_vk_sg.h" + +/* + * Valkyrie has a hardware limitation of 16M transfer size. + * So limit the SGL chunks to 16M. + */ +#define BCM_VK_MAX_SGL_CHUNK SZ_16M + +static int bcm_vk_dma_alloc(struct device *dev, + struct bcm_vk_dma *dma, + int dir, + struct _vk_data *vkdata); +static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma); + +/* Uncomment to dump SGLIST */ +/* #define BCM_VK_DUMP_SGLIST */ + +static int bcm_vk_dma_alloc(struct device *dev, + struct bcm_vk_dma *dma, + int direction, + struct _vk_data *vkdata) +{ + dma_addr_t addr, sg_addr; + int err; + int i; + int offset; + u32 size; + u32 remaining_size; + u32 transfer_size; + u64 data; + unsigned long first, last; + struct _vk_data *sgdata; + + /* Get 64-bit user address */ + data = get_unaligned(&vkdata->address); + + /* offset into first page */ + offset = offset_in_page(data); + + /* Calculate number of pages */ + first = (data & PAGE_MASK) >> PAGE_SHIFT; + last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT; + dma->nr_pages = last - first + 1; + + /* Allocate DMA pages */ + dma->pages = kmalloc_array(dma->nr_pages, + sizeof(struct page *), + GFP_KERNEL); + if (!dma->pages) + return -ENOMEM; + + dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n", + data, vkdata->size, dma->nr_pages); + + dma->direction = direction; + + /* Get user pages into memory */ + err = get_user_pages_fast(data & PAGE_MASK, + dma->nr_pages, + direction == DMA_FROM_DEVICE, + dma->pages); + if (err != dma->nr_pages) { + dma->nr_pages = (err >= 0) ? err : 0; + dev_err(dev, "get_user_pages_fast, err=%d [%d]\n", + err, dma->nr_pages); + return err < 0 ? 
err : -EINVAL; + } + + /* Max size of sg list is 1 per mapped page + fields at start */ + dma->sglen = (dma->nr_pages * sizeof(*sgdata)) + + (sizeof(u32) * SGLIST_VKDATA_START); + + /* Allocate sglist */ + dma->sglist = dma_alloc_coherent(dev, + dma->sglen, + &dma->handle, + GFP_KERNEL); + if (!dma->sglist) + return -ENOMEM; + + dma->sglist[SGLIST_NUM_SG] = 0; + dma->sglist[SGLIST_TOTALSIZE] = vkdata->size; + remaining_size = vkdata->size; + sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START]; + + /* Map all pages into DMA */ + size = min_t(size_t, PAGE_SIZE - offset, remaining_size); + remaining_size -= size; + sg_addr = dma_map_page(dev, + dma->pages[0], + offset, + size, + dma->direction); + transfer_size = size; + if (unlikely(dma_mapping_error(dev, sg_addr))) { + __free_page(dma->pages[0]); + return -EIO; + } + + for (i = 1; i < dma->nr_pages; i++) { + size = min_t(size_t, PAGE_SIZE, remaining_size); + remaining_size -= size; + addr = dma_map_page(dev, + dma->pages[i], + 0, + size, + dma->direction); + if (unlikely(dma_mapping_error(dev, addr))) { + __free_page(dma->pages[i]); + return -EIO; + } + + /* + * Compress SG list entry when pages are contiguous + * and transfer size less or equal to BCM_VK_MAX_SGL_CHUNK + */ + if ((addr == (sg_addr + transfer_size)) && + ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) { + /* pages are contiguous, add to same sg entry */ + transfer_size += size; + } else { + /* pages are not contiguous, write sg entry */ + sgdata->size = transfer_size; + put_unaligned(sg_addr, (u64 *)&sgdata->address); + dma->sglist[SGLIST_NUM_SG]++; + + /* start new sg entry */ + sgdata++; + sg_addr = addr; + transfer_size = size; + } + } + /* Write last sg list entry */ + sgdata->size = transfer_size; + put_unaligned(sg_addr, (u64 *)&sgdata->address); + dma->sglist[SGLIST_NUM_SG]++; + + /* Update pointers and size field to point to sglist */ + put_unaligned((u64)dma->handle, &vkdata->address); + vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) + + (sizeof(u32) * SGLIST_VKDATA_START); + +#ifdef BCM_VK_DUMP_SGLIST + dev_dbg(dev, + "sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n", + (u64)dma->sglist, + dma->handle, + dma->sglen, + vkdata->size); + for (i = 0; i < vkdata->size / sizeof(u32); i++) + dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]); +#endif + + return 0; +} + +int bcm_vk_sg_alloc(struct device *dev, + struct bcm_vk_dma *dma, + int dir, + struct _vk_data *vkdata, + int num) +{ + int i; + int rc = -EINVAL; + + /* Convert user addresses to DMA SG List */ + for (i = 0; i < num; i++) { + if (vkdata[i].size && vkdata[i].address) { + /* + * If both size and address are non-zero + * then DMA alloc. + */ + rc = bcm_vk_dma_alloc(dev, + &dma[i], + dir, + &vkdata[i]); + } else if (vkdata[i].size || + vkdata[i].address) { + /* + * If one of size and address are zero + * there is a problem. + */ + dev_err(dev, + "Invalid vkdata %x 0x%x 0x%llx\n", + i, vkdata[i].size, vkdata[i].address); + rc = -EINVAL; + } else { + /* + * If size and address are both zero + * don't convert, but return success. 
+ */ + rc = 0; + } + + if (rc) + goto fail_alloc; + } + return rc; + +fail_alloc: + while (i > 0) { + i--; + if (dma[i].sglist) + bcm_vk_dma_free(dev, &dma[i]); + } + return rc; +} + +static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma) +{ + dma_addr_t addr; + int i; + int num_sg; + u32 size; + struct _vk_data *vkdata; + + dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen); + + /* Unmap all pages in the sglist */ + num_sg = dma->sglist[SGLIST_NUM_SG]; + vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START]; + for (i = 0; i < num_sg; i++) { + size = vkdata[i].size; + addr = get_unaligned(&vkdata[i].address); + + dma_unmap_page(dev, addr, size, dma->direction); + } + + /* Free allocated sglist */ + dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle); + + /* Release lock on all pages */ + for (i = 0; i < dma->nr_pages; i++) + put_page(dma->pages[i]); + + /* Free allocated dma pages */ + kfree(dma->pages); + dma->sglist = NULL; + + return 0; +} + +int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num, + int *proc_cnt) +{ + int i; + + *proc_cnt = 0; + /* Unmap and free all pages and sglists */ + for (i = 0; i < num; i++) { + if (dma[i].sglist) { + bcm_vk_dma_free(dev, &dma[i]); + *proc_cnt += 1; + } + } + + return 0; +} diff --git a/drivers/misc/bcm-vk/bcm_vk_sg.h b/drivers/misc/bcm-vk/bcm_vk_sg.h new file mode 100644 index 000000000000..81b3d0976ddb --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk_sg.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2018-2020 Broadcom. + */ + +#ifndef BCM_VK_SG_H +#define BCM_VK_SG_H + +#include + +struct bcm_vk_dma { + /* for userland buffer */ + struct page **pages; + int nr_pages; + + /* common */ + dma_addr_t handle; + /* + * sglist is of the following LE format + * [U32] num_sg = number of sg addresses (N) + * [U32] totalsize = totalsize of data being transferred in sglist + * [U32] size[0] = size of data in address0 + * [U32] addr_l[0] = lower 32-bits of address0 + * [U32] addr_h[0] = higher 32-bits of address0 + * .. + * [U32] size[N-1] = size of data in addressN-1 + * [U32] addr_l[N-1] = lower 32-bits of addressN-1 + * [U32] addr_h[N-1] = higher 32-bits of addressN-1 + */ + u32 *sglist; +#define SGLIST_NUM_SG 0 +#define SGLIST_TOTALSIZE 1 +#define SGLIST_VKDATA_START 2 + + int sglen; /* Length (bytes) of sglist */ + int direction; +}; + +struct _vk_data { + u32 size; /* data size in bytes */ + u64 address; /* Pointer to data */ +} __packed; + +/* + * Scatter-gather DMA buffer API. + * + * These functions provide a simple way to create a page list and a + * scatter-gather list from userspace address and map the memory + * for DMA operation. + */ +int bcm_vk_sg_alloc(struct device *dev, + struct bcm_vk_dma *dma, + int dir, + struct _vk_data *vkdata, + int num); + +int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num, + int *proc_cnt); + +#endif + -- cgit v1.2.3-58-ga151 From d63d658f74727749088bfe436230b810f3880b0a Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:24 -0800 Subject: misc: bcm-vk: reset_pid support Add reset support via ioctl. Kill user processes that are open when VK card is reset. If a particular PID has issued the reset request do not kill that process as it issued the ioctl. 
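As a usage sketch, user space would trigger this path roughly as follows
(a minimal sketch assuming the misc device node /dev/bcm-vk.0 and the
VK_IOCTL_RESET / struct vk_reset definitions from the driver's uapi header;
the header path and node name are assumptions, not taken from this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/misc/bcm_vk.h>	/* assumed uapi header location */

	int main(void)
	{
		struct vk_reset reset = { 0 };
		int fd = open("/dev/bcm-vk.0", O_RDWR);	/* assumed node name */

		if (fd < 0)
			return 1;
		/* the calling pid becomes reset_pid and is spared the SIGKILL */
		if (ioctl(fd, VK_IOCTL_RESET, &reset) == 0 && reset.arg2)
			printf("special reset, type 0x%x\n",
			       (unsigned int)reset.arg2);
		close(fd);
		return 0;
	}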
Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-11-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk.h | 1 + drivers/misc/bcm-vk/bcm_vk_dev.c | 158 +++++++++++++++++++++++++++++++++++++-- drivers/misc/bcm-vk/bcm_vk_msg.c | 40 +++++++++- 3 files changed, 191 insertions(+), 8 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index d847a512d0ed..a1d0bf6e694c 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -468,6 +468,7 @@ irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id); irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id); int bcm_vk_msg_init(struct bcm_vk *vk); void bcm_vk_msg_remove(struct bcm_vk *vk); +void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk); int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync); void bcm_vk_blk_drv_access(struct bcm_vk *vk); s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk); diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index 5d82f02c0f27..e572a7b18fab 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -504,7 +504,9 @@ void bcm_vk_blk_drv_access(struct bcm_vk *vk) int i; /* - * kill all the apps + * kill all the apps except for the process that is resetting. + * If not called during reset, reset_pid will be 0, and all will be + * killed. */ spin_lock(&vk->ctx_lock); @@ -515,10 +517,12 @@ void bcm_vk_blk_drv_access(struct bcm_vk *vk) struct bcm_vk_ctx *ctx; list_for_each_entry(ctx, &vk->pid_ht[i].head, node) { - dev_dbg(&vk->pdev->dev, - "Send kill signal to pid %d\n", - ctx->pid); - kill_pid(find_vpid(ctx->pid), SIGKILL, 1); + if (ctx->pid != vk->reset_pid) { + dev_dbg(&vk->pdev->dev, + "Send kill signal to pid %d\n", + ctx->pid); + kill_pid(find_vpid(ctx->pid), SIGKILL, 1); + } } } spin_unlock(&vk->ctx_lock); @@ -1001,6 +1005,49 @@ err_idx: return ret; } +static int bcm_vk_reset_successful(struct bcm_vk *vk) +{ + struct device *dev = &vk->pdev->dev; + u32 fw_status, reset_reason; + int ret = -EAGAIN; + + /* + * Reset could be triggered when the card in several state: + * i) in bootROM + * ii) after boot1 + * iii) boot2 running + * + * i) & ii) - no status bits will be updated. If vkboot1 + * runs automatically after reset, it will update the reason + * to be unknown reason + * iii) - reboot reason match + deinit done. 
+ */ + fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS); + /* immediate exit if interface goes down */ + if (BCM_VK_INTF_IS_DOWN(fw_status)) { + dev_err(dev, "PCIe Intf Down!\n"); + goto reset_exit; + } + + reset_reason = (fw_status & VK_FWSTS_RESET_REASON_MASK); + if ((reset_reason == VK_FWSTS_RESET_MBOX_DB) || + (reset_reason == VK_FWSTS_RESET_UNKNOWN)) + ret = 0; + + /* + * if some of the deinit bits are set, but done + * bit is not, this is a failure if triggered while boot2 is running + */ + if ((fw_status & VK_FWSTS_DEINIT_TRIGGERED) && + !(fw_status & VK_FWSTS_RESET_DONE)) + ret = -EAGAIN; + +reset_exit: + dev_dbg(dev, "FW status = 0x%x ret %d\n", fw_status, ret); + + return ret; +} + static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val) { vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE); @@ -1010,12 +1057,16 @@ static int bcm_vk_trigger_reset(struct bcm_vk *vk) { u32 i; u32 value, boot_status; + bool is_stdalone, is_boot2; static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME, BAR_INTF_VER, BAR_CARD_VOLTAGE, BAR_CARD_TEMPERATURE, BAR_CARD_PWR_AND_THRE }; + /* clean up before pressing the door bell */ + bcm_vk_drain_msg_on_reset(vk); + vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY); /* make tag '\0' terminated */ vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG); @@ -1026,6 +1077,11 @@ static int bcm_vk_trigger_reset(struct bcm_vk *vk) for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++) vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i)); + memset(&vk->card_info, 0, sizeof(vk->card_info)); + memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info)); + memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info)); + memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts)); + /* * When boot request fails, the CODE_PUSH_OFFSET stays persistent. * Allowing us to debug the failure. When we call reset, @@ -1046,17 +1102,103 @@ static int bcm_vk_trigger_reset(struct bcm_vk *vk) } vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL); + /* special reset handling */ + is_stdalone = boot_status & BOOT_STDALONE_RUNNING; + is_boot2 = (boot_status & BOOT_STATE_MASK) == BOOT2_RUNNING; + if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) { + /* + * if card is in ramdump mode, it is hitting an error. 
Don't + * reset the reboot reason as it will contain valid info that + * is important - simply use special reset + */ + vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS); + return VK_BAR0_RESET_RAMPDUMP; + } else if (is_stdalone && !is_boot2) { + dev_info(&vk->pdev->dev, "Hard reset on Standalone mode"); + bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD); + return VK_BAR0_RESET_DB_HARD; + } + /* reset fw_status with proper reason, and press db */ vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS); bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT); - /* clear other necessary registers records */ + /* clear other necessary registers and alert records */ for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++) vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]); + memset(&vk->host_alert, 0, sizeof(vk->host_alert)); + memset(&vk->peer_alert, 0, sizeof(vk->peer_alert)); + /* clear 4096 bits of bitmap */ + bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE); return 0; } +static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg) +{ + struct device *dev = &vk->pdev->dev; + struct vk_reset reset; + int ret = 0; + u32 ramdump_reset; + int special_reset; + + if (copy_from_user(&reset, arg, sizeof(struct vk_reset))) + return -EFAULT; + + /* check if any download is in-progress, if so return error */ + if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) { + dev_err(dev, "Download operation pending - skip reset.\n"); + return -EPERM; + } + + ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP; + dev_info(dev, "Issue Reset %s\n", + ramdump_reset ? "in ramdump mode" : ""); + + /* + * The following is the sequence of reset: + * - send card level graceful shut down + * - wait enough time for VK to handle its business, stopping DMA etc + * - kill host apps + * - Trigger interrupt with DB + */ + bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0); + + spin_lock(&vk->ctx_lock); + if (!vk->reset_pid) { + vk->reset_pid = task_pid_nr(current); + } else { + dev_err(dev, "Reset already launched by process pid %d\n", + vk->reset_pid); + ret = -EACCES; + } + spin_unlock(&vk->ctx_lock); + if (ret) + goto err_exit; + + bcm_vk_blk_drv_access(vk); + special_reset = bcm_vk_trigger_reset(vk); + + /* + * Wait enough time for card os to deinit + * and populate the reset reason. 
+ */ + msleep(BCM_VK_DEINIT_TIME_MS); + + if (special_reset) { + /* if it is special ramdump reset, return the type to user */ + reset.arg2 = special_reset; + if (copy_to_user(arg, &reset, sizeof(reset))) + ret = -EFAULT; + } else { + ret = bcm_vk_reset_successful(vk); + } + +err_exit: + clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); + return ret; +} + static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -EINVAL; @@ -1075,6 +1217,10 @@ static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = bcm_vk_load_image(vk, argp); break; + case VK_IOCTL_RESET: + ret = bcm_vk_reset(vk, argp); + break; + default: break; } diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c index b05e20a72a8b..eec90494777d 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.c +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -209,6 +209,15 @@ static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid) spin_lock(&vk->ctx_lock); + /* check if it is in reset, if so, don't allow */ + if (vk->reset_pid) { + dev_err(&vk->pdev->dev, + "No context allowed during reset by pid %d\n", + vk->reset_pid); + + goto in_reset_exit; + } + for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) { if (!vk->ctx[i].in_use) { vk->ctx[i].in_use = true; @@ -237,6 +246,7 @@ static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid) init_waitqueue_head(&ctx->rd_wq); all_in_use_exit: +in_reset_exit: spin_unlock(&vk->ctx_lock); return ctx; @@ -381,6 +391,12 @@ static void bcm_vk_drain_all_pend(struct device *dev, num, ctx->idx); } +void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk) +{ + bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL); + bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL); +} + /* * Function to sync up the messages queue info that is provided by BAR1 */ @@ -712,13 +728,22 @@ static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid, /* * don't send down or do anything if message queue is not initialized + * and if it is the reset session, clear it. */ - if (!bcm_vk_drv_access_ok(vk)) + if (!bcm_vk_drv_access_ok(vk)) { + if (vk->reset_pid == pid) + vk->reset_pid = 0; return -EPERM; + } dev_dbg(dev, "No more sessions, shut down pid %d\n", pid); - rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num); + /* only need to do it if it is not the reset process */ + if (vk->reset_pid != pid) + rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num); + else + /* put reset_pid to 0 if it is exiting last session */ + vk->reset_pid = 0; return rc; } @@ -1122,6 +1147,17 @@ ssize_t bcm_vk_write(struct file *p_file, int dir; struct _vk_data *data; + /* + * check if we are in reset, if so, no buffer transfer is + * allowed and return error. + */ + if (vk->reset_pid) { + dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n", + ctx->pid); + rc = -EACCES; + goto write_free_msgid; + } + num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK; if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD) dir = DMA_FROM_DEVICE; -- cgit v1.2.3-58-ga151 From 483050c04738c685e3982c1baec202d0cc1beb0d Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:25 -0800 Subject: misc: bcm-vk: add mmap function for exposing BAR2 Add mmap function that allows host application to open up BAR2 memory for remote spooling out messages from the VK logger. 
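As a usage sketch, spooling the logger then amounts to mapping BAR2 through
the new mmap handler and reading the bytes (a minimal sketch; the device node
name and the one-page mapping length are assumptions for illustration):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/bcm-vk.0", O_RDWR | O_SYNC);
		size_t len = 4096;	/* one page of BAR2 for illustration */
		unsigned char *log;

		if (fd < 0)
			return 1;
		/* pgoff 0 maps the start of BAR2 per the driver's bcm_vk_mmap() */
		log = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
		if (log == MAP_FAILED)
			return 1;
		fwrite(log, 1, len, stdout);	/* spool the raw logger bytes */
		munmap(log, len);
		close(fd);
		return 0;
	}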
Co-developed-by: Desmond Yan Acked-by: Olof Johansson Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-12-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk_dev.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index e572a7b18fab..cac07419f041 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -1199,6 +1199,29 @@ err_exit: return ret; } +static int bcm_vk_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct bcm_vk_ctx *ctx = file->private_data; + struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); + unsigned long pg_size; + + /* only BAR2 is mmap possible, which is bar num 4 due to 64bit */ +#define VK_MMAPABLE_BAR 4 + + pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1) + >> PAGE_SHIFT) + 1; + if (vma->vm_pgoff + vma_pages(vma) > pg_size) + return -EINVAL; + + vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR) + >> PAGE_SHIFT); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + static long bcm_vk_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -EINVAL; @@ -1237,6 +1260,7 @@ static const struct file_operations bcm_vk_fops = { .write = bcm_vk_write, .poll = bcm_vk_poll, .release = bcm_vk_release, + .mmap = bcm_vk_mmap, .unlocked_ioctl = bcm_vk_ioctl, }; -- cgit v1.2.3-58-ga151 From 91ca10d6fa0720e35596c720e494d9c18624418a Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 20 Jan 2021 09:58:27 -0800 Subject: misc: bcm-vk: add ttyVK support Add ttyVK support to driver to allow console access to VK card from host. 
Device node will be in the follow form /dev/bcm-vk.x_ttyVKy where: x is the instance of the VK card y is the tty device number on the VK card Acked-by: Olof Johansson Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210120175827.14820-14-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/Makefile | 3 +- drivers/misc/bcm-vk/bcm_vk.h | 28 ++++ drivers/misc/bcm-vk/bcm_vk_dev.c | 30 +++- drivers/misc/bcm-vk/bcm_vk_tty.c | 333 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 392 insertions(+), 2 deletions(-) create mode 100644 drivers/misc/bcm-vk/bcm_vk_tty.c (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile index 79b4e365c9e6..e4a1486f7209 100644 --- a/drivers/misc/bcm-vk/Makefile +++ b/drivers/misc/bcm-vk/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_BCM_VK) += bcm_vk.o bcm_vk-objs := \ bcm_vk_dev.o \ bcm_vk_msg.o \ - bcm_vk_sg.o + bcm_vk_sg.o \ + bcm_vk_tty.o diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index a1d0bf6e694c..3f37c640a814 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -8,12 +8,14 @@ #include #include +#include #include #include #include #include #include #include +#include #include #include @@ -84,6 +86,9 @@ #define CODEPUSH_BOOT2_ENTRY 0x60000000 #define BAR_CARD_STATUS 0x410 +/* CARD_STATUS definitions */ +#define CARD_STATUS_TTYVK0_READY BIT(0) +#define CARD_STATUS_TTYVK1_READY BIT(1) #define BAR_BOOT1_STDALONE_PROGRESS 0x420 #define BOOT1_STDALONE_SUCCESS (BIT(13) | BIT(14)) @@ -255,6 +260,19 @@ enum pci_barno { #define BCM_VK_NUM_TTY 2 +struct bcm_vk_tty { + struct tty_port port; + u32 to_offset; /* bar offset to use */ + u32 to_size; /* to VK buffer size */ + u32 wr; /* write offset shadow */ + u32 from_offset; /* bar offset to use */ + u32 from_size; /* from VK buffer size */ + u32 rd; /* read offset shadow */ + pid_t pid; + bool irq_enabled; + bool is_opened; /* tracks tty open/close */ +}; + /* VK device max power state, supports 3, full, reduced and low */ #define MAX_OPP 3 #define MAX_CARD_INFO_TAG_SIZE 64 @@ -348,6 +366,12 @@ struct bcm_vk { struct miscdevice miscdev; int devid; /* dev id allocated */ + struct tty_driver *tty_drv; + struct timer_list serial_timer; + struct bcm_vk_tty tty[BCM_VK_NUM_TTY]; + struct workqueue_struct *tty_wq_thread; + struct work_struct tty_wq_work; + /* Reference-counting to handle file operations */ struct kref kref; @@ -466,6 +490,7 @@ int bcm_vk_release(struct inode *inode, struct file *p_file); void bcm_vk_release_data(struct kref *kref); irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id); irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id); +irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id); int bcm_vk_msg_init(struct bcm_vk *vk); void bcm_vk_msg_remove(struct bcm_vk *vk); void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk); @@ -476,6 +501,9 @@ int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type, const pid_t pid, const u32 q_num); void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val); int bcm_vk_auto_load_all_images(struct bcm_vk *vk); +int bcm_vk_tty_init(struct bcm_vk *vk, char *name); +void bcm_vk_tty_exit(struct bcm_vk *vk); +void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk); void bcm_vk_hb_init(struct bcm_vk *vk); void bcm_vk_hb_deinit(struct bcm_vk *vk); void bcm_vk_handle_notf(struct bcm_vk *vk); diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index cac07419f041..c3d2bba68ef1 100644 --- 
a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -525,6 +525,7 @@ void bcm_vk_blk_drv_access(struct bcm_vk *vk) } } } + bcm_vk_tty_terminate_tty_user(vk); spin_unlock(&vk->ctx_lock); } @@ -1384,6 +1385,20 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } vk->num_irqs++; + for (i = 0; + (i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq); + i++, vk->num_irqs++) { + err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs), + bcm_vk_tty_irqhandler, + IRQF_SHARED, DRV_MODULE_NAME, vk); + if (err) { + dev_err(dev, "failed request tty IRQ %d for MSIX %d\n", + pdev->irq + vk->num_irqs, vk->num_irqs + 1); + goto err_irq; + } + vk->tty[i].irq_enabled = true; + } + id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL); if (id < 0) { err = id; @@ -1436,6 +1451,11 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_destroy_workqueue; } + snprintf(name, sizeof(name), KBUILD_MODNAME ".%d_ttyVK", id); + err = bcm_vk_tty_init(vk, name); + if (err) + goto err_unregister_panic_notifier; + /* * lets trigger an auto download. We don't want to do it serially here * because at probing time, it is not supposed to block for a long time. @@ -1444,7 +1464,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (auto_load) { if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) { if (bcm_vk_trigger_autoload(vk)) - goto err_unregister_panic_notifier; + goto err_bcm_vk_tty_exit; } else { dev_err(dev, "Auto-load skipped - BROM not in proper state (0x%x)\n", @@ -1459,6 +1479,9 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_bcm_vk_tty_exit: + bcm_vk_tty_exit(vk); + err_unregister_panic_notifier: atomic_notifier_chain_unregister(&panic_notifier_list, &vk->panic_nb); @@ -1536,6 +1559,9 @@ static void bcm_vk_remove(struct pci_dev *pdev) atomic_notifier_chain_unregister(&panic_notifier_list, &vk->panic_nb); + bcm_vk_msg_remove(vk); + bcm_vk_tty_exit(vk); + if (vk->tdma_vaddr) dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE, vk->tdma_vaddr, vk->tdma_addr); @@ -1554,6 +1580,8 @@ static void bcm_vk_remove(struct pci_dev *pdev) cancel_work_sync(&vk->wq_work); destroy_workqueue(vk->wq_thread); + cancel_work_sync(&vk->tty_wq_work); + destroy_workqueue(vk->tty_wq_thread); for (i = 0; i < MAX_BAR; i++) { if (vk->bar[i]) diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c new file mode 100644 index 000000000000..be3964949b63 --- /dev/null +++ b/drivers/misc/bcm-vk/bcm_vk_tty.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018-2020 Broadcom. 
+ */ + +#include +#include +#include + +#include "bcm_vk.h" + +/* TTYVK base offset is 0x30000 into BAR1 */ +#define BAR1_TTYVK_BASE_OFFSET 0x300000 +/* Each TTYVK channel (TO or FROM) is 0x10000 */ +#define BAR1_TTYVK_CHAN_OFFSET 0x100000 +/* Each TTYVK channel has TO and FROM, hence the * 2 */ +#define BAR1_TTYVK_BASE(index) (BAR1_TTYVK_BASE_OFFSET + \ + ((index) * BAR1_TTYVK_CHAN_OFFSET * 2)) +/* TO TTYVK channel base comes before FROM for each index */ +#define TO_TTYK_BASE(index) BAR1_TTYVK_BASE(index) +#define FROM_TTYK_BASE(index) (BAR1_TTYVK_BASE(index) + \ + BAR1_TTYVK_CHAN_OFFSET) + +struct bcm_vk_tty_chan { + u32 reserved; + u32 size; + u32 wr; + u32 rd; + u32 *data; +}; + +#define VK_BAR_CHAN(v, DIR, e) ((v)->DIR##_offset \ + + offsetof(struct bcm_vk_tty_chan, e)) +#define VK_BAR_CHAN_SIZE(v, DIR) VK_BAR_CHAN(v, DIR, size) +#define VK_BAR_CHAN_WR(v, DIR) VK_BAR_CHAN(v, DIR, wr) +#define VK_BAR_CHAN_RD(v, DIR) VK_BAR_CHAN(v, DIR, rd) +#define VK_BAR_CHAN_DATA(v, DIR, off) (VK_BAR_CHAN(v, DIR, data) + (off)) + +#define VK_BAR0_REGSEG_TTY_DB_OFFSET 0x86c + +/* Poll every 1/10 of second - temp hack till we use MSI interrupt */ +#define SERIAL_TIMER_VALUE (HZ / 10) + +static void bcm_vk_tty_poll(struct timer_list *t) +{ + struct bcm_vk *vk = from_timer(vk, t, serial_timer); + + queue_work(vk->tty_wq_thread, &vk->tty_wq_work); + mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE); +} + +irqreturn_t bcm_vk_tty_irqhandler(int irq, void *dev_id) +{ + struct bcm_vk *vk = dev_id; + + queue_work(vk->tty_wq_thread, &vk->tty_wq_work); + + return IRQ_HANDLED; +} + +static void bcm_vk_tty_wq_handler(struct work_struct *work) +{ + struct bcm_vk *vk = container_of(work, struct bcm_vk, tty_wq_work); + struct bcm_vk_tty *vktty; + int card_status; + int count; + unsigned char c; + int i; + int wr; + + card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS); + if (BCM_VK_INTF_IS_DOWN(card_status)) + return; + + for (i = 0; i < BCM_VK_NUM_TTY; i++) { + count = 0; + /* Check the card status that the tty channel is ready */ + if ((card_status & BIT(i)) == 0) + continue; + + vktty = &vk->tty[i]; + + /* Don't increment read index if tty app is closed */ + if (!vktty->is_opened) + continue; + + /* Fetch the wr offset in buffer from VK */ + wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, from)); + + /* safe to ignore until bar read gives proper size */ + if (vktty->from_size == 0) + continue; + + if (wr >= vktty->from_size) { + dev_err(&vk->pdev->dev, + "ERROR: wq handler ttyVK%d wr:0x%x > 0x%x\n", + i, wr, vktty->from_size); + /* Need to signal and close device in this case */ + continue; + } + + /* + * Simple read of circular buffer and + * insert into tty flip buffer + */ + while (vk->tty[i].rd != wr) { + c = vkread8(vk, BAR_1, + VK_BAR_CHAN_DATA(vktty, from, vktty->rd)); + vktty->rd++; + if (vktty->rd >= vktty->from_size) + vktty->rd = 0; + tty_insert_flip_char(&vktty->port, c, TTY_NORMAL); + count++; + } + + if (count) { + tty_flip_buffer_push(&vktty->port); + + /* Update read offset from shadow register to card */ + vkwrite32(vk, vktty->rd, BAR_1, + VK_BAR_CHAN_RD(vktty, from)); + } + } +} + +static int bcm_vk_tty_open(struct tty_struct *tty, struct file *file) +{ + int card_status; + struct bcm_vk *vk; + struct bcm_vk_tty *vktty; + int index; + + /* initialize the pointer in case something fails */ + tty->driver_data = NULL; + + vk = (struct bcm_vk *)dev_get_drvdata(tty->dev); + index = tty->index; + + if (index >= BCM_VK_NUM_TTY) + return -EINVAL; + + vktty = &vk->tty[index]; + + vktty->pid = 
task_pid_nr(current); + vktty->to_offset = TO_TTYK_BASE(index); + vktty->from_offset = FROM_TTYK_BASE(index); + + /* Do not allow tty device to be opened if tty on card not ready */ + card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS); + if (BCM_VK_INTF_IS_DOWN(card_status) || ((card_status & BIT(index)) == 0)) + return -EBUSY; + + /* + * Get shadow registers of the buffer sizes and the "to" write offset + * and "from" read offset + */ + vktty->to_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, to)); + vktty->wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, to)); + vktty->from_size = vkread32(vk, BAR_1, VK_BAR_CHAN_SIZE(vktty, from)); + vktty->rd = vkread32(vk, BAR_1, VK_BAR_CHAN_RD(vktty, from)); + vktty->is_opened = true; + + if (tty->count == 1 && !vktty->irq_enabled) { + timer_setup(&vk->serial_timer, bcm_vk_tty_poll, 0); + mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE); + } + return 0; +} + +static void bcm_vk_tty_close(struct tty_struct *tty, struct file *file) +{ + struct bcm_vk *vk = dev_get_drvdata(tty->dev); + + if (tty->index >= BCM_VK_NUM_TTY) + return; + + vk->tty[tty->index].is_opened = false; + + if (tty->count == 1) + del_timer_sync(&vk->serial_timer); +} + +static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val) +{ + vkwrite32(vk, db_val, BAR_0, + VK_BAR0_REGSEG_DB_BASE + VK_BAR0_REGSEG_TTY_DB_OFFSET); +} + +static int bcm_vk_tty_write(struct tty_struct *tty, + const unsigned char *buffer, + int count) +{ + int index; + struct bcm_vk *vk; + struct bcm_vk_tty *vktty; + int i; + + index = tty->index; + vk = dev_get_drvdata(tty->dev); + vktty = &vk->tty[index]; + + /* Simple write each byte to circular buffer */ + for (i = 0; i < count; i++) { + vkwrite8(vk, buffer[i], BAR_1, + VK_BAR_CHAN_DATA(vktty, to, vktty->wr)); + vktty->wr++; + if (vktty->wr >= vktty->to_size) + vktty->wr = 0; + } + /* Update write offset from shadow register to card */ + vkwrite32(vk, vktty->wr, BAR_1, VK_BAR_CHAN_WR(vktty, to)); + bcm_vk_tty_doorbell(vk, 0); + + return count; +} + +static int bcm_vk_tty_write_room(struct tty_struct *tty) +{ + struct bcm_vk *vk = dev_get_drvdata(tty->dev); + + return vk->tty[tty->index].to_size - 1; +} + +static const struct tty_operations serial_ops = { + .open = bcm_vk_tty_open, + .close = bcm_vk_tty_close, + .write = bcm_vk_tty_write, + .write_room = bcm_vk_tty_write_room, +}; + +int bcm_vk_tty_init(struct bcm_vk *vk, char *name) +{ + int i; + int err; + struct tty_driver *tty_drv; + struct device *dev = &vk->pdev->dev; + + tty_drv = tty_alloc_driver + (BCM_VK_NUM_TTY, + TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); + if (IS_ERR(tty_drv)) + return PTR_ERR(tty_drv); + + /* Save struct tty_driver for uninstalling the device */ + vk->tty_drv = tty_drv; + + /* initialize the tty driver */ + tty_drv->driver_name = KBUILD_MODNAME; + tty_drv->name = kstrdup(name, GFP_KERNEL); + if (!tty_drv->name) { + err = -ENOMEM; + goto err_put_tty_driver; + } + tty_drv->type = TTY_DRIVER_TYPE_SERIAL; + tty_drv->subtype = SERIAL_TYPE_NORMAL; + tty_drv->init_termios = tty_std_termios; + tty_set_operations(tty_drv, &serial_ops); + + /* register the tty driver */ + err = tty_register_driver(tty_drv); + if (err) { + dev_err(dev, "tty_register_driver failed\n"); + goto err_kfree_tty_name; + } + + for (i = 0; i < BCM_VK_NUM_TTY; i++) { + struct device *tty_dev; + + tty_port_init(&vk->tty[i].port); + tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv, + i, dev); + if (IS_ERR(tty_dev)) { + err = PTR_ERR(tty_dev); + goto unwind; + } + dev_set_drvdata(tty_dev, vk); 
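+		/*
+		 * the tty callbacks recover the bcm_vk instance through
+		 * dev_get_drvdata(tty->dev), set up here
+		 */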
+ vk->tty[i].is_opened = false; + } + + INIT_WORK(&vk->tty_wq_work, bcm_vk_tty_wq_handler); + vk->tty_wq_thread = create_singlethread_workqueue("tty"); + if (!vk->tty_wq_thread) { + dev_err(dev, "Fail to create tty workqueue thread\n"); + err = -ENOMEM; + goto unwind; + } + return 0; + +unwind: + while (--i >= 0) + tty_port_unregister_device(&vk->tty[i].port, tty_drv, i); + tty_unregister_driver(tty_drv); + +err_kfree_tty_name: + kfree(tty_drv->name); + tty_drv->name = NULL; + +err_put_tty_driver: + put_tty_driver(tty_drv); + + return err; +} + +void bcm_vk_tty_exit(struct bcm_vk *vk) +{ + int i; + + del_timer_sync(&vk->serial_timer); + for (i = 0; i < BCM_VK_NUM_TTY; ++i) { + tty_port_unregister_device(&vk->tty[i].port, + vk->tty_drv, + i); + tty_port_destroy(&vk->tty[i].port); + } + tty_unregister_driver(vk->tty_drv); + + kfree(vk->tty_drv->name); + vk->tty_drv->name = NULL; + + put_tty_driver(vk->tty_drv); +} + +void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk) +{ + struct bcm_vk_tty *vktty; + int i; + + for (i = 0; i < BCM_VK_NUM_TTY; ++i) { + vktty = &vk->tty[i]; + if (vktty->pid) + kill_pid(find_vpid(vktty->pid), SIGKILL, 1); + } +} -- cgit v1.2.3-58-ga151 From fc716ffb9afe789dcf8a5d792e3980f817e0ad1b Mon Sep 17 00:00:00 2001 From: mateng Date: Tue, 26 Jan 2021 16:40:10 +0800 Subject: misc/vmw_vmci: fix typo change 'addres' to 'address' Signed-off-by: mateng Link: https://lore.kernel.org/r/20210126084010.1941-1-ayowoe@163.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_queue_pair.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h index 00017fc29a52..c4e6e924d9be 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.h +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h @@ -104,7 +104,7 @@ struct vmci_qp_dtch_info { struct vmci_qp_page_store { /* Reference to pages backing the queue pair. */ u64 pages; - /* Length of pageList/virtual addres range (in pages). */ + /* Length of pageList/virtual address range (in pages). */ u32 len; }; -- cgit v1.2.3-58-ga151 From e8266c4c3307d2a64f8ebcb22323347be8854742 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Wed, 20 Jan 2021 08:32:20 -0800 Subject: VMCI: Stop log spew when qp allocation isn't possible VMCI queue pair allocation is disabled, if a VM is in FT mode. In these cases, VMware Tools may still once in a while attempt to create a vSocket stream connection, resulting in multiple warnings in the kernel logs. Therefore downgrade the error log to a debug log. 
Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/1611160340-30158-1-git-send-email-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_queue_pair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index c49065887e8f..a3691c16d45a 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -1207,7 +1207,7 @@ static int qp_alloc_guest_work(struct vmci_handle *handle, } else { result = qp_alloc_hypercall(queue_pair_entry); if (result < VMCI_SUCCESS) { - pr_warn("qp_alloc_hypercall result = %d\n", result); + pr_devel("qp_alloc_hypercall result = %d\n", result); goto error; } } -- cgit v1.2.3-58-ga151 From 5a16c535409f8dcb7568e20737309e3027ae3e49 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Wed, 20 Jan 2021 08:32:40 -0800 Subject: VMCI: Use set_page_dirty_lock() when unregistering guest memory When the VMCI host support releases guest memory in the case where the VM was killed, the pinned guest pages aren't locked. Use set_page_dirty_lock() instead of set_page_dirty(). Testing done: Killed VM while having an active VMCI based vSocket connection and observed warning from ext4. With this fix, no warning was observed. Ran various vSocket tests without issues. Fixes: 06164d2b72aa ("VMCI: queue pairs implementation.") Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/1611160360-30299-1-git-send-email-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_queue_pair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index a3691c16d45a..525ef96d3a07 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -630,7 +630,7 @@ static void qp_release_pages(struct page **pages, for (i = 0; i < num_pages; i++) { if (dirty) - set_page_dirty(pages[i]); + set_page_dirty_lock(pages[i]); put_page(pages[i]); pages[i] = NULL; -- cgit v1.2.3-58-ga151 From 7eecea89e44f64e60f1410483e79a0cab18ad580 Mon Sep 17 00:00:00 2001 From: Jorgen Hansen Date: Wed, 20 Jan 2021 08:33:40 -0800 Subject: VMCI: Enforce queuepair max size for IOCTL_VMCI_QUEUEPAIR_ALLOC When create the VMCI queue pair tracking data structures on the host side, the IOCTL for creating the VMCI queue pair didn't validate the queue pair size parameters. This change adds checks for this. This avoids a memory allocation issue in qp_host_alloc_queue, as reported by nslusarek@gmx.net. The check in qp_host_alloc_queue has also been updated to enforce the maximum queue pair size as defined by VMCI_MAX_GUEST_QP_MEMORY. The fix has been verified using sample code supplied by nslusarek@gmx.net. 
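The first condition in QP_SIZES_ARE_VALID() is an unsigned-overflow guard:
if the sum of the two queue sizes wraps around, the result is necessarily
smaller than either operand, so comparing the sum against max() rejects it.
A standalone sketch of the same check (names are illustrative, not from the
patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* mirrors the patch's check: sum must not wrap and must fit the cap */
	static bool qp_sizes_are_valid(uint64_t prod, uint64_t cons, uint64_t cap)
	{
		uint64_t max = prod > cons ? prod : cons;

		return prod + cons >= max && prod + cons <= cap;
	}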
Reported-by: nslusarek@gmx.net Reviewed-by: Vishnu Dasa Signed-off-by: Jorgen Hansen Link: https://lore.kernel.org/r/1611160420-30573-1-git-send-email-jhansen@vmware.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_queue_pair.c | 12 ++++++++---- include/linux/vmw_vmci_defs.h | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 525ef96d3a07..d787ddecee77 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -237,7 +237,9 @@ static struct qp_list qp_guest_endpoints = { #define QPE_NUM_PAGES(_QPE) ((u32) \ (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) - +#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \ + ((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \ + (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY) /* * Frees kernel VA space for a given queue and its queue header, and @@ -528,7 +530,7 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) u64 num_pages; const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); - if (size > SIZE_MAX - PAGE_SIZE) + if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE)) return NULL; num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; if (num_pages > (SIZE_MAX - queue_size) / @@ -1929,6 +1931,9 @@ int vmci_qp_broker_alloc(struct vmci_handle handle, struct vmci_qp_page_store *page_store, struct vmci_ctx *context) { + if (!QP_SIZES_ARE_VALID(produce_size, consume_size)) + return VMCI_ERROR_NO_RESOURCES; + return qp_broker_alloc(handle, peer, flags, priv_flags, produce_size, consume_size, page_store, context, NULL, NULL, NULL, NULL); @@ -2685,8 +2690,7 @@ int vmci_qpair_alloc(struct vmci_qp **qpair, * used by the device is NO_RESOURCES, so use that here too. */ - if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) || - produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY) + if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize)) return VMCI_ERROR_NO_RESOURCES; retval = vmci_route(&src, &dst, false, &route); diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index be0afe6f379b..e36cb114c188 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -66,7 +66,7 @@ enum { * consists of at least two pages, the memory limit also dictates the * number of queue pairs a guest can create. */ -#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024) +#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024)) #define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2) /* @@ -80,7 +80,7 @@ enum { * too much kernel memory (especially on vmkernel). We limit a queuepair to * 32 KB, or 16 KB per queue for symmetrical pairs. */ -#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024) +#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024)) /* * We have a fixed set of resource IDs available in the VMX. -- cgit v1.2.3-58-ga151 From c41e21dca8dc6899611c6df1fa4e6b55097ceada Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sat, 23 Jan 2021 08:00:03 -0800 Subject: sgi-xp: remove h from printk format specifier This change fixes the checkpatch warning described in this commit commit cbacb5ab0aa0 ("docs: printk-formats: Stop encouraging use of unnecessary %h[xudi] and %hh[xudi]") Standard integer promotion is already done and %hx and %hhx is useless so do not encourage the use of %hh[xudi] or %h[xudi]. 
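The underlying C rule, shown with a quick userspace example: arguments narrower than int are promoted before a variadic function ever sees them, so the 'h'/'hh' length modifiers buy nothing (msg->size in this driver is apparently a 16-bit field, given the original %hu):

#include <stdio.h>

int main(void)
{
	unsigned short size = 0xbeef;

	/*
	 * Default argument promotions widen 'size' to int before the call,
	 * so both specifiers print the same value; %u simply skips the
	 * redundant narrowing step that %hu requests.
	 */
	printf("%hu\n", size);	/* 48879 */
	printf("%u\n", size);	/* 48879 as well */
	return 0;
}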
Reviewed-By: Steve Wahl
Signed-off-by: Tom Rix
Link: https://lore.kernel.org/r/20210123160003.1777766-1-trix@redhat.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/sgi-xp/xpnet.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 23837d0d6f4a..2508f83bdc3f 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -208,7 +208,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 	} else {
 		dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
 		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
-			"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
+			"xp_remote_memcpy(0x%p, 0x%p, %u)\n", dst,
 			(void *)msg->buf_pa, msg->size);

 		ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
@@ -218,7 +218,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 			 * !!! appears in_use and we can't just call
 			 * !!! dev_kfree_skb.
 			 */
-			dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
+			dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%x) "
				"returned error=0x%x\n", dst,
				(void *)msg->buf_pa, msg->size, ret);

--
cgit v1.2.3-58-ga151


From 8ba59e9dee31246fc34b4d4bec032093e9c06510 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Fri, 22 Jan 2021 13:43:58 +0200
Subject: misc: pti: Remove driver for deprecated platform

Intel Moorestown and Medfield are quite old Intel Atom based 32-bit
platforms, which were in limited use in some Android phones, tablets
and consumer electronics more than eight years ago.

No bugs or problems have been reported outside of Intel for any of
these platforms in years. It seems no real users exist who run a more
or less fresh kernel on them. The commit 05f4434bc130 ("ASoC: Intel:
remove mfld_machine") is also in line with this theory.

Due to the above, and to reduce the burden of supporting outdated
drivers, remove support for these platforms completely.

Cc: Alexander Shishkin
Acked-by: Linus Walleij
Acked-by: Arnd Bergmann
Acked-by: Alexander Shishkin
Signed-off-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20210122114358.39299-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Greg Kroah-Hartman
---
 Documentation/driver-api/pti_intel_mid.rst | 108 ----
 drivers/misc/Kconfig                       |  13 -
 drivers/misc/Makefile                      |   1 -
 drivers/misc/pti.c                         | 978 -----------------------
 drivers/tty/Makefile                       |   2 -
 drivers/tty/n_tracerouter.c                | 233 -------
 drivers/tty/n_tracesink.c                  | 228 -------
 drivers/tty/n_tracesink.h                  |  26 -
 include/linux/intel-pti.h                  |  35 --
 9 files changed, 1624 deletions(-)
 delete mode 100644 Documentation/driver-api/pti_intel_mid.rst
 delete mode 100644 drivers/misc/pti.c
 delete mode 100644 drivers/tty/n_tracerouter.c
 delete mode 100644 drivers/tty/n_tracesink.c
 delete mode 100644 drivers/tty/n_tracesink.h
 delete mode 100644 include/linux/intel-pti.h

(limited to 'drivers/misc')

diff --git a/Documentation/driver-api/pti_intel_mid.rst b/Documentation/driver-api/pti_intel_mid.rst
deleted file mode 100644
index bacc2a4ee89f..000000000000
--- a/Documentation/driver-api/pti_intel_mid.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=============
-Intel MID PTI
-=============
-
-The Intel MID PTI project is HW implemented in Intel Atom
-system-on-a-chip designs based on the Parallel Trace
-Interface for MIPI P1149.7 cJTAG standard.
The kernel solution -for this platform involves the following files:: - - ./include/linux/pti.h - ./drivers/.../n_tracesink.h - ./drivers/.../n_tracerouter.c - ./drivers/.../n_tracesink.c - ./drivers/.../pti.c - -pti.c is the driver that enables various debugging features -popular on platforms from certain mobile manufacturers. -n_tracerouter.c and n_tracesink.c allow extra system information to -be collected and routed to the pti driver, such as trace -debugging data from a modem. Although n_tracerouter -and n_tracesink are a part of the complete PTI solution, -these two line disciplines can work separately from -pti.c and route any data stream from one /dev/tty node -to another /dev/tty node via kernel-space. This provides -a stable, reliable connection that will not break unless -the user-space application shuts down (plus avoids -kernel->user->kernel context switch overheads of routing -data). - -An example debugging usage for this driver system: - - * Hook /dev/ttyPTI0 to syslogd. Opening this port will also start - a console device to further capture debugging messages to PTI. - * Hook /dev/ttyPTI1 to modem debugging data to write to PTI HW. - This is where n_tracerouter and n_tracesink are used. - * Hook /dev/pti to a user-level debugging application for writing - to PTI HW. - * `Use mipi_` Kernel Driver API in other device drivers for - debugging to PTI by first requesting a PTI write address via - mipi_request_masterchannel(1). - -Below is example pseudo-code on how a 'privileged' application -can hook up n_tracerouter and n_tracesink to any tty on -a system. 'Privileged' means the application has enough -privileges to successfully manipulate the ldisc drivers -but is not just blindly executing as 'root'. Keep in mind -the use of ioctl(,TIOCSETD,) is not specific to the n_tracerouter -and n_tracesink line discpline drivers but is a generic -operation for a program to use a line discpline driver -on a tty port other than the default n_tty: - -.. code-block:: c - - /////////// To hook up n_tracerouter and n_tracesink ///////// - - // Note that n_tracerouter depends on n_tracesink. - #include - #define ONE_TTY "/dev/ttyOne" - #define TWO_TTY "/dev/ttyTwo" - - // needed global to hand onto ldisc connection - static int g_fd_source = -1; - static int g_fd_sink = -1; - - // these two vars used to grab LDISC values from loaded ldisc drivers - // in OS. Look at /proc/tty/ldiscs to get the right numbers from - // the ldiscs loaded in the system. - int source_ldisc_num, sink_ldisc_num = -1; - int retval; - - g_fd_source = open(ONE_TTY, O_RDWR); // must be R/W - g_fd_sink = open(TWO_TTY, O_RDWR); // must be R/W - - if (g_fd_source <= 0) || (g_fd_sink <= 0) { - // doubt you'll want to use these exact error lines of code - printf("Error on open(). errno: %d\n",errno); - return errno; - } - - retval = ioctl(g_fd_sink, TIOCSETD, &sink_ldisc_num); - if (retval < 0) { - printf("Error on ioctl(). errno: %d\n", errno); - return errno; - } - - retval = ioctl(g_fd_source, TIOCSETD, &source_ldisc_num); - if (retval < 0) { - printf("Error on ioctl(). errno: %d\n", errno); - return errno; - } - - /////////// To disconnect n_tracerouter and n_tracesink //////// - - // First make sure data through the ldiscs has stopped. - - // Second, disconnect ldiscs. This provides a - // little cleaner shutdown on tty stack. 
- sink_ldisc_num = 0; - source_ldisc_num = 0; - ioctl(g_fd_uart, TIOCSETD, &sink_ldisc_num); - ioctl(g_fd_gadget, TIOCSETD, &source_ldisc_num); - - // Three, program closes connection, and cleanup: - close(g_fd_uart); - close(g_fd_gadget); - g_fd_uart = g_fd_gadget = NULL; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8085213913cc..f532c59bb59b 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -104,19 +104,6 @@ config PHANTOM If you choose to build module, its name will be phantom. If unsure, say N here. -config INTEL_MID_PTI - tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" - depends on PCI && TTY && (X86_INTEL_MID || COMPILE_TEST) - help - The PTI (Parallel Trace Interface) driver directs - trace data routed from various parts in the system out - through an Intel Penwell PTI port and out of the mobile - device for analysis with a debugging tool (Lauterbach or Fido). - - You should select this driver if the target kernel is meant for - an Intel Atom (non-netbook) mobile device containing a MIPI - P1149.7 standard implementation. - config TIFM_CORE tristate "TI Flash Media interface support" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 456aa8a4c896..99b6f15a3c70 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_IBMVMC) += ibmvmc.o obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o -obj-$(CONFIG_INTEL_MID_PTI) += pti.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o obj-$(CONFIG_ICS932S401) += ics932s401.o diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c deleted file mode 100644 index 7236ae527b19..000000000000 --- a/drivers/misc/pti.c +++ /dev/null @@ -1,978 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * pti.c - PTI driver for cJTAG data extration - * - * Copyright (C) Intel 2010 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The PTI (Parallel Trace Interface) driver directs trace data routed from - * various parts in the system out through the Intel Penwell PTI port and - * out of the mobile device for analysis with a debugging tool - * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, - * compact JTAG, standard. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRIVERNAME "pti" -#define PCINAME "pciPTI" -#define TTYNAME "ttyPTI" -#define CHARNAME "pti" -#define PTITTY_MINOR_START 0 -#define PTITTY_MINOR_NUM 2 -#define MAX_APP_IDS 16 /* 128 channel ids / u8 bit size */ -#define MAX_OS_IDS 16 /* 128 channel ids / u8 bit size */ -#define MAX_MODEM_IDS 16 /* 128 channel ids / u8 bit size */ -#define MODEM_BASE_ID 71 /* modem master ID address */ -#define CONTROL_ID 72 /* control master ID address */ -#define CONSOLE_ID 73 /* console master ID address */ -#define OS_BASE_ID 74 /* base OS master ID address */ -#define APP_BASE_ID 80 /* base App master ID address */ -#define CONTROL_FRAME_LEN 32 /* PTI control frame maximum size */ -#define USER_COPY_SIZE 8192 /* 8Kb buffer for user space copy */ -#define APERTURE_14 0x3800000 /* offset to first OS write addr */ -#define APERTURE_LEN 0x400000 /* address length */ - -struct pti_tty { - struct pti_masterchannel *mc; -}; - -struct pti_dev { - struct tty_port port[PTITTY_MINOR_NUM]; - unsigned long pti_addr; - unsigned long aperture_base; - void __iomem *pti_ioaddr; - u8 ia_app[MAX_APP_IDS]; - u8 ia_os[MAX_OS_IDS]; - u8 ia_modem[MAX_MODEM_IDS]; -}; - -/* - * This protects access to ia_app, ia_os, and ia_modem, - * which keeps track of channels allocated in - * an aperture write id. - */ -static DEFINE_MUTEX(alloclock); - -static const struct pci_device_id pci_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)}, - {0} -}; - -static struct tty_driver *pti_tty_driver; -static struct pti_dev *drv_data; - -static unsigned int pti_console_channel; -static unsigned int pti_control_channel; - -/** - * pti_write_to_aperture()- The private write function to PTI HW. - * - * @mc: The 'aperture'. It's part of a write address that holds - * a master and channel ID. - * @buf: Data being written to the HW that will ultimately be seen - * in a debugging tool (Fido, Lauterbach). - * @len: Size of buffer. - * - * Since each aperture is specified by a unique - * master/channel ID, no two processes will be writing - * to the same aperture at the same time so no lock is required. The - * PTI-Output agent will send these out in the order that they arrived, and - * thus, it will intermix these messages. The debug tool can then later - * regroup the appropriate message segments together reconstituting each - * message. - */ -static void pti_write_to_aperture(struct pti_masterchannel *mc, - u8 *buf, - int len) -{ - int dwordcnt; - int final; - int i; - u32 ptiword; - u32 __iomem *aperture; - u8 *p = buf; - - /* - * calculate the aperture offset from the base using the master and - * channel id's. - */ - aperture = drv_data->pti_ioaddr + (mc->master << 15) - + (mc->channel << 8); - - dwordcnt = len >> 2; - final = len - (dwordcnt << 2); /* final = trailing bytes */ - if (final == 0 && dwordcnt != 0) { /* always need a final dword */ - final += 4; - dwordcnt--; - } - - for (i = 0; i < dwordcnt; i++) { - ptiword = be32_to_cpu(*(u32 *)p); - p += 4; - iowrite32(ptiword, aperture); - } - - aperture += PTI_LASTDWORD_DTS; /* adding DTS signals that is EOM */ - - ptiword = 0; - for (i = 0; i < final; i++) - ptiword |= *p++ << (24-(8*i)); - - iowrite32(ptiword, aperture); - return; -} - -/** - * pti_control_frame_built_and_sent()- control frame build and send function. - * - * @mc: The master / channel structure on which the function - * built a control frame. 
- * @thread_name: The thread name associated with the master / channel or - * 'NULL' if using the 'current' global variable. - * - * To be able to post process the PTI contents on host side, a control frame - * is added before sending any PTI content. So the host side knows on - * each PTI frame the name of the thread using a dedicated master / channel. - * The thread name is retrieved from 'current' global variable if 'thread_name' - * is 'NULL', else it is retrieved from 'thread_name' parameter. - * This function builds this frame and sends it to a master ID CONTROL_ID. - * The overhead is only 32 bytes since the driver only writes to HW - * in 32 byte chunks. - */ -static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, - const char *thread_name) -{ - /* - * Since we access the comm member in current's task_struct, we only - * need to be as large as what 'comm' in that structure is. - */ - char comm[TASK_COMM_LEN]; - struct pti_masterchannel mccontrol = {.master = CONTROL_ID, - .channel = 0}; - const char *thread_name_p; - const char *control_format = "%3d %3d %s"; - u8 control_frame[CONTROL_FRAME_LEN]; - - if (!thread_name) { - if (!in_interrupt()) - get_task_comm(comm, current); - else - strncpy(comm, "Interrupt", TASK_COMM_LEN); - - /* Absolutely ensure our buffer is zero terminated. */ - comm[TASK_COMM_LEN-1] = 0; - thread_name_p = comm; - } else { - thread_name_p = thread_name; - } - - mccontrol.channel = pti_control_channel; - pti_control_channel = (pti_control_channel + 1) & 0x7f; - - snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master, - mc->channel, thread_name_p); - pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame)); -} - -/** - * pti_write_full_frame_to_aperture()- high level function to - * write to PTI. - * - * @mc: The 'aperture'. It's part of a write address that holds - * a master and channel ID. - * @buf: Data being written to the HW that will ultimately be seen - * in a debugging tool (Fido, Lauterbach). - * @len: Size of buffer. - * - * All threads sending data (either console, user space application, ...) - * are calling the high level function to write to PTI meaning that it is - * possible to add a control frame before sending the content. - */ -static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc, - const unsigned char *buf, - int len) -{ - pti_control_frame_built_and_sent(mc, NULL); - pti_write_to_aperture(mc, (u8 *)buf, len); -} - -/** - * get_id()- Allocate a master and channel ID. - * - * @id_array: an array of bits representing what channel - * id's are allocated for writing. - * @max_ids: The max amount of available write IDs to use. - * @base_id: The starting SW channel ID, based on the Intel - * PTI arch. - * @thread_name: The thread name associated with the master / channel or - * 'NULL' if using the 'current' global variable. - * - * Returns: - * pti_masterchannel struct with master, channel ID address - * 0 for error - * - * Each bit in the arrays ia_app and ia_os correspond to a master and - * channel id. The bit is one if the id is taken and 0 if free. For - * every master there are 128 channel id's. 
- */ -static struct pti_masterchannel *get_id(u8 *id_array, - int max_ids, - int base_id, - const char *thread_name) -{ - struct pti_masterchannel *mc; - int i, j, mask; - - mc = kmalloc(sizeof(struct pti_masterchannel), GFP_KERNEL); - if (mc == NULL) - return NULL; - - /* look for a byte with a free bit */ - for (i = 0; i < max_ids; i++) - if (id_array[i] != 0xff) - break; - if (i == max_ids) { - kfree(mc); - return NULL; - } - /* find the bit in the 128 possible channel opportunities */ - mask = 0x80; - for (j = 0; j < 8; j++) { - if ((id_array[i] & mask) == 0) - break; - mask >>= 1; - } - - /* grab it */ - id_array[i] |= mask; - mc->master = base_id; - mc->channel = ((i & 0xf)<<3) + j; - /* write new master Id / channel Id allocation to channel control */ - pti_control_frame_built_and_sent(mc, thread_name); - return mc; -} - -/* - * The following three functions: - * pti_request_mastercahannel(), mipi_release_masterchannel() - * and pti_writedata() are an API for other kernel drivers to - * access PTI. - */ - -/** - * pti_request_masterchannel()- Kernel API function used to allocate - * a master, channel ID address - * to write to PTI HW. - * - * @type: 0- request Application master, channel aperture ID - * write address. - * 1- request OS master, channel aperture ID write - * address. - * 2- request Modem master, channel aperture ID - * write address. - * Other values, error. - * @thread_name: The thread name associated with the master / channel or - * 'NULL' if using the 'current' global variable. - * - * Returns: - * pti_masterchannel struct - * 0 for error - */ -struct pti_masterchannel *pti_request_masterchannel(u8 type, - const char *thread_name) -{ - struct pti_masterchannel *mc; - - mutex_lock(&alloclock); - - switch (type) { - - case 0: - mc = get_id(drv_data->ia_app, MAX_APP_IDS, - APP_BASE_ID, thread_name); - break; - - case 1: - mc = get_id(drv_data->ia_os, MAX_OS_IDS, - OS_BASE_ID, thread_name); - break; - - case 2: - mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS, - MODEM_BASE_ID, thread_name); - break; - default: - mc = NULL; - } - - mutex_unlock(&alloclock); - return mc; -} -EXPORT_SYMBOL_GPL(pti_request_masterchannel); - -/** - * pti_release_masterchannel()- Kernel API function used to release - * a master, channel ID address - * used to write to PTI HW. - * - * @mc: master, channel apeture ID address to be released. This - * will de-allocate the structure via kfree(). - */ -void pti_release_masterchannel(struct pti_masterchannel *mc) -{ - u8 master, channel, i; - - mutex_lock(&alloclock); - - if (mc) { - master = mc->master; - channel = mc->channel; - - if (master == APP_BASE_ID) { - i = channel >> 3; - drv_data->ia_app[i] &= ~(0x80>>(channel & 0x7)); - } else if (master == OS_BASE_ID) { - i = channel >> 3; - drv_data->ia_os[i] &= ~(0x80>>(channel & 0x7)); - } else { - i = channel >> 3; - drv_data->ia_modem[i] &= ~(0x80>>(channel & 0x7)); - } - - kfree(mc); - } - - mutex_unlock(&alloclock); -} -EXPORT_SYMBOL_GPL(pti_release_masterchannel); - -/** - * pti_writedata()- Kernel API function used to write trace - * debugging data to PTI HW. - * - * @mc: Master, channel aperture ID address to write to. - * Null value will return with no write occurring. - * @buf: Trace debuging data to write to the PTI HW. - * Null value will return with no write occurring. - * @count: Size of buf. Value of 0 or a negative number will - * return with no write occuring. 
- */ -void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count) -{ - /* - * since this function is exported, this is treated like an - * API function, thus, all parameters should - * be checked for validity. - */ - if ((mc != NULL) && (buf != NULL) && (count > 0)) - pti_write_to_aperture(mc, buf, count); - return; -} -EXPORT_SYMBOL_GPL(pti_writedata); - -/* - * for the tty_driver_*() basic function descriptions, see tty_driver.h. - * Specific header comments made for PTI-related specifics. - */ - -/** - * pti_tty_driver_open()- Open an Application master, channel aperture - * ID to the PTI device via tty device. - * - * @tty: tty interface. - * @filp: filp interface pased to tty_port_open() call. - * - * Returns: - * int, 0 for success - * otherwise, fail value - * - * The main purpose of using the tty device interface is for - * each tty port to have a unique PTI write aperture. In an - * example use case, ttyPTI0 gets syslogd and an APP aperture - * ID and ttyPTI1 is where the n_tracesink ldisc hooks to route - * modem messages into PTI. Modem trace data does not have to - * go to ttyPTI1, but ttyPTI0 and ttyPTI1 do need to be distinct - * master IDs. These messages go through the PTI HW and out of - * the handheld platform and to the Fido/Lauterbach device. - */ -static int pti_tty_driver_open(struct tty_struct *tty, struct file *filp) -{ - /* - * we actually want to allocate a new channel per open, per - * system arch. HW gives more than plenty channels for a single - * system task to have its own channel to write trace data. This - * also removes a locking requirement for the actual write - * procedure. - */ - return tty_port_open(tty->port, tty, filp); -} - -/** - * pti_tty_driver_close()- close tty device and release Application - * master, channel aperture ID to the PTI device via tty device. - * - * @tty: tty interface. - * @filp: filp interface pased to tty_port_close() call. - * - * The main purpose of using the tty device interface is to route - * syslog daemon messages to the PTI HW and out of the handheld platform - * and to the Fido/Lauterbach device. - */ -static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp) -{ - tty_port_close(tty->port, tty, filp); -} - -/** - * pti_tty_install()- Used to set up specific master-channels - * to tty ports for organizational purposes when - * tracing viewed from debuging tools. - * - * @driver: tty driver information. - * @tty: tty struct containing pti information. - * - * Returns: - * 0 for success - * otherwise, error - */ -static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty) -{ - int idx = tty->index; - struct pti_tty *pti_tty_data; - int ret = tty_standard_install(driver, tty); - - if (ret == 0) { - pti_tty_data = kmalloc(sizeof(struct pti_tty), GFP_KERNEL); - if (pti_tty_data == NULL) - return -ENOMEM; - - if (idx == PTITTY_MINOR_START) - pti_tty_data->mc = pti_request_masterchannel(0, NULL); - else - pti_tty_data->mc = pti_request_masterchannel(2, NULL); - - if (pti_tty_data->mc == NULL) { - kfree(pti_tty_data); - return -ENXIO; - } - tty->driver_data = pti_tty_data; - } - - return ret; -} - -/** - * pti_tty_cleanup()- Used to de-allocate master-channel resources - * tied to tty's of this driver. - * - * @tty: tty struct containing pti information. 
- */ -static void pti_tty_cleanup(struct tty_struct *tty) -{ - struct pti_tty *pti_tty_data = tty->driver_data; - if (pti_tty_data == NULL) - return; - pti_release_masterchannel(pti_tty_data->mc); - kfree(pti_tty_data); - tty->driver_data = NULL; -} - -/** - * pti_tty_driver_write()- Write trace debugging data through the char - * interface to the PTI HW. Part of the misc device implementation. - * - * @tty: tty struct containing pti information. - * @buf: trace data to be written. - * @len: # of byte to write. - * - * Returns: - * int, # of bytes written - * otherwise, error - */ -static int pti_tty_driver_write(struct tty_struct *tty, - const unsigned char *buf, int len) -{ - struct pti_tty *pti_tty_data = tty->driver_data; - if ((pti_tty_data != NULL) && (pti_tty_data->mc != NULL)) { - pti_write_to_aperture(pti_tty_data->mc, (u8 *)buf, len); - return len; - } - /* - * we can't write to the pti hardware if the private driver_data - * and the mc address is not there. - */ - else - return -EFAULT; -} - -/** - * pti_tty_write_room()- Always returns 2048. - * - * @tty: contains tty info of the pti driver. - */ -static int pti_tty_write_room(struct tty_struct *tty) -{ - return 2048; -} - -/** - * pti_char_open()- Open an Application master, channel aperture - * ID to the PTI device. Part of the misc device implementation. - * - * @inode: not used. - * @filp: Output- will have a masterchannel struct set containing - * the allocated application PTI aperture write address. - * - * Returns: - * int, 0 for success - * otherwise, a fail value - */ -static int pti_char_open(struct inode *inode, struct file *filp) -{ - struct pti_masterchannel *mc; - - /* - * We really do want to fail immediately if - * pti_request_masterchannel() fails, - * before assigning the value to filp->private_data. - * Slightly easier to debug if this driver needs debugging. - */ - mc = pti_request_masterchannel(0, NULL); - if (mc == NULL) - return -ENOMEM; - filp->private_data = mc; - return 0; -} - -/** - * pti_char_release()- Close a char channel to the PTI device. Part - * of the misc device implementation. - * - * @inode: Not used in this implementaiton. - * @filp: Contains private_data that contains the master, channel - * ID to be released by the PTI device. - * - * Returns: - * always 0 - */ -static int pti_char_release(struct inode *inode, struct file *filp) -{ - pti_release_masterchannel(filp->private_data); - filp->private_data = NULL; - return 0; -} - -/** - * pti_char_write()- Write trace debugging data through the char - * interface to the PTI HW. Part of the misc device implementation. - * - * @filp: Contains private data which is used to obtain - * master, channel write ID. - * @data: trace data to be written. - * @len: # of byte to write. - * @ppose: Not used in this function implementation. - * - * Returns: - * int, # of bytes written - * otherwise, error value - * - * Notes: From side discussions with Alan Cox and experimenting - * with PTI debug HW like Nokia's Fido box and Lauterbach - * devices, 8192 byte write buffer used by USER_COPY_SIZE was - * deemed an appropriate size for this type of usage with - * debugging HW. 
- */ -static ssize_t pti_char_write(struct file *filp, const char __user *data, - size_t len, loff_t *ppose) -{ - struct pti_masterchannel *mc; - void *kbuf; - const char __user *tmp; - size_t size = USER_COPY_SIZE; - size_t n = 0; - - tmp = data; - mc = filp->private_data; - - kbuf = kmalloc(size, GFP_KERNEL); - if (kbuf == NULL) { - pr_err("%s(%d): buf allocation failed\n", - __func__, __LINE__); - return -ENOMEM; - } - - do { - if (len - n > USER_COPY_SIZE) - size = USER_COPY_SIZE; - else - size = len - n; - - if (copy_from_user(kbuf, tmp, size)) { - kfree(kbuf); - return n ? n : -EFAULT; - } - - pti_write_to_aperture(mc, kbuf, size); - n += size; - tmp += size; - - } while (len > n); - - kfree(kbuf); - return len; -} - -static const struct tty_operations pti_tty_driver_ops = { - .open = pti_tty_driver_open, - .close = pti_tty_driver_close, - .write = pti_tty_driver_write, - .write_room = pti_tty_write_room, - .install = pti_tty_install, - .cleanup = pti_tty_cleanup -}; - -static const struct file_operations pti_char_driver_ops = { - .owner = THIS_MODULE, - .write = pti_char_write, - .open = pti_char_open, - .release = pti_char_release, -}; - -static struct miscdevice pti_char_driver = { - .minor = MISC_DYNAMIC_MINOR, - .name = CHARNAME, - .fops = &pti_char_driver_ops -}; - -/** - * pti_console_write()- Write to the console that has been acquired. - * - * @c: Not used in this implementaiton. - * @buf: Data to be written. - * @len: Length of buf. - */ -static void pti_console_write(struct console *c, const char *buf, unsigned len) -{ - static struct pti_masterchannel mc = {.master = CONSOLE_ID, - .channel = 0}; - - mc.channel = pti_console_channel; - pti_console_channel = (pti_console_channel + 1) & 0x7f; - - pti_write_full_frame_to_aperture(&mc, buf, len); -} - -/** - * pti_console_device()- Return the driver tty structure and set the - * associated index implementation. - * - * @c: Console device of the driver. - * @index: index associated with c. - * - * Returns: - * always value of pti_tty_driver structure when this function - * is called. - */ -static struct tty_driver *pti_console_device(struct console *c, int *index) -{ - *index = c->index; - return pti_tty_driver; -} - -/** - * pti_console_setup()- Initialize console variables used by the driver. - * - * @c: Not used. - * @opts: Not used. - * - * Returns: - * always 0. - */ -static int pti_console_setup(struct console *c, char *opts) -{ - pti_console_channel = 0; - pti_control_channel = 0; - return 0; -} - -/* - * pti_console struct, used to capture OS printk()'s and shift - * out to the PTI device for debugging. This cannot be - * enabled upon boot because of the possibility of eating - * any serial console printk's (race condition discovered). - * The console should be enabled upon when the tty port is - * used for the first time. Since the primary purpose for - * the tty port is to hook up syslog to it, the tty port - * will be open for a really long time. - */ -static struct console pti_console = { - .name = TTYNAME, - .write = pti_console_write, - .device = pti_console_device, - .setup = pti_console_setup, - .flags = CON_PRINTBUFFER, - .index = 0, -}; - -/** - * pti_port_activate()- Used to start/initialize any items upon - * first opening of tty_port(). - * - * @port: The tty port number of the PTI device. - * @tty: The tty struct associated with this device. 
- * - * Returns: - * always returns 0 - * - * Notes: The primary purpose of the PTI tty port 0 is to hook - * the syslog daemon to it; thus this port will be open for a - * very long time. - */ -static int pti_port_activate(struct tty_port *port, struct tty_struct *tty) -{ - if (port->tty->index == PTITTY_MINOR_START) - console_start(&pti_console); - return 0; -} - -/** - * pti_port_shutdown()- Used to stop/shutdown any items upon the - * last tty port close. - * - * @port: The tty port number of the PTI device. - * - * Notes: The primary purpose of the PTI tty port 0 is to hook - * the syslog daemon to it; thus this port will be open for a - * very long time. - */ -static void pti_port_shutdown(struct tty_port *port) -{ - if (port->tty->index == PTITTY_MINOR_START) - console_stop(&pti_console); -} - -static const struct tty_port_operations tty_port_ops = { - .activate = pti_port_activate, - .shutdown = pti_port_shutdown, -}; - -/* - * Note the _probe() call sets everything up and ties the char and tty - * to successfully detecting the PTI device on the pci bus. - */ - -/** - * pti_pci_probe()- Used to detect pti on the pci bus and set - * things up in the driver. - * - * @pdev: pci_dev struct values for pti. - * @ent: pci_device_id struct for pti driver. - * - * Returns: - * 0 for success - * otherwise, error - */ -static int pti_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - unsigned int a; - int retval; - int pci_bar = 1; - - dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__, - __func__, __LINE__, pdev->vendor, pdev->device); - - retval = misc_register(&pti_char_driver); - if (retval) { - pr_err("%s(%d): CHAR registration failed of pti driver\n", - __func__, __LINE__); - pr_err("%s(%d): Error value returned: %d\n", - __func__, __LINE__, retval); - goto err; - } - - retval = pci_enable_device(pdev); - if (retval != 0) { - dev_err(&pdev->dev, - "%s: pci_enable_device() returned error %d\n", - __func__, retval); - goto err_unreg_misc; - } - - drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); - if (drv_data == NULL) { - retval = -ENOMEM; - dev_err(&pdev->dev, - "%s(%d): kmalloc() returned NULL memory.\n", - __func__, __LINE__); - goto err_disable_pci; - } - drv_data->pti_addr = pci_resource_start(pdev, pci_bar); - - retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev)); - if (retval != 0) { - dev_err(&pdev->dev, - "%s(%d): pci_request_region() returned error %d\n", - __func__, __LINE__, retval); - goto err_free_dd; - } - drv_data->aperture_base = drv_data->pti_addr+APERTURE_14; - drv_data->pti_ioaddr = - ioremap((u32)drv_data->aperture_base, - APERTURE_LEN); - if (!drv_data->pti_ioaddr) { - retval = -ENOMEM; - goto err_rel_reg; - } - - pci_set_drvdata(pdev, drv_data); - - for (a = 0; a < PTITTY_MINOR_NUM; a++) { - struct tty_port *port = &drv_data->port[a]; - tty_port_init(port); - port->ops = &tty_port_ops; - - tty_port_register_device(port, pti_tty_driver, a, &pdev->dev); - } - - register_console(&pti_console); - - return 0; -err_rel_reg: - pci_release_region(pdev, pci_bar); -err_free_dd: - kfree(drv_data); -err_disable_pci: - pci_disable_device(pdev); -err_unreg_misc: - misc_deregister(&pti_char_driver); -err: - return retval; -} - -/** - * pti_pci_remove()- Driver exit method to remove PTI from - * PCI bus. - * @pdev: variable containing pci info of PTI. 
- */ -static void pti_pci_remove(struct pci_dev *pdev) -{ - struct pti_dev *drv_data = pci_get_drvdata(pdev); - unsigned int a; - - unregister_console(&pti_console); - - for (a = 0; a < PTITTY_MINOR_NUM; a++) { - tty_unregister_device(pti_tty_driver, a); - tty_port_destroy(&drv_data->port[a]); - } - - iounmap(drv_data->pti_ioaddr); - kfree(drv_data); - pci_release_region(pdev, 1); - pci_disable_device(pdev); - - misc_deregister(&pti_char_driver); -} - -static struct pci_driver pti_pci_driver = { - .name = PCINAME, - .id_table = pci_ids, - .probe = pti_pci_probe, - .remove = pti_pci_remove, -}; - -/** - * pti_init()- Overall entry/init call to the pti driver. - * It starts the registration process with the kernel. - * - * Returns: - * int __init, 0 for success - * otherwise value is an error - * - */ -static int __init pti_init(void) -{ - int retval; - - /* First register module as tty device */ - - pti_tty_driver = alloc_tty_driver(PTITTY_MINOR_NUM); - if (pti_tty_driver == NULL) { - pr_err("%s(%d): Memory allocation failed for ptiTTY driver\n", - __func__, __LINE__); - return -ENOMEM; - } - - pti_tty_driver->driver_name = DRIVERNAME; - pti_tty_driver->name = TTYNAME; - pti_tty_driver->major = 0; - pti_tty_driver->minor_start = PTITTY_MINOR_START; - pti_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; - pti_tty_driver->subtype = SYSTEM_TYPE_SYSCONS; - pti_tty_driver->flags = TTY_DRIVER_REAL_RAW | - TTY_DRIVER_DYNAMIC_DEV; - pti_tty_driver->init_termios = tty_std_termios; - - tty_set_operations(pti_tty_driver, &pti_tty_driver_ops); - - retval = tty_register_driver(pti_tty_driver); - if (retval) { - pr_err("%s(%d): TTY registration failed of pti driver\n", - __func__, __LINE__); - pr_err("%s(%d): Error value returned: %d\n", - __func__, __LINE__, retval); - - goto put_tty; - } - - retval = pci_register_driver(&pti_pci_driver); - if (retval) { - pr_err("%s(%d): PCI registration failed of pti driver\n", - __func__, __LINE__); - pr_err("%s(%d): Error value returned: %d\n", - __func__, __LINE__, retval); - goto unreg_tty; - } - - return 0; -unreg_tty: - tty_unregister_driver(pti_tty_driver); -put_tty: - put_tty_driver(pti_tty_driver); - pti_tty_driver = NULL; - return retval; -} - -/** - * pti_exit()- Unregisters this module as a tty and pci driver. 
- */ -static void __exit pti_exit(void) -{ - tty_unregister_driver(pti_tty_driver); - pci_unregister_driver(&pti_pci_driver); - put_tty_driver(pti_tty_driver); -} - -module_init(pti_init); -module_exit(pti_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ken Mills, Jay Freyensee"); -MODULE_DESCRIPTION("PTI Driver"); - diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile index b3ccae932660..730de6bf048b 100644 --- a/drivers/tty/Makefile +++ b/drivers/tty/Makefile @@ -9,8 +9,6 @@ obj-$(CONFIG_AUDIT) += tty_audit.o obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o obj-$(CONFIG_N_HDLC) += n_hdlc.o obj-$(CONFIG_N_GSM) += n_gsm.o -obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o -obj-$(CONFIG_TRACE_SINK) += n_tracesink.o obj-$(CONFIG_R3964) += n_r3964.o obj-y += vt/ diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c deleted file mode 100644 index 4479af4d2fa5..000000000000 --- a/drivers/tty/n_tracerouter.c +++ /dev/null @@ -1,233 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * n_tracerouter.c - Trace data router through tty space - * - * Copyright (C) Intel 2011 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This trace router uses the Linux line discipline framework to route - * trace data coming from a HW Modem to a PTI (Parallel Trace Module) port. - * The solution is not specific to a HW modem and this line disciple can - * be used to route any stream of data in kernel space. - * This is part of a solution for the P1149.7, compact JTAG, standard. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "n_tracesink.h" - -/* - * Other ldisc drivers use 65536 which basically means, - * 'I can always accept 64k' and flow control is off. - * This number is deemed appropriate for this driver. - */ -#define RECEIVE_ROOM 65536 -#define DRIVERNAME "n_tracerouter" - -/* - * struct to hold private configuration data for this ldisc. - * opencalled is used to hold if this ldisc has been opened. - * kref_tty holds the tty reference the ldisc sits on top of. - */ -struct tracerouter_data { - u8 opencalled; - struct tty_struct *kref_tty; -}; -static struct tracerouter_data *tr_data; - -/* lock for when tty reference is being used */ -static DEFINE_MUTEX(routelock); - -/** - * n_tracerouter_open() - Called when a tty is opened by a SW entity. - * @tty: terminal device to the ldisc. - * - * Return: - * 0 for success. - * - * Caveats: This should only be opened one time per SW entity. - */ -static int n_tracerouter_open(struct tty_struct *tty) -{ - int retval = -EEXIST; - - mutex_lock(&routelock); - if (tr_data->opencalled == 0) { - - tr_data->kref_tty = tty_kref_get(tty); - if (tr_data->kref_tty == NULL) { - retval = -EFAULT; - } else { - tr_data->opencalled = 1; - tty->disc_data = tr_data; - tty->receive_room = RECEIVE_ROOM; - tty_driver_flush_buffer(tty); - retval = 0; - } - } - mutex_unlock(&routelock); - return retval; -} - -/** - * n_tracerouter_close() - close connection - * @tty: terminal device to the ldisc. - * - * Called when a software entity wants to close a connection. 
- */ -static void n_tracerouter_close(struct tty_struct *tty) -{ - struct tracerouter_data *tptr = tty->disc_data; - - mutex_lock(&routelock); - WARN_ON(tptr->kref_tty != tr_data->kref_tty); - tty_driver_flush_buffer(tty); - tty_kref_put(tr_data->kref_tty); - tr_data->kref_tty = NULL; - tr_data->opencalled = 0; - tty->disc_data = NULL; - mutex_unlock(&routelock); -} - -/** - * n_tracerouter_read() - read request from user space - * @tty: terminal device passed into the ldisc. - * @file: pointer to open file object. - * @buf: pointer to the data buffer that gets eventually returned. - * @nr: number of bytes of the data buffer that is returned. - * - * function that allows read() functionality in userspace. By default if this - * is not implemented it returns -EIO. This module is functioning like a - * router via n_tracerouter_receivebuf(), and there is no real requirement - * to implement this function. However, an error return value other than - * -EIO should be used just to show that there was an intent not to have - * this function implemented. Return value based on read() man pages. - * - * Return: - * -EINVAL - */ -static ssize_t n_tracerouter_read(struct tty_struct *tty, struct file *file, - unsigned char __user *buf, size_t nr) { - return -EINVAL; -} - -/** - * n_tracerouter_write() - Function that allows write() in userspace. - * @tty: terminal device passed into the ldisc. - * @file: pointer to open file object. - * @buf: pointer to the data buffer that gets eventually returned. - * @nr: number of bytes of the data buffer that is returned. - * - * By default if this is not implemented, it returns -EIO. - * This should not be implemented, ever, because - * 1. this driver is functioning like a router via - * n_tracerouter_receivebuf() - * 2. No writes to HW will ever go through this line discpline driver. - * However, an error return value other than -EIO should be used - * just to show that there was an intent not to have this function - * implemented. Return value based on write() man pages. - * - * Return: - * -EINVAL - */ -static ssize_t n_tracerouter_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t nr) { - return -EINVAL; -} - -/** - * n_tracerouter_receivebuf() - Routing function for driver. - * @tty: terminal device passed into the ldisc. It's assumed - * tty will never be NULL. - * @cp: buffer, block of characters to be eventually read by - * someone, somewhere (user read() call or some kernel function). - * @fp: flag buffer. - * @count: number of characters (aka, bytes) in cp. - * - * This function takes the input buffer, cp, and passes it to - * an external API function for processing. - */ -static void n_tracerouter_receivebuf(struct tty_struct *tty, - const unsigned char *cp, - char *fp, int count) -{ - mutex_lock(&routelock); - n_tracesink_datadrain((u8 *) cp, count); - mutex_unlock(&routelock); -} - -/* - * Flush buffer is not impelemented as the ldisc has no internal buffering - * so the tty_driver_flush_buffer() is sufficient for this driver's needs. - */ - -static struct tty_ldisc_ops tty_ptirouter_ldisc = { - .owner = THIS_MODULE, - .magic = TTY_LDISC_MAGIC, - .name = DRIVERNAME, - .open = n_tracerouter_open, - .close = n_tracerouter_close, - .read = n_tracerouter_read, - .write = n_tracerouter_write, - .receive_buf = n_tracerouter_receivebuf -}; - -/** - * n_tracerouter_init - module initialisation - * - * Registers this module as a line discipline driver. - * - * Return: - * 0 for success, any other value error. 
- */ -static int __init n_tracerouter_init(void) -{ - int retval; - - tr_data = kzalloc(sizeof(struct tracerouter_data), GFP_KERNEL); - if (tr_data == NULL) - return -ENOMEM; - - - /* Note N_TRACEROUTER is defined in linux/tty.h */ - retval = tty_register_ldisc(N_TRACEROUTER, &tty_ptirouter_ldisc); - if (retval < 0) { - pr_err("%s: Registration failed: %d\n", __func__, retval); - kfree(tr_data); - } - return retval; -} - -/** - * n_tracerouter_exit - module unload - * - * Removes this module as a line discipline driver. - */ -static void __exit n_tracerouter_exit(void) -{ - int retval = tty_unregister_ldisc(N_TRACEROUTER); - - if (retval < 0) - pr_err("%s: Unregistration failed: %d\n", __func__, retval); - else - kfree(tr_data); -} - -module_init(n_tracerouter_init); -module_exit(n_tracerouter_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jay Freyensee"); -MODULE_ALIAS_LDISC(N_TRACEROUTER); -MODULE_DESCRIPTION("Trace router ldisc driver"); diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c deleted file mode 100644 index d96ba82cc356..000000000000 --- a/drivers/tty/n_tracesink.c +++ /dev/null @@ -1,228 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * n_tracesink.c - Trace data router and sink path through tty space. - * - * Copyright (C) Intel 2011 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The trace sink uses the Linux line discipline framework to receive - * trace data coming from the PTI source line discipline driver - * to a user-desired tty port, like USB. - * This is to provide a way to extract modem trace data on - * devices that do not have a PTI HW module, or just need modem - * trace data to come out of a different HW output port. - * This is part of a solution for the P1149.7, compact JTAG, standard. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "n_tracesink.h" - -/* - * Other ldisc drivers use 65536 which basically means, - * 'I can always accept 64k' and flow control is off. - * This number is deemed appropriate for this driver. - */ -#define RECEIVE_ROOM 65536 -#define DRIVERNAME "n_tracesink" - -/* - * there is a quirk with this ldisc is he can write data - * to a tty from anyone calling his kernel API, which - * meets customer requirements in the drivers/misc/pti.c - * project. So he needs to know when he can and cannot write when - * the API is called. In theory, the API can be called - * after an init() but before a successful open() which - * would crash the system if tty is not checked. - */ -static struct tty_struct *this_tty; -static DEFINE_MUTEX(writelock); - -/** - * n_tracesink_open() - Called when a tty is opened by a SW entity. - * @tty: terminal device to the ldisc. - * - * Return: - * 0 for success, - * -EFAULT = couldn't get a tty kref n_tracesink will sit - * on top of - * -EEXIST = open() called successfully once and it cannot - * be called again. - * - * Caveats: open() should only be successful the first time a - * SW entity calls it. - */ -static int n_tracesink_open(struct tty_struct *tty) -{ - int retval = -EEXIST; - - mutex_lock(&writelock); - if (this_tty == NULL) { - this_tty = tty_kref_get(tty); - if (this_tty == NULL) { - retval = -EFAULT; - } else { - tty->disc_data = this_tty; - tty_driver_flush_buffer(tty); - retval = 0; - } - } - mutex_unlock(&writelock); - - return retval; -} - -/** - * n_tracesink_close() - close connection - * @tty: terminal device to the ldisc. 
- * - * Called when a software entity wants to close a connection. - */ -static void n_tracesink_close(struct tty_struct *tty) -{ - mutex_lock(&writelock); - tty_driver_flush_buffer(tty); - tty_kref_put(this_tty); - this_tty = NULL; - tty->disc_data = NULL; - mutex_unlock(&writelock); -} - -/** - * n_tracesink_read() - read request from user space - * @tty: terminal device passed into the ldisc. - * @file: pointer to open file object. - * @buf: pointer to the data buffer that gets eventually returned. - * @nr: number of bytes of the data buffer that is returned. - * - * function that allows read() functionality in userspace. By default if this - * is not implemented it returns -EIO. This module is functioning like a - * router via n_tracesink_receivebuf(), and there is no real requirement - * to implement this function. However, an error return value other than - * -EIO should be used just to show that there was an intent not to have - * this function implemented. Return value based on read() man pages. - * - * Return: - * -EINVAL - */ -static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file, - unsigned char __user *buf, size_t nr) { - return -EINVAL; -} - -/** - * n_tracesink_write() - Function that allows write() in userspace. - * @tty: terminal device passed into the ldisc. - * @file: pointer to open file object. - * @buf: pointer to the data buffer that gets eventually returned. - * @nr: number of bytes of the data buffer that is returned. - * - * By default if this is not implemented, it returns -EIO. - * This should not be implemented, ever, because - * 1. this driver is functioning like a router via - * n_tracesink_receivebuf() - * 2. No writes to HW will ever go through this line discpline driver. - * However, an error return value other than -EIO should be used - * just to show that there was an intent not to have this function - * implemented. Return value based on write() man pages. - * - * Return: - * -EINVAL - */ -static ssize_t n_tracesink_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t nr) { - return -EINVAL; -} - -/** - * n_tracesink_datadrain() - Kernel API function used to route - * trace debugging data to user-defined - * port like USB. - * - * @buf: Trace debuging data buffer to write to tty target - * port. Null value will return with no write occurring. - * @count: Size of buf. Value of 0 or a negative number will - * return with no write occuring. - * - * Caveat: If this line discipline does not set the tty it sits - * on top of via an open() call, this API function will not - * call the tty's write() call because it will have no pointer - * to call the write(). - */ -void n_tracesink_datadrain(u8 *buf, int count) -{ - mutex_lock(&writelock); - - if ((buf != NULL) && (count > 0) && (this_tty != NULL)) - this_tty->ops->write(this_tty, buf, count); - - mutex_unlock(&writelock); -} -EXPORT_SYMBOL_GPL(n_tracesink_datadrain); - -/* - * Flush buffer is not impelemented as the ldisc has no internal buffering - * so the tty_driver_flush_buffer() is sufficient for this driver's needs. - */ - -/* - * tty_ldisc function operations for this driver. - */ -static struct tty_ldisc_ops tty_n_tracesink = { - .owner = THIS_MODULE, - .magic = TTY_LDISC_MAGIC, - .name = DRIVERNAME, - .open = n_tracesink_open, - .close = n_tracesink_close, - .read = n_tracesink_read, - .write = n_tracesink_write -}; - -/** - * n_tracesink_init- module initialisation - * - * Registers this module as a line discipline driver. 
- * - * Return: - * 0 for success, any other value error. - */ -static int __init n_tracesink_init(void) -{ - /* Note N_TRACESINK is defined in linux/tty.h */ - int retval = tty_register_ldisc(N_TRACESINK, &tty_n_tracesink); - - if (retval < 0) - pr_err("%s: Registration failed: %d\n", __func__, retval); - - return retval; -} - -/** - * n_tracesink_exit - module unload - * - * Removes this module as a line discipline driver. - */ -static void __exit n_tracesink_exit(void) -{ - int retval = tty_unregister_ldisc(N_TRACESINK); - - if (retval < 0) - pr_err("%s: Unregistration failed: %d\n", __func__, retval); -} - -module_init(n_tracesink_init); -module_exit(n_tracesink_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jay Freyensee"); -MODULE_ALIAS_LDISC(N_TRACESINK); -MODULE_DESCRIPTION("Trace sink ldisc driver"); diff --git a/drivers/tty/n_tracesink.h b/drivers/tty/n_tracesink.h deleted file mode 100644 index 7031d515a700..000000000000 --- a/drivers/tty/n_tracesink.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * n_tracesink.h - Kernel driver API to route trace data in kernel space. - * - * Copyright (C) Intel 2011 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The PTI (Parallel Trace Interface) driver directs trace data routed from - * various parts in the system out through the Intel Penwell PTI port and - * out of the mobile device for analysis with a debugging tool - * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, - * compact JTAG, standard. - * - * This header file is used by n_tracerouter to be able to send the - * data of it's tty port to the tty port this module sits. This - * mechanism can also be used independent of the PTI module. - * - */ - -#ifndef N_TRACESINK_H_ -#define N_TRACESINK_H_ - -void n_tracesink_datadrain(u8 *buf, int count); - -#endif diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h deleted file mode 100644 index fcd841a90f2f..000000000000 --- a/include/linux/intel-pti.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) Intel 2011 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The PTI (Parallel Trace Interface) driver directs trace data routed from - * various parts in the system out through the Intel Penwell PTI port and - * out of the mobile device for analysis with a debugging tool - * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, - * compact JTAG, standard. - * - * This header file will allow other parts of the OS to use the - * interface to write out it's contents for debugging a mobile system. - */ - -#ifndef LINUX_INTEL_PTI_H_ -#define LINUX_INTEL_PTI_H_ - -/* offset for last dword of any PTI message. 
Part of MIPI P1149.7 */ -#define PTI_LASTDWORD_DTS 0x30 - -/* basic structure used as a write address to the PTI HW */ -struct pti_masterchannel { - u8 master; - u8 channel; -}; - -/* the following functions are defined in misc/pti.c */ -void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); -struct pti_masterchannel *pti_request_masterchannel(u8 type, - const char *thread_name); -void pti_release_masterchannel(struct pti_masterchannel *mc); - -#endif /* LINUX_INTEL_PTI_H_ */ -- cgit v1.2.3-58-ga151 From 487709fa1be2f27aa8e7de6c60587b4302a21467 Mon Sep 17 00:00:00 2001 From: zhenwei pi Date: Sun, 10 Jan 2021 19:53:57 +0800 Subject: misc: pvpanic: introduce device capability According to pvpanic spec: https://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/specs/pvpanic.txt The guest should determine pvpanic capability by RDPT, so initialize capability during device probing. There is no need to register panic notifier callback function if no events supported. Before sending event to host side, check capability firstly. Suggested by Greg KH, use sysfs to expose capability to user space, also add new sysfs attribute in document. Signed-off-by: zhenwei pi Reviewed-by: Paolo Bonzini Link: https://lore.kernel.org/r/20210110115358.79100-2-pizhenwei@bytedance.com Signed-off-by: Greg Kroah-Hartman --- .../ABI/testing/sysfs-bus-pci-devices-pvpanic | 12 +++++++++ drivers/misc/pvpanic.c | 31 ++++++++++++++++++---- 2 files changed, 38 insertions(+), 5 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic (limited to 'drivers/misc') diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic b/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic new file mode 100644 index 000000000000..79b7dc31cd55 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic @@ -0,0 +1,12 @@ +What: /sys/devices/pci0000:00/*/QEMU0001:00/capability +Date: Jan 2021 +Contact: zhenwei pi +Description: + Read-only attribute. Capabilities of pvpanic device which + are supported by QEMU. + + Format: %x. 
+
+		Detailed bit definition refers to section
+		from pvpanic device specification:
+		https://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/specs/pvpanic.txt
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 41cab297d66e..80cdfa8f951a 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -19,6 +19,20 @@
 #include
 
 static void __iomem *base;
+static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+
+static ssize_t capability_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%x", capability);
+}
+static DEVICE_ATTR_RO(capability);
+
+static struct attribute *pvpanic_dev_attrs[] = {
+	&dev_attr_capability.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(pvpanic_dev);
 
 MODULE_AUTHOR("Hu Tao ");
 MODULE_DESCRIPTION("pvpanic device driver");
@@ -27,7 +41,8 @@ MODULE_LICENSE("GPL");
 
 static void pvpanic_send_event(unsigned int event)
 {
-	iowrite8(event, base);
+	if (event & capability)
+		iowrite8(event, base);
 }
 
 static int
@@ -73,8 +88,12 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
 			return -EINVAL;
 	}
 
-	atomic_notifier_chain_register(&panic_notifier_list,
-				       &pvpanic_panic_nb);
+	/* initlize capability by RDPT */
+	capability &= ioread8(base);
+
+	if (capability)
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &pvpanic_panic_nb);
 
 	return 0;
 }
@@ -82,8 +101,9 @@ static int pvpanic_mmio_remove(struct platform_device *pdev)
 {
-	atomic_notifier_chain_unregister(&panic_notifier_list,
-					 &pvpanic_panic_nb);
+	if (capability)
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &pvpanic_panic_nb);
 
 	return 0;
 }
@@ -104,6 +124,7 @@ static struct platform_driver pvpanic_mmio_driver = {
 		.name = "pvpanic-mmio",
 		.of_match_table = pvpanic_mmio_match,
 		.acpi_match_table = pvpanic_device_ids,
+		.dev_groups = pvpanic_dev_groups,
 	},
 	.probe = pvpanic_mmio_probe,
 	.remove = pvpanic_mmio_remove,
--
cgit v1.2.3-58-ga151


From 8d6da6575ffec171161d36a06c015142b0049637 Mon Sep 17 00:00:00 2001
From: zhenwei pi
Date: Sun, 10 Jan 2021 19:53:58 +0800
Subject: misc: pvpanic: introduce events device attribute

As suggested by Paolo and Greg, add an 'events' device attribute that
can be used to limit which capabilities the driver uses. The pvpanic
guest driver is then constrained by both the device capability and the
user setting.

Signed-off-by: zhenwei pi
Reviewed-by: Paolo Bonzini
Link: https://lore.kernel.org/r/20210110115358.79100-3-pizhenwei@bytedance.com
Signed-off-by: Greg Kroah-Hartman
---
 .../ABI/testing/sysfs-bus-pci-devices-pvpanic | 12 +++++++++
 drivers/misc/pvpanic.c                        | 30 +++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)

(limited to 'drivers/misc')

diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic b/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic
index 79b7dc31cd55..1936f7324155 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-pvpanic
@@ -10,3 +10,15 @@ Description:
 		Detailed bit definition refers to section
 		from pvpanic device specification:
 		https://git.qemu.org/?p=qemu.git;a=blob_plain;f=docs/specs/pvpanic.txt
+
+What:		/sys/devices/pci0000:00/*/QEMU0001:00/events
+Date:		Jan 2021
+Contact:	zhenwei pi
+Description:
+		RW attribute. Set/get which features in-use. This attribute
+		is used to enable/disable feature(s) of pvpanic device.
+		Notice that this value should be a subset of capability.
+
+		Format: %x.
+
+		Also refer to pvpanic device specification.
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 80cdfa8f951a..b1e4922a7fda 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -20,6 +20,7 @@
 static void __iomem *base;
 static unsigned int capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+static unsigned int events;
 
 static ssize_t capability_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
@@ -28,8 +29,34 @@ static ssize_t capability_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(capability);
 
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%x", events);
+}
+
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned int tmp;
+	int err;
+
+	err = kstrtouint(buf, 16, &tmp);
+	if (err)
+		return err;
+
+	if ((tmp & capability) != tmp)
+		return -EINVAL;
+
+	events = tmp;
+
+	return count;
+
+}
+static DEVICE_ATTR_RW(events);
+
 static struct attribute *pvpanic_dev_attrs[] = {
 	&dev_attr_capability.attr,
+	&dev_attr_events.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(pvpanic_dev);
@@ -41,7 +68,7 @@ MODULE_LICENSE("GPL");
 
 static void pvpanic_send_event(unsigned int event)
 {
-	if (event & capability)
+	if (event & capability & events)
 		iowrite8(event, base);
 }
 
@@ -90,6 +117,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
 
 	/* initlize capability by RDPT */
 	capability &= ioread8(base);
+	events = capability;
 
 	if (capability)
 		atomic_notifier_chain_register(&panic_notifier_list,
--
cgit v1.2.3-58-ga151


From 4c998836d413d8e4ca8749f876b75a74a6fb4c5d Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Fri, 4 Dec 2020 21:58:10 +0200
Subject: habanalabs: update firmware boot interface

Update to the latest firmware hl_boot_if.h file.

Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/include/common/hl_boot_if.h | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index b637dfd69f6e..1115456cca85 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -162,6 +162,10 @@
  *					statuses.
  *					Initialized in: preboot
  *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN	SP SRAM is initialized and available
+ *					for use.
+ *					Initialized in: preboot
+ *
  */
 #define CPU_BOOT_DEV_STS0_SECURITY_EN			(1 << 0)
 #define CPU_BOOT_DEV_STS0_DEBUG_EN			(1 << 1)
@@ -175,6 +179,7 @@
 #define CPU_BOOT_DEV_STS0_DRAM_SCR_EN			(1 << 9)
 #define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN		(1 << 10)
 #define CPU_BOOT_DEV_STS0_PLL_INFO_EN			(1 << 11)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN			(1 << 12)
 #define CPU_BOOT_DEV_STS0_CLK_GATE_EN			(1 << 13)
 #define CPU_BOOT_DEV_STS0_ENABLED			(1 << 31)
--
cgit v1.2.3-58-ga151


From cb6ef0ee6d760281cc0b50711564415f39328bfb Mon Sep 17 00:00:00 2001
From: Ohad Sharabi
Date: Thu, 26 Nov 2020 09:39:26 +0200
Subject: habanalabs: refactor MMU locks code

Remove mmu_cache_lock, as it protects a section that is already
protected by mmu_lock. In addition, wrap the MMU cache invalidate
calls in hl_vm_ctx_fini() with mmu_lock.
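The resulting invalidation flow in hl_vm_ctx_fini() follows this
pattern (a minimal sketch of the outcome, mirroring the memory.c hunk
below rather than quoting the full function):

	mutex_lock(&ctx->mmu_lock);

	/* invalidate the MMU cache once, after the unmapping loop */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);

	mutex_unlock(&ctx->mmu_lock);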
Signed-off-by: Ohad Sharabi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/device.c | 2 -- drivers/misc/habanalabs/common/habanalabs.h | 2 -- drivers/misc/habanalabs/common/memory.c | 4 ++++ drivers/misc/habanalabs/gaudi/gaudi.c | 8 -------- drivers/misc/habanalabs/goya/goya.c | 8 -------- 5 files changed, 4 insertions(+), 20 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 69d04eca767f..ff2bab69e8fc 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -373,7 +373,6 @@ static int device_early_init(struct hl_device *hdev) mutex_init(&hdev->send_cpu_message_lock); mutex_init(&hdev->debug_lock); - mutex_init(&hdev->mmu_cache_lock); INIT_LIST_HEAD(&hdev->cs_mirror_list); spin_lock_init(&hdev->cs_mirror_lock); INIT_LIST_HEAD(&hdev->fpriv_list); @@ -414,7 +413,6 @@ static void device_early_fini(struct hl_device *hdev) { int i; - mutex_destroy(&hdev->mmu_cache_lock); mutex_destroy(&hdev->debug_lock); mutex_destroy(&hdev->send_cpu_message_lock); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 60e16dc4bcac..2dbee9440a5f 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1764,7 +1764,6 @@ struct hl_mmu_funcs { * @asic_funcs: ASIC specific functions. * @asic_specific: ASIC specific information to use only from ASIC files. * @vm: virtual memory manager for MMU. - * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context. * @hwmon_dev: H/W monitor device. * @pm_mng_profile: current power management profile. * @hl_chip_info: ASIC's sensors information. 
@@ -1879,7 +1878,6 @@ struct hl_device { const struct hl_asic_funcs *asic_funcs; void *asic_specific; struct hl_vm vm; - struct mutex mmu_cache_lock; struct device *hwmon_dev; enum hl_pm_mng_profile pm_mng_profile; struct hwmon_chip_info *hl_chip_info; diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 5d4fbdcaefe3..eb09d63bb5a1 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1895,10 +1895,14 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) unmap_device_va(ctx, hnode->vaddr, true); } + mutex_lock(&ctx->mmu_lock); + /* invalidate the cache once after the unmapping loop */ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK); + mutex_unlock(&ctx->mmu_lock); + spin_lock(&vm->idr_lock); idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) if (phys_pg_list->asid == ctx->asid) { diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index b328ddaa64ee..ce81935fb28e 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7330,8 +7330,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, else timeout_usec = MMU_CONFIG_TIMEOUT_USEC; - mutex_lock(&hdev->mmu_cache_lock); - /* L0 & L1 invalidation */ WREG32(mmSTLB_INV_PS, 3); WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++); @@ -7347,8 +7345,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, WREG32(mmSTLB_INV_SET, 0); - mutex_unlock(&hdev->mmu_cache_lock); - if (rc) { dev_err_ratelimited(hdev->dev, "MMU cache invalidation timeout\n"); @@ -7371,8 +7367,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev, hdev->hard_reset_pending) return 0; - mutex_lock(&hdev->mmu_cache_lock); - if (hdev->pldm) timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC; else @@ -7400,8 +7394,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev, 1000, timeout_usec); - mutex_unlock(&hdev->mmu_cache_lock); - if (rc) { dev_err_ratelimited(hdev->dev, "MMU cache invalidation timeout\n"); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 63679a747d2c..19b5bcc8b7ac 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5073,8 +5073,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, else timeout_usec = MMU_CONFIG_TIMEOUT_USEC; - mutex_lock(&hdev->mmu_cache_lock); - /* L0 & L1 invalidation */ WREG32(mmSTLB_INV_ALL_START, 1); @@ -5086,8 +5084,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, 1000, timeout_usec); - mutex_unlock(&hdev->mmu_cache_lock); - if (rc) { dev_err_ratelimited(hdev->dev, "MMU cache invalidation timeout\n"); @@ -5117,8 +5113,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev, else timeout_usec = MMU_CONFIG_TIMEOUT_USEC; - mutex_lock(&hdev->mmu_cache_lock); - /* * TODO: currently invalidate entire L0 & L1 as in regular hard * invalidation. 
Need to apply invalidation of specific cache lines with @@ -5141,8 +5135,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev, 1000, timeout_usec); - mutex_unlock(&hdev->mmu_cache_lock); - if (rc) { dev_err_ratelimited(hdev->dev, "MMU cache invalidation timeout\n"); -- cgit v1.2.3-58-ga151 From 8e39e75a134f4cebc6503f21ba6e51c0cf434497 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 12 Nov 2020 11:03:32 +0200 Subject: habanalabs: Init the VM module for kernel context In order for reserving VA ranges for kernel memory, we need to allow the VM module to be initiated with kernel context. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/context.c | 16 +++++++++++++--- drivers/misc/habanalabs/common/device.c | 15 ++++++++++----- drivers/misc/habanalabs/common/memory.c | 3 ++- drivers/misc/habanalabs/gaudi/gaudi.c | 10 ++++------ 4 files changed, 29 insertions(+), 15 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index f65e6559149b..3d86b83f4ca6 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -56,6 +56,8 @@ static void hl_ctx_fini(struct hl_ctx *ctx) idle_mask); } else { dev_dbg(hdev->dev, "closing kernel context\n"); + hdev->asic_funcs->ctx_fini(ctx); + hl_vm_ctx_fini(ctx); hl_mmu_ctx_fini(ctx); } } @@ -151,11 +153,18 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) if (is_kernel_ctx) { ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */ - rc = hl_mmu_ctx_init(ctx); + rc = hl_vm_ctx_init(ctx); if (rc) { - dev_err(hdev->dev, "Failed to init mmu ctx module\n"); + dev_err(hdev->dev, "Failed to init mem ctx module\n"); + rc = -ENOMEM; goto err_free_cs_pending; } + + rc = hdev->asic_funcs->ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "ctx_init failed\n"); + goto err_vm_ctx_fini; + } } else { ctx->asid = hl_asid_alloc(hdev); if (!ctx->asid) { @@ -194,7 +203,8 @@ err_cb_va_pool_fini: err_vm_ctx_fini: hl_vm_ctx_fini(ctx); err_asid_free: - hl_asid_free(hdev, ctx->asid); + if (ctx->asid != HL_KERNEL_ASID_ID) + hl_asid_free(hdev, ctx->asid); err_free_cs_pending: kfree(ctx->cs_pending); diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index ff2bab69e8fc..29358a949686 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -1312,11 +1312,16 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hdev->compute_ctx = NULL; + hl_debugfs_add_device(hdev); + + /* debugfs nodes are created in hl_ctx_init so it must be called after + * hl_debugfs_add_device. + */ rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); if (rc) { dev_err(hdev->dev, "failed to initialize kernel context\n"); kfree(hdev->kernel_ctx); - goto mmu_fini; + goto remove_device_from_debugfs; } rc = hl_cb_pool_init(hdev); @@ -1325,8 +1330,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) goto release_ctx; } - hl_debugfs_add_device(hdev); - /* * From this point, in case of an error, add char devices and create * sysfs nodes as part of the error flow, to allow debugging. 
@@ -1415,6 +1418,8 @@ release_ctx: if (hl_ctx_put(hdev->kernel_ctx) != 1) dev_err(hdev->dev, "kernel ctx is still alive on initialization failure\n"); +remove_device_from_debugfs: + hl_debugfs_remove_device(hdev); mmu_fini: hl_mmu_fini(hdev); eq_fini: @@ -1513,8 +1518,6 @@ void hl_device_fini(struct hl_device *hdev) device_late_fini(hdev); - hl_debugfs_remove_device(hdev); - /* * Halt the engines and disable interrupts so we won't get any more * completions from H/W and we won't have any accesses from the @@ -1546,6 +1549,8 @@ void hl_device_fini(struct hl_device *hdev) if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) dev_err(hdev->dev, "kernel ctx is still alive\n"); + hl_debugfs_remove_device(hdev); + hl_vm_fini(hdev); hl_mmu_fini(hdev); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index eb09d63bb5a1..fb2c268a262b 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1929,7 +1929,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) * because the user notifies us on allocations. If the user is no more, * all DRAM is available */ - if (!ctx->hdev->asic_prop.dram_supports_virtual_memory) + if (ctx->asid != HL_KERNEL_ASID_ID && + !ctx->hdev->asic_prop.dram_supports_virtual_memory) atomic64_set(&ctx->hdev->dram_used_mem, 0); } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index ce81935fb28e..58ad1630192e 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7868,18 +7868,16 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, static int gaudi_ctx_init(struct hl_ctx *ctx) { + if (ctx->asid == HL_KERNEL_ASID_ID) + return 0; + gaudi_mmu_prepare(ctx->hdev, ctx->asid); return gaudi_internal_cb_pool_init(ctx->hdev, ctx); } static void gaudi_ctx_fini(struct hl_ctx *ctx) { - struct hl_device *hdev = ctx->hdev; - - /* Gaudi will NEVER support more then a single compute context. - * Therefore, don't clear anything unless it is the compute context - */ - if (hdev->compute_ctx != ctx) + if (ctx->asid == HL_KERNEL_ASID_ID) return; gaudi_internal_cb_pool_fini(ctx->hdev, ctx); -- cgit v1.2.3-58-ga151 From ac6fdbfe2ea833b610a51bc6fc078fbce8457d97 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 3 Dec 2020 16:59:28 +0200 Subject: habanalabs/gaudi: support CS with no completion As part of the staged submission feature, we need Gaudi to support command submissions that will never get a completion. 
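The parser now reserves room for the two trailing MSG_PROT packets
(the completion write and the MSI-X interrupt) only when a completion
is requested; the sizing rule added below amounts to this sketch:

	if (parser->completion)
		parser->patched_cb_size = parser->user_cb_size +
					  sizeof(struct packet_msg_prot) * 2;
	else
		parser->patched_cb_size = parser->user_cb_size;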
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 1 + drivers/misc/habanalabs/common/habanalabs.h | 2 ++ drivers/misc/habanalabs/gaudi/gaudi.c | 10 +++++++--- 3 files changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index b2b3d2b0f808..ccb4972a555f 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -225,6 +225,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) parser.queue_type = job->queue_type; parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb; job->patched_cb = NULL; + parser.completion = true; rc = hdev->asic_funcs->cs_parser(hdev, &parser); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 2dbee9440a5f..dee73a04411b 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1223,6 +1223,7 @@ struct hl_cs_job { * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't * have streams so the engine can't be busy by another * stream. + * @completion: true if we need completion for this CS. */ struct hl_cs_parser { struct hl_cb *user_cb; @@ -1237,6 +1238,7 @@ struct hl_cs_parser { u8 job_id; u8 is_kernel_allocated_cb; u8 contains_dma_pkt; + u8 completion; }; /* diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 58ad1630192e..0324114ab4ee 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -5060,7 +5060,8 @@ static int gaudi_validate_cb(struct hl_device *hdev, * 1. A packet that will act as a completion packet * 2. A packet that will generate MSI-X interrupt */ - parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2; + if (parser->completion) + parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2; return rc; } @@ -5287,8 +5288,11 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev, * 1. A packet that will act as a completion packet * 2. A packet that will generate MSI interrupt */ - parser->patched_cb_size = parser->user_cb_size + - sizeof(struct packet_msg_prot) * 2; + if (parser->completion) + parser->patched_cb_size = parser->user_cb_size + + sizeof(struct packet_msg_prot) * 2; + else + parser->patched_cb_size = parser->user_cb_size; rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, parser->patched_cb_size, false, false, -- cgit v1.2.3-58-ga151 From c209e742141bc0064ecdb73ef0dd825310de28cb Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 3 Dec 2020 17:12:09 +0200 Subject: habanalabs: allow user to pass a staged submission seq In order to support the staged submission feature, user must be allowed to use the same CS sequence for all submissions in the same staged submission. 
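For illustration, a user-space flow built on these flags could look
roughly like the sketch below. It assumes the usual HL_IOCTL_CS entry
point, an open device fd, and that the driver returns the assigned
sequence in the output structure of the first submission (none of
which are shown in the hunks below); the chunk setup is elided:

	union hl_cs_args args = {0};
	__u64 seq;

	/* first CS of the stage: the driver assigns the sequence */
	args.in.cs_flags = HL_CS_FLAGS_STAGED_SUBMISSION |
			   HL_CS_FLAGS_STAGED_SUBMISSION_FIRST;
	/* ... fill args.in.chunks_execute / num_chunks_execute ... */
	ioctl(fd, HL_IOCTL_CS, &args);
	seq = args.out.seq;

	/* middle CS of the same stage: supply that same sequence */
	args.in.cs_flags = HL_CS_FLAGS_STAGED_SUBMISSION;
	args.in.seq = seq;
	ioctl(fd, HL_IOCTL_CS, &args);

	/* last CS of the stage */
	args.in.cs_flags = HL_CS_FLAGS_STAGED_SUBMISSION |
			   HL_CS_FLAGS_STAGED_SUBMISSION_LAST;
	args.in.seq = seq;
	ioctl(fd, HL_IOCTL_CS, &args);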
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 16 +++++++++---- include/uapi/misc/habanalabs.h | 28 +++++++++++++++------- 2 files changed, 30 insertions(+), 14 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index ccb4972a555f..348b0ecb4489 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -807,7 +807,7 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev, } static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, - u32 num_chunks, u64 *cs_seq, bool timestamp) + u32 num_chunks, u64 *cs_seq, u32 flags) { bool int_queues_only = true; struct hl_device *hdev = hpriv->hdev; @@ -836,7 +836,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, goto free_cs_chunk_array; } - cs->timestamp = !!timestamp; + cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP); *cs_seq = cs->sequence; hl_debugfs_add_cs(cs); @@ -1004,7 +1004,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, rc = 0; } else { rc = cs_ioctl_default(hpriv, chunks, num_chunks, - cs_seq, false); + cs_seq, 0); } mutex_unlock(&hpriv->restore_phase_mutex); @@ -1347,7 +1347,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) enum hl_cs_type cs_type; u64 cs_seq = ULONG_MAX; void __user *chunks; - u32 num_chunks; + u32 num_chunks, flags; int rc; rc = hl_cs_sanity_checks(hpriv, args); @@ -1362,6 +1362,12 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) ~HL_CS_FLAGS_FORCE_RESTORE); chunks = (void __user *) (uintptr_t) args->in.chunks_execute; num_chunks = args->in.num_chunks_execute; + flags = args->in.cs_flags; + + /* In case this is a staged CS, user should supply the CS sequence */ + if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) && + !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST)) + cs_seq = args->in.seq; switch (cs_type) { case CS_TYPE_SIGNAL: @@ -1372,7 +1378,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) break; default: rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq, - args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP); + args->in.cs_flags); break; } diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index dba3827c43ca..b9afe1cdac24 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -604,11 +604,14 @@ struct hl_cs_chunk { }; /* SIGNAL and WAIT/COLLECTIVE_WAIT flags are mutually exclusive */ -#define HL_CS_FLAGS_FORCE_RESTORE 0x1 -#define HL_CS_FLAGS_SIGNAL 0x2 -#define HL_CS_FLAGS_WAIT 0x4 -#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 -#define HL_CS_FLAGS_TIMESTAMP 0x20 +#define HL_CS_FLAGS_FORCE_RESTORE 0x1 +#define HL_CS_FLAGS_SIGNAL 0x2 +#define HL_CS_FLAGS_WAIT 0x4 +#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 +#define HL_CS_FLAGS_TIMESTAMP 0x20 +#define HL_CS_FLAGS_STAGED_SUBMISSION 0x40 +#define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80 +#define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100 #define HL_CS_STATUS_SUCCESS 0 @@ -622,10 +625,17 @@ struct hl_cs_in { /* holds address of array of hl_cs_chunk for execution phase */ __u64 chunks_execute; - /* this holds address of array of hl_cs_chunk for store phase - - * Currently not in use - */ - __u64 chunks_store; + union { + /* this holds address of array of hl_cs_chunk for store phase - + * Currently not in use + */ + __u64 chunks_store; + + /* Sequence number of a staged submission 
CS + * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set + */ + __u64 seq; + }; /* Number of chunks in restore phase array. Maximum number is * HL_MAX_JOBS_PER_CS -- cgit v1.2.3-58-ga151 From f8b0f2ecc57023c1988c592cfc02fbca987a068b Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 6 Dec 2020 10:22:32 +0200 Subject: habanalabs/gaudi: remove duplicated gaudi packets masks As all packets use the same CTL register masks, we remove duplicated masks and use common masks instead. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 32 +++++++++++----------- .../misc/habanalabs/include/gaudi/gaudi_packets.h | 24 ---------------- 2 files changed, 16 insertions(+), 40 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 0324114ab4ee..5281cad4b552 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7922,10 +7922,10 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4); ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */ - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, eb); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); pkt->value = cpu_to_le32(value); pkt->ctl = cpu_to_le32(ctl); @@ -7942,10 +7942,10 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr); ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */ - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */ + ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 0); /* last pkt MB */ pkt->value = cpu_to_le32(value); pkt->ctl = cpu_to_le32(ctl); @@ -7991,10 +7991,10 @@ static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev, ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset); ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */ - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); pkt->value = cpu_to_le32(value); pkt->ctl = cpu_to_le32(ctl); @@ -8012,10 +8012,10 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt) cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1); cfg |= 
FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2); - ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1); - ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1); + ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_FENCE); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); pkt->cfg = cpu_to_le32(cfg); pkt->ctl = cpu_to_le32(ctl); diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h index f30f2c0458d7..784b5bc8d0ba 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h @@ -111,18 +111,6 @@ struct packet_msg_long { #define GAUDI_PKT_SHORT_CTL_BASE_SHIFT 22 #define GAUDI_PKT_SHORT_CTL_BASE_MASK 0x00C00000 -#define GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT 24 -#define GAUDI_PKT_SHORT_CTL_OPCODE_MASK 0x1F000000 - -#define GAUDI_PKT_SHORT_CTL_EB_SHIFT 29 -#define GAUDI_PKT_SHORT_CTL_EB_MASK 0x20000000 - -#define GAUDI_PKT_SHORT_CTL_RB_SHIFT 30 -#define GAUDI_PKT_SHORT_CTL_RB_MASK 0x40000000 - -#define GAUDI_PKT_SHORT_CTL_MB_SHIFT 31 -#define GAUDI_PKT_SHORT_CTL_MB_MASK 0x80000000 - struct packet_msg_short { __le32 value; __le32 ctl; @@ -146,18 +134,6 @@ struct packet_msg_prot { #define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0 #define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F -#define GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT 24 -#define GAUDI_PKT_FENCE_CTL_OPCODE_MASK 0x1F000000 - -#define GAUDI_PKT_FENCE_CTL_EB_SHIFT 29 -#define GAUDI_PKT_FENCE_CTL_EB_MASK 0x20000000 - -#define GAUDI_PKT_FENCE_CTL_RB_SHIFT 30 -#define GAUDI_PKT_FENCE_CTL_RB_MASK 0x40000000 - -#define GAUDI_PKT_FENCE_CTL_MB_SHIFT 31 -#define GAUDI_PKT_FENCE_CTL_MB_MASK 0x80000000 - struct packet_fence { __le32 cfg; __le32 ctl; -- cgit v1.2.3-58-ga151 From e1b85dbaf06d2e2a61797318738e52abeafa4f6b Mon Sep 17 00:00:00 2001 From: Ohad Sharabi Date: Tue, 1 Dec 2020 14:06:27 +0200 Subject: habanalabs/goya: move mmu_prepare to context init Currently mmu_prepare is located at context switch. Since we support a single context, no reason to reconfigure the MMU registers every context switch. Signed-off-by: Ohad Sharabi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/goya/goya.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 19b5bcc8b7ac..720484bffcab 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -4877,8 +4877,6 @@ int goya_context_switch(struct hl_device *hdev, u32 asid) WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020); - goya_mmu_prepare(hdev, asid); - goya_clear_sm_regs(hdev); return 0; @@ -5313,6 +5311,9 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data, static int goya_ctx_init(struct hl_ctx *ctx) { + if (ctx->asid != HL_KERNEL_ASID_ID) + goya_mmu_prepare(ctx->hdev, ctx->asid); + return 0; } -- cgit v1.2.3-58-ga151 From 0eda23d77e1beede9c61b0cba6d9267d3a92bd4e Mon Sep 17 00:00:00 2001 From: Moti Haimovski Date: Mon, 7 Dec 2020 09:10:34 +0200 Subject: habanalabs: report dram_page_size in hw_ip_info ioctl Instead of having it hard-coded as a define, pass it to the user in runtime. 
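User space can then pick up the DRAM page size at runtime instead of
hard-coding it. A minimal sketch, assuming the standard HL_IOCTL_INFO
entry point and the HL_INFO_HW_IP_INFO opcode (neither appears in the
hunks below) and an open device fd:

	struct hl_info_hw_ip_info hw_ip = {0};
	struct hl_info_args info = {0};

	info.op = HL_INFO_HW_IP_INFO;
	info.return_pointer = (__u64) (uintptr_t) &hw_ip;
	info.return_size = sizeof(hw_ip);
	ioctl(fd, HL_IOCTL_INFO, &info);

	__u64 dram_page_size = hw_ip.dram_page_size;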
Signed-off-by: Moti Haimovski Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 1 + include/uapi/misc/habanalabs.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index d25892d61ec9..394a2e1767ce 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -63,6 +63,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) hw_ip.dram_size = prop->dram_size - dram_kmd_size; if (hw_ip.dram_size > PAGE_SIZE) hw_ip.dram_enabled = 1; + hw_ip.dram_page_size = prop->dram_page_size; hw_ip.num_of_events = prop->num_of_events; memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version, diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index b9afe1cdac24..b431a70e1b8b 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -320,6 +320,8 @@ struct hl_info_hw_ip_info { __u8 pad[2]; __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN]; __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN]; + __u64 reserved2; + __u64 dram_page_size; }; struct hl_info_dram_usage { -- cgit v1.2.3-58-ga151 From 75d9a2a0aa28c4519c9648d501825e2c6860cb81 Mon Sep 17 00:00:00 2001 From: Alon Mizrahi Date: Thu, 3 Dec 2020 17:32:19 +0200 Subject: habanalabs: replace WARN/WARN_ON with dev_crit in driver Often WARN is defined in data-centers as BUG and we would like to avoid hanging the entire server on some internal error of the driver (important as it might be). Therefore, use dev_crit instead. Signed-off-by: Alon Mizrahi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/asid.c | 6 ++++-- drivers/misc/habanalabs/common/command_buffer.c | 8 +++++--- drivers/misc/habanalabs/common/command_submission.c | 4 ++-- drivers/misc/habanalabs/common/device.c | 3 ++- drivers/misc/habanalabs/common/mmu.c | 7 ++++--- drivers/misc/habanalabs/gaudi/gaudi.c | 14 +++++++------- drivers/misc/habanalabs/goya/goya.c | 14 +++++++------- 7 files changed, 31 insertions(+), 25 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/misc/habanalabs/common/asid.c index a2fdf31cf27c..ede04c032b6e 100644 --- a/drivers/misc/habanalabs/common/asid.c +++ b/drivers/misc/habanalabs/common/asid.c @@ -50,8 +50,10 @@ unsigned long hl_asid_alloc(struct hl_device *hdev) void hl_asid_free(struct hl_device *hdev, unsigned long asid) { - if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid), - "Invalid ASID %lu", asid)) + if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) { + dev_crit(hdev->dev, "Invalid ASID %lu", asid); return; + } + clear_bit(asid, hdev->asid_bitmap); } diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 6f6a904ab6ca..d9adb9a5e4d8 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -635,10 +635,12 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size, cb_handle >>= PAGE_SHIFT; cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); - /* hl_cb_get should never fail here so use kernel WARN */ - WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle); - if (!cb) + /* hl_cb_get should never fail here */ + if (!cb) { + dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n", + (u32) cb_handle); goto 
destroy_cb; + } return cb; diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 348b0ecb4489..f7fac82ac41d 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -48,8 +48,8 @@ void hl_sob_reset_error(struct kref *ref) struct hl_device *hdev = hw_sob->hdev; dev_crit(hdev->dev, - "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n", - hw_sob->q_idx, hw_sob->sob_id); + "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n", + hw_sob->q_idx, hw_sob->sob_id); } /** diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 29358a949686..13405d47d843 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -1485,7 +1485,8 @@ void hl_device_fini(struct hl_device *hdev) usleep_range(50, 200); rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); if (ktime_compare(ktime_get(), timeout) > 0) { - WARN(1, "Failed to remove device because reset function did not finish\n"); + dev_crit(hdev->dev, + "Failed to remove device because reset function did not finish\n"); return; } } diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index 28a4638741d8..62cfa4190fe4 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -261,9 +261,10 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, return -EFAULT; } - WARN_ONCE((phys_addr & (real_page_size - 1)), - "Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size", - phys_addr, real_page_size); + if (phys_addr & (real_page_size - 1)) + dev_crit(hdev->dev, + "Mapping 0x%llx with page size of 0x%x is erroneous! 
Address must be divisible by page size", + phys_addr, real_page_size); npages = page_size / real_page_size; real_virt_addr = virt_addr; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 5281cad4b552..19c9e3895983 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -5308,10 +5308,10 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev, patched_cb_handle >>= PAGE_SHIFT; parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) patched_cb_handle); - /* hl_cb_get should never fail here so use kernel WARN */ - WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + /* hl_cb_get should never fail */ if (!parser->patched_cb) { + dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); rc = -EFAULT; goto out; } @@ -5380,10 +5380,10 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev, patched_cb_handle >>= PAGE_SHIFT; parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) patched_cb_handle); - /* hl_cb_get should never fail here so use kernel WARN */ - WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + /* hl_cb_get should never fail here */ if (!parser->patched_cb) { + dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); rc = -EFAULT; goto out; } @@ -5928,7 +5928,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) return; if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) { - WARN(1, "asid %u is too big\n", asid); + dev_crit(hdev->dev, "asid %u is too big\n", asid); return; } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 720484bffcab..12d9e5205f52 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -3876,10 +3876,10 @@ static int goya_parse_cb_mmu(struct hl_device *hdev, patched_cb_handle >>= PAGE_SHIFT; parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) patched_cb_handle); - /* hl_cb_get should never fail here so use kernel WARN */ - WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + /* hl_cb_get should never fail here */ if (!parser->patched_cb) { + dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); rc = -EFAULT; goto out; } @@ -3948,10 +3948,10 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev, patched_cb_handle >>= PAGE_SHIFT; parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) patched_cb_handle); - /* hl_cb_get should never fail here so use kernel WARN */ - WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", - (u32) patched_cb_handle); + /* hl_cb_get should never fail here */ if (!parser->patched_cb) { + dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); rc = -EFAULT; goto out; } @@ -5042,7 +5042,7 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid) return; if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) { - WARN(1, "asid %u is too big\n", asid); + dev_crit(hdev->dev, "asid %u is too big\n", asid); return; } -- cgit v1.2.3-58-ga151 From 3b762f55aa27df4cd3e2da0d42ad570d9018d86d Mon Sep 17 00:00:00 2001 From: Omer Shpigelman Date: Wed, 9 Dec 2020 13:28:46 +0200 Subject: habanalabs: kernel doc format in memory functions Change all memory functions documentation according to kernel doc format. 
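For reference, the kernel-doc layout being applied looks like this
(a generic example, not a function from this driver):

	/**
	 * function_name() - Short, one-line description.
	 * @arg1: description of the first parameter.
	 * @arg2: description of the second parameter.
	 *
	 * Optional longer description of the behavior.
	 *
	 * Return: description of the return value.
	 */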
Signed-off-by: Omer Shpigelman Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/memory.c | 377 +++++++++++++++----------------- 1 file changed, 180 insertions(+), 197 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index fb2c268a262b..53707e9c5c3e 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -38,15 +38,14 @@ */ /* - * alloc_device_memory - allocate device memory - * - * @ctx : current context - * @args : host parameters containing the requested size - * @ret_handle : result handle + * alloc_device_memory() - allocate device memory. + * @ctx: pointer to the context structure. + * @args: host parameters containing the requested size. + * @ret_handle: result handle. * * This function does the following: - * - Allocate the requested size rounded up to 'dram_page_size' pages - * - Return unique handle + * - Allocate the requested size rounded up to 'dram_page_size' pages. + * - Return unique handle for later map/unmap/free. */ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, u32 *ret_handle) @@ -182,17 +181,17 @@ pages_pack_err: return rc; } -/* - * dma_map_host_va - DMA mapping of the given host virtual address. - * @hdev: habanalabs device structure - * @addr: the host virtual address of the memory area - * @size: the size of the memory area - * @p_userptr: pointer to result userptr structure +/** + * dma_map_host_va() - DMA mapping of the given host virtual address. + * @hdev: habanalabs device structure. + * @addr: the host virtual address of the memory area. + * @size: the size of the memory area. + * @p_userptr: pointer to result userptr structure. * * This function does the following: - * - Allocate userptr structure - * - Pin the given host memory using the userptr structure - * - Perform DMA mapping to have the DMA addresses of the pages + * - Allocate userptr structure. + * - Pin the given host memory using the userptr structure. + * - Perform DMA mapping to have the DMA addresses of the pages. */ static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size, struct hl_userptr **p_userptr) @@ -236,14 +235,14 @@ userptr_err: return rc; } -/* - * dma_unmap_host_va - DMA unmapping of the given host virtual address. - * @hdev: habanalabs device structure - * @userptr: userptr to free +/** + * dma_unmap_host_va() - DMA unmapping of the given host virtual address. + * @hdev: habanalabs device structure. + * @userptr: userptr to free. * * This function does the following: - * - Unpins the physical pages - * - Frees the userptr structure + * - Unpins the physical pages. + * - Frees the userptr structure. */ static void dma_unmap_host_va(struct hl_device *hdev, struct hl_userptr *userptr) @@ -252,14 +251,13 @@ static void dma_unmap_host_va(struct hl_device *hdev, kfree(userptr); } -/* - * dram_pg_pool_do_release - free DRAM pages pool - * - * @ref : pointer to reference object +/** + * dram_pg_pool_do_release() - free DRAM pages pool + * @ref: pointer to reference object. * * This function does the following: - * - Frees the idr structure of physical pages handles - * - Frees the generic pool of DRAM physical pages + * - Frees the idr structure of physical pages handles. + * - Frees the generic pool of DRAM physical pages. 
*/ static void dram_pg_pool_do_release(struct kref *ref) { @@ -274,15 +272,15 @@ static void dram_pg_pool_do_release(struct kref *ref) gen_pool_destroy(vm->dram_pg_pool); } -/* - * free_phys_pg_pack - free physical page pack - * @hdev: habanalabs device structure - * @phys_pg_pack: physical page pack to free +/** + * free_phys_pg_pack() - free physical page pack. + * @hdev: habanalabs device structure. + * @phys_pg_pack: physical page pack to free. * * This function does the following: * - For DRAM memory only, iterate over the pack and free each physical block - * structure by returning it to the general pool - * - Free the hl_vm_phys_pg_pack structure + * structure by returning it to the general pool. + * - Free the hl_vm_phys_pg_pack structure. */ static void free_phys_pg_pack(struct hl_device *hdev, struct hl_vm_phys_pg_pack *phys_pg_pack) @@ -313,14 +311,13 @@ static void free_phys_pg_pack(struct hl_device *hdev, kfree(phys_pg_pack); } -/* - * free_device_memory - free device memory - * - * @ctx : current context - * @handle : handle of the memory chunk to free +/** + * free_device_memory() - free device memory. + * @ctx: pointer to the context structure. + * @handle: handle of the memory chunk to free. * * This function does the following: - * - Free the device memory related to the given handle + * - Free the device memory related to the given handle. */ static int free_device_memory(struct hl_ctx *ctx, u32 handle) { @@ -361,16 +358,15 @@ static int free_device_memory(struct hl_ctx *ctx, u32 handle) return 0; } -/* - * clear_va_list_locked - free virtual addresses list - * - * @hdev : habanalabs device structure - * @va_list : list of virtual addresses to free +/** + * clear_va_list_locked() - free virtual addresses list. + * @hdev: habanalabs device structure. + * @va_list: list of virtual addresses to free. * * This function does the following: - * - Iterate over the list and free each virtual addresses block + * - Iterate over the list and free each virtual addresses block. * - * This function should be called only when va_list lock is taken + * This function should be called only when va_list lock is taken. */ static void clear_va_list_locked(struct hl_device *hdev, struct list_head *va_list) @@ -383,16 +379,15 @@ static void clear_va_list_locked(struct hl_device *hdev, } } -/* - * print_va_list_locked - print virtual addresses list - * - * @hdev : habanalabs device structure - * @va_list : list of virtual addresses to print +/** + * print_va_list_locked() - print virtual addresses list. + * @hdev: habanalabs device structure. + * @va_list: list of virtual addresses to print. * * This function does the following: - * - Iterate over the list and print each virtual addresses block + * - Iterate over the list and print each virtual addresses block. * - * This function should be called only when va_list lock is taken + * This function should be called only when va_list lock is taken. */ static void print_va_list_locked(struct hl_device *hdev, struct list_head *va_list) @@ -409,18 +404,17 @@ static void print_va_list_locked(struct hl_device *hdev, #endif } -/* - * merge_va_blocks_locked - merge a virtual block if possible - * - * @hdev : pointer to the habanalabs device structure - * @va_list : pointer to the virtual addresses block list - * @va_block : virtual block to merge with adjacent blocks +/** + * merge_va_blocks_locked() - merge a virtual block if possible. + * @hdev: pointer to the habanalabs device structure. + * @va_list: pointer to the virtual addresses block list. 
+ * @va_block: virtual block to merge with adjacent blocks. * * This function does the following: * - Merge the given blocks with the adjacent blocks if their virtual ranges - * create a contiguous virtual range + * create a contiguous virtual range. * - * This Function should be called only when va_list lock is taken + * This Function should be called only when va_list lock is taken. */ static void merge_va_blocks_locked(struct hl_device *hdev, struct list_head *va_list, struct hl_vm_va_block *va_block) @@ -445,19 +439,18 @@ static void merge_va_blocks_locked(struct hl_device *hdev, } } -/* - * add_va_block_locked - add a virtual block to the virtual addresses list - * - * @hdev : pointer to the habanalabs device structure - * @va_list : pointer to the virtual addresses block list - * @start : start virtual address - * @end : end virtual address +/** + * add_va_block_locked() - add a virtual block to the virtual addresses list. + * @hdev: pointer to the habanalabs device structure. + * @va_list: pointer to the virtual addresses block list. + * @start: start virtual address. + * @end: end virtual address. * * This function does the following: - * - Add the given block to the virtual blocks list and merge with other - * blocks if a contiguous virtual block can be created + * - Add the given block to the virtual blocks list and merge with other blocks + * if a contiguous virtual block can be created. * - * This Function should be called only when va_list lock is taken + * This Function should be called only when va_list lock is taken. */ static int add_va_block_locked(struct hl_device *hdev, struct list_head *va_list, u64 start, u64 end) @@ -501,16 +494,15 @@ static int add_va_block_locked(struct hl_device *hdev, return 0; } -/* - * add_va_block - wrapper for add_va_block_locked - * - * @hdev : pointer to the habanalabs device structure - * @va_list : pointer to the virtual addresses block list - * @start : start virtual address - * @end : end virtual address +/** + * add_va_block() - wrapper for add_va_block_locked. + * @hdev: pointer to the habanalabs device structure. + * @va_list: pointer to the virtual addresses block list. + * @start: start virtual address. + * @end: end virtual address. * * This function does the following: - * - Takes the list lock and calls add_va_block_locked + * - Takes the list lock and calls add_va_block_locked. */ static inline int add_va_block(struct hl_device *hdev, struct hl_va_range *va_range, u64 start, u64 end) @@ -524,7 +516,7 @@ static inline int add_va_block(struct hl_device *hdev, return rc; } -/* +/** * get_va_block() - get a virtual block for the given size and alignment. * @hdev: pointer to the habanalabs device structure. * @va_range: pointer to the virtual addresses range. @@ -620,7 +612,7 @@ out: return res_valid_start; } -/* +/** * hl_reserve_va_block() - reserve a virtual block of a given size. * @hdev: pointer to the habanalabs device structure. * @ctx: current context @@ -644,9 +636,9 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, /** * hl_get_va_range_type() - get va_range type for the given address and size. - * @address: The start address of the area we want to validate. - * @size: The size in bytes of the area we want to validate. - * @type: returned va_range type + * @address: the start address of the area we want to validate. + * @size: the size in bytes of the area we want to validate. + * @type: returned va_range type. * * Return: true if the area is inside a valid range, false otherwise. 
*/ @@ -667,16 +659,15 @@ static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size, return -EINVAL; } -/* - * hl_unreserve_va_block - wrapper for add_va_block for unreserving a va block - * +/** + * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block. * @hdev: pointer to the habanalabs device structure - * @ctx: current context - * @start: start virtual address - * @end: end virtual address + * @ctx: pointer to the context structure. + * @start: start virtual address. + * @end: end virtual address. * * This function does the following: - * - Takes the list lock and calls add_va_block_locked + * - Takes the list lock and calls add_va_block_locked. */ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, u64 start_addr, u64 size) @@ -701,11 +692,10 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, return rc; } -/* - * get_sg_info - get number of pages and the DMA address from SG list - * - * @sg : the SG list - * @dma_addr : pointer to DMA address to return +/** + * get_sg_info() - get number of pages and the DMA address from SG list. + * @sg: the SG list. + * @dma_addr: pointer to DMA address to return. * * Calculate the number of consecutive pages described by the SG list. Take the * offset of the address in the first page, add to it the length and round it up @@ -719,17 +709,17 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr) (PAGE_SIZE - 1)) >> PAGE_SHIFT; } -/* - * init_phys_pg_pack_from_userptr - initialize physical page pack from host - * memory - * @ctx: current context - * @userptr: userptr to initialize from - * @pphys_pg_pack: result pointer +/** + * init_phys_pg_pack_from_userptr() - initialize physical page pack from host + * memory + * @ctx: pointer to the context structure. + * @userptr: userptr to initialize from. + * @pphys_pg_pack: result pointer. * * This function does the following: - * - Pin the physical pages related to the given virtual block + * - Pin the physical pages related to the given virtual block. * - Create a physical page pack from the physical pages related to the given - * virtual block + * virtual block. */ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, struct hl_userptr *userptr, @@ -821,16 +811,16 @@ page_pack_arr_mem_err: return rc; } -/* - * map_phys_pg_pack - maps the physical page pack. - * @ctx: current context - * @vaddr: start address of the virtual area to map from - * @phys_pg_pack: the pack of physical pages to map to +/** + * map_phys_pg_pack() - maps the physical page pack.. + * @ctx: pointer to the context structure. + * @vaddr: start address of the virtual area to map from. + * @phys_pg_pack: the pack of physical pages to map to. * * This function does the following: - * - Maps each chunk of virtual memory to matching physical chunk - * - Stores number of successful mappings in the given argument - * - Returns 0 on success, error code otherwise + * - Maps each chunk of virtual memory to matching physical chunk. + * - Stores number of successful mappings in the given argument. + * - Returns 0 on success, error code otherwise. */ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, struct hl_vm_phys_pg_pack *phys_pg_pack) @@ -875,11 +865,11 @@ err: return rc; } -/* - * unmap_phys_pg_pack - unmaps the physical page pack - * @ctx: current context - * @vaddr: start address of the virtual area to unmap - * @phys_pg_pack: the pack of physical pages to unmap +/** + * unmap_phys_pg_pack() - unmaps the physical page pack. 
+ * @ctx: pointer to the context structure. + * @vaddr: start address of the virtual area to unmap. + * @phys_pg_pack: the pack of physical pages to unmap. */ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, struct hl_vm_phys_pg_pack *phys_pg_pack) @@ -913,7 +903,7 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, } static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, - u64 *paddr) + u64 *paddr) { struct hl_device *hdev = ctx->hdev; struct hl_vm *vm = &hdev->vm; @@ -936,19 +926,18 @@ static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, return 0; } -/* - * map_device_va - map the given memory - * - * @ctx : current context - * @args : host parameters with handle/host virtual address - * @device_addr : pointer to result device virtual address +/** + * map_device_va() - map the given memory. + * @ctx: pointer to the context structure. + * @args: host parameters with handle/host virtual address. + * @device_addr: pointer to result device virtual address. * * This function does the following: * - If given a physical device memory handle, map to a device virtual block - * and return the start address of this block + * and return the start address of this block. * - If given a host virtual address and size, find the related physical pages, * map a device virtual block to this pages and return the start address of - * this block + * this block. */ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr) @@ -1125,16 +1114,15 @@ init_page_pack_err: return rc; } -/* - * unmap_device_va - unmap the given device virtual address - * - * @ctx : current context - * @vaddr : device virtual address to unmap - * @ctx_free : true if in context free flow, false otherwise. +/** + * unmap_device_va() - unmap the given device virtual address. + * @ctx: pointer to the context structure. + * @vaddr: device virtual address to unmap. + * @ctx_free: true if in context free flow, false otherwise. * * This function does the following: - * - Unmap the physical pages related to the given virtual address - * - return the device virtual block to the virtual block list + * - unmap the physical pages related to the given virtual address. + * - return the device virtual block to the virtual block list. */ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free) { @@ -1301,7 +1289,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) rc = 0; } else { rc = get_paddr_from_handle(ctx, &args->in, - &device_addr); + &device_addr); } memset(args, 0, sizeof(*args)); @@ -1478,16 +1466,16 @@ destroy_framevec: return rc; } -/* - * hl_pin_host_memory - pins a chunk of host memory. - * @hdev: pointer to the habanalabs device structure - * @addr: the host virtual address of the memory area - * @size: the size of the memory area - * @userptr: pointer to hl_userptr structure +/** + * hl_pin_host_memory() - pins a chunk of host memory. + * @hdev: pointer to the habanalabs device structure. + * @addr: the host virtual address of the memory area. + * @size: the size of the memory area. + * @userptr: pointer to hl_userptr structure. * * This function does the following: - * - Pins the physical pages - * - Create an SG list from those pages + * - Pins the physical pages. + * - Create an SG list from those pages. 
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
@@ -1585,11 +1573,10 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
 	kfree(userptr->sgt);
 }
 
-/*
- * hl_userptr_delete_list - clear userptr list
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr_list : pointer to the list to clear
+/**
+ * hl_userptr_delete_list() - clear userptr list.
+ * @hdev: pointer to the habanalabs device structure.
+ * @userptr_list: pointer to the list to clear.
  *
  * This function does the following:
  * - Iterates over the list and unpins the host memory and frees the userptr
@@ -1608,12 +1595,11 @@ void hl_userptr_delete_list(struct hl_device *hdev,
 	INIT_LIST_HEAD(userptr_list);
 }
 
-/*
- * hl_userptr_is_pinned - returns whether the given userptr is pinned
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr_list : pointer to the list to clear
- * @userptr : pointer to userptr to check
+/**
+ * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
+ * @hdev: pointer to the habanalabs device structure.
+ * @userptr_list: pointer to the list to clear.
+ * @userptr: pointer to userptr to check.
  *
  * This function does the following:
  * - Iterates over the list and checks if the given userptr is in it, means is
@@ -1631,12 +1617,12 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
 	return false;
 }
 
-/*
- * va_range_init - initialize virtual addresses range
- * @hdev: pointer to the habanalabs device structure
- * @va_range: pointer to the range to initialize
- * @start: range start address
- * @end: range end address
+/**
+ * va_range_init() - initialize virtual addresses range.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_range: pointer to the range to initialize.
+ * @start: range start address.
+ * @end: range end address.
  *
  * This function does the following:
  * - Initializes the virtual addresses list of the given range with the given
@@ -1678,13 +1664,13 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
 	return 0;
 }
 
-/*
- * va_range_fini() - clear a virtual addresses range
- * @hdev: pointer to the habanalabs structure
- * va_range: pointer to virtual addresses range
+/**
+ * va_range_fini() - clear a virtual addresses range.
+ * @hdev: pointer to the habanalabs structure.
+ * @va_range: pointer to the virtual addresses range.
  *
  * This function does the following:
- * - Frees the virtual addresses block list and its lock
+ * - Frees the virtual addresses block list and its lock.
  */
 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
 {
@@ -1696,22 +1682,22 @@ static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
 	kfree(va_range);
 }
 
-/*
- * vm_ctx_init_with_ranges() - initialize virtual memory for context
- * @ctx: pointer to the habanalabs context structure
+/**
+ * vm_ctx_init_with_ranges() - initialize virtual memory for context.
+ * @ctx: pointer to the habanalabs context structure.
  * @host_range_start: host virtual addresses range start.
  * @host_range_end: host virtual addresses range end.
  * @host_huge_range_start: host virtual addresses range start for memory
- *                          allocated with huge pages.
+ *                         allocated with huge pages.
  * @host_huge_range_end: host virtual addresses range end for memory allocated
  *                        with huge pages.
  * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
* * This function initializes the following: - * - MMU for context - * - Virtual address to area descriptor hashtable - * - Virtual block list of available virtual memory + * - MMU for context. + * - Virtual address to area descriptor hashtable. + * - Virtual block list of available virtual memory. */ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start, @@ -1846,15 +1832,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) dram_range_start, dram_range_end, dram_page_size); } -/* - * hl_vm_ctx_fini - virtual memory teardown of context - * - * @ctx : pointer to the habanalabs context structure +/** + * hl_vm_ctx_fini() - virtual memory teardown of context. + * @ctx: pointer to the habanalabs context structure. * * This function perform teardown the following: - * - Virtual block list of available virtual memory - * - Virtual address to area descriptor hashtable - * - MMU for context + * - Virtual block list of available virtual memory. + * - Virtual address to area descriptor hashtable. + * - MMU for context. * * In addition this function does the following: * - Unmaps the existing hashtable nodes if the hashtable is not empty. The @@ -1875,7 +1860,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) struct hlist_node *tmp_node; int i; - if (!ctx->hdev->mmu_enable) + if (!hdev->mmu_enable) return; hl_debugfs_remove_ctx_mem_hash(hdev, ctx); @@ -1930,19 +1915,18 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) * all DRAM is available */ if (ctx->asid != HL_KERNEL_ASID_ID && - !ctx->hdev->asic_prop.dram_supports_virtual_memory) - atomic64_set(&ctx->hdev->dram_used_mem, 0); + !hdev->asic_prop.dram_supports_virtual_memory) + atomic64_set(&hdev->dram_used_mem, 0); } -/* - * hl_vm_init - initialize virtual memory module - * - * @hdev : pointer to the habanalabs device structure +/** + * hl_vm_init() - initialize virtual memory module. + * @hdev: pointer to the habanalabs device structure. * * This function initializes the following: - * - MMU module - * - DRAM physical pages pool of 2MB - * - Idr for device memory allocation handles + * - MMU module. + * - DRAM physical pages pool of 2MB. + * - Idr for device memory allocation handles. */ int hl_vm_init(struct hl_device *hdev) { @@ -1983,15 +1967,14 @@ pool_add_err: return rc; } -/* - * hl_vm_fini - virtual memory module teardown - * - * @hdev : pointer to the habanalabs device structure +/** + * hl_vm_fini() - virtual memory module teardown. + * @hdev: pointer to the habanalabs device structure. * * This function perform teardown to the following: - * - Idr for device memory allocation handles - * - DRAM physical pages pool of 2MB - * - MMU module + * - Idr for device memory allocation handles. + * - DRAM physical pages pool of 2MB. + * - MMU module. */ void hl_vm_fini(struct hl_device *hdev) { -- cgit v1.2.3-58-ga151 From f19040ce418d6fb4837dba45a920c0ae0d5c698f Mon Sep 17 00:00:00 2001 From: Omer Shpigelman Date: Wed, 9 Dec 2020 13:34:11 +0200 Subject: habanalabs: modify memory functions signatures For consistency, modify all memory ioctl functions to get the ioctl arguments structure rather than the arguments themselves. 
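In short, the pattern is to pass the ioctl arguments structure through and
let the callee extract its own fields. A condensed sketch of the signature
change (the full diff follows below):

	/* before: each caller unpacks the ioctl arguments */
	static int free_device_memory(struct hl_ctx *ctx, u32 handle);

	/* after: the arguments structure is passed as-is and the callee
	 * reads what it needs, e.g. args->free.handle
	 */
	static int free_device_memory(struct hl_ctx *ctx,
					struct hl_mem_in *args);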
Signed-off-by: Omer Shpigelman
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/common/memory.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 53707e9c5c3e..207fda36cac9 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -314,16 +314,17 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 /**
  * free_device_memory() - free device memory.
  * @ctx: pointer to the context structure.
- * @handle: handle of the memory chunk to free.
+ * @args: host parameters with the handle of the memory chunk to free.
  *
  * This function does the following:
  * - Free the device memory related to the given handle.
  */
-static int free_device_memory(struct hl_ctx *ctx, u32 handle)
+static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
 {
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_vm *vm = &hdev->vm;
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
+	u32 handle = args->free.handle;
 
 	spin_lock(&vm->idr_lock);
 	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
@@ -1117,20 +1118,22 @@ init_page_pack_err:
 /**
  * unmap_device_va() - unmap the given device virtual address.
  * @ctx: pointer to the context structure.
- * @vaddr: device virtual address to unmap.
+ * @args: host parameters with device virtual address to unmap.
  * @ctx_free: true if in context free flow, false otherwise.
  *
  * This function does the following:
  * - unmap the physical pages related to the given virtual address.
  * - return the device virtual block to the virtual block list.
  */
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
+static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
+				bool ctx_free)
 {
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 	struct hl_vm_hash_node *hnode = NULL;
 	struct hl_userptr *userptr = NULL;
 	struct hl_va_range *va_range;
+	u64 vaddr = args->unmap.device_virt_addr;
 	enum vm_type_t *vm_type;
 	bool is_userptr;
 	int rc = 0;
@@ -1280,7 +1283,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
 		break;
 
 	case HL_MEM_OP_FREE:
-		rc = free_device_memory(ctx, args->in.free.handle);
+		rc = free_device_memory(ctx, &args->in);
 		break;
 
 	case HL_MEM_OP_MAP:
@@ -1388,7 +1391,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 			goto out;
 		}
 
-		rc = free_device_memory(ctx, args->in.free.handle);
+		rc = free_device_memory(ctx, &args->in);
 		break;
 
 	case HL_MEM_OP_MAP:
@@ -1399,8 +1402,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 		break;
 
 	case HL_MEM_OP_UNMAP:
-		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
-					false);
+		rc = unmap_device_va(ctx, &args->in, false);
 		break;
 
 	default:
@@ -1858,6 +1860,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 	struct hl_vm_phys_pg_pack *phys_pg_list;
 	struct hl_vm_hash_node *hnode;
 	struct hlist_node *tmp_node;
+	struct hl_mem_in args;
 	int i;
 
 	if (!hdev->mmu_enable)
@@ -1877,7 +1880,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 		dev_dbg(hdev->dev,
 			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
 			hnode->vaddr, ctx->asid);
-		unmap_device_va(ctx, hnode->vaddr, true);
+		args.unmap.device_virt_addr = hnode->vaddr;
+		unmap_device_va(ctx, &args, true);
 	}
 
 	mutex_lock(&ctx->mmu_lock);
-- cgit v1.2.3-58-ga151


From 2e368560080cf020cf149578e70d0594ca4d19d0 Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Wed, 16 Dec 2020 09:12:08 +0200
Subject:
habanalabs/gaudi: add debug prints for security status In order to have more information while debugging boot issues, we should print the firmware security status at every boot stage. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index c9a12980218a..3cb3d32aa814 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -659,6 +659,9 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, prop->fw_security_disabled = true; } + dev_dbg(hdev->dev, "Firmware preboot security status %#x\n", + security_status); + dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n", prop->hard_reset_done_by_fw ? "enabled" : "disabled"); @@ -753,6 +756,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, if (prop->fw_boot_cpu_security_map & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) prop->hard_reset_done_by_fw = true; + + dev_dbg(hdev->dev, + "Firmware boot CPU security status %#x\n", + prop->fw_boot_cpu_security_map); } dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n", @@ -837,6 +844,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, if (prop->fw_app_security_map & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) prop->hard_reset_done_by_fw = true; + + dev_dbg(hdev->dev, + "Firmware application CPU security status %#x\n", + prop->fw_app_security_map); } dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n", -- cgit v1.2.3-58-ga151 From 1530d468178d90e1a2870f0693e5f1a240e74dc5 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Fri, 18 Dec 2020 19:12:56 +0200 Subject: habanalabs: add ASIC property of functional HBMs The number of functional HBMs in the same ASIC can be different due to malfunctioning HBM banks. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index dee73a04411b..f083487ca563 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -416,6 +416,7 @@ struct hl_mmu_properties { * from BOOT_DEV_STS0 * @dram_supports_virtual_memory: is there an MMU towards the DRAM * @hard_reset_done_by_fw: true if firmware is handling hard reset flow + * @num_functional_hbms: number of functional HBMs in each DCORE. 
 */
 struct asic_fixed_properties {
 	struct hw_queue_properties	*hw_queues_props;
@@ -474,6 +475,7 @@ struct asic_fixed_properties {
 	u8				fw_security_status_valid;
 	u8				dram_supports_virtual_memory;
 	u8				hard_reset_done_by_fw;
+	u8				num_functional_hbms;
 };
 
 /**
-- cgit v1.2.3-58-ga151


From 3abe1040ba61a0a7148364ee4b4d969d7afd375b Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Fri, 18 Dec 2020 21:27:54 +0200
Subject: habanalabs: update to latest hl_boot_if.h

Update to the latest version of this file that the F/W exports

Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/include/common/hl_boot_if.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 1115456cca85..93552d6b59b3 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -150,6 +150,10 @@
 *	CPU_BOOT_DEV_STS0_PLL_INFO_EN	FW retrieval of PLL info is enabled.
 *					Initialized in: linux
 *
+ *	CPU_BOOT_DEV_STS0_SP_SRAM_EN	SP SRAM is initialized and available
+ *					for use.
+ *					Initialized in: preboot
+ *
 *	CPU_BOOT_DEV_STS0_CLK_GATE_EN	Clock Gating enabled.
 *					FW initialized Clock Gating.
 *					Initialized in: preboot
@@ -162,10 +166,6 @@
 *					statuses.
 *					Initialized in: preboot
 *
- *	CPU_BOOT_DEV_STS0_SP_SRAM_EN	SP SRAM is initialized and available
- *					for use.
- *					Initialized in: preboot
- *
 */
 #define CPU_BOOT_DEV_STS0_SECURITY_EN			(1 << 0)
 #define CPU_BOOT_DEV_STS0_DEBUG_EN			(1 << 1)
-- cgit v1.2.3-58-ga151


From 9402a3362462d080b998e00ed33fc193c12adbbf Mon Sep 17 00:00:00 2001
From: Alon Mizrahi
Date: Wed, 23 Dec 2020 17:53:17 +0200
Subject: habanalabs: return dram virtual address in info ioctl

When working with DRAM MMU, we should supply the userspace with the
virtual start address of the DRAM instead of the physical one. This is
because the physical one has no meaning for the user, as the user only
knows the virtual address range.

Signed-off-by: Alon Mizrahi
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/common/habanalabs_ioctl.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 394a2e1767ce..018320e0331b 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -57,7 +57,9 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
 	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
 	hw_ip.sram_base_address = prop->sram_user_base_address;
-	hw_ip.dram_base_address = prop->dram_user_base_address;
+	hw_ip.dram_base_address =
+			hdev->mmu_enable && prop->dram_supports_virtual_memory ?
+			prop->dmmu.start_addr : prop->dram_user_base_address;
 	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
 	hw_ip.sram_size = prop->sram_size - sram_kmd_size;
 	hw_ip.dram_size = prop->dram_size - dram_kmd_size;
-- cgit v1.2.3-58-ga151


From 266cdfa2b7bd2d2ae2fdc895f10254b71276c751 Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Tue, 22 Dec 2020 15:56:12 +0200
Subject: habanalabs/gaudi: set uninitialized symbol

Initialize the local variable that is returned by the function, in case
it is never assigned.
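The failure mode being fixed, as a minimal sketch (the loop body and the
names other than rc are illustrative only, not taken verbatim from the
driver):

	int i, rc;	/* rc holds garbage until first assigned */

	for (i = 0 ; i < num_jobs ; i++) {
		if (skip)
			continue;	/* rc may never be assigned... */
		rc = create_job(i);
	}

	return rc;	/* ...yet it is returned here, hence rc = 0 */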
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 19c9e3895983..e105f1c6ab74 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1248,7 +1248,7 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev, u32 queue_id, collective_queue, num_jobs; u32 stream, nic_queue, nic_idx = 0; bool skip; - int i, rc; + int i, rc = 0; /* Verify wait queue id is configured as master */ hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id]; -- cgit v1.2.3-58-ga151 From a1f8533269aa6e96154fddc33b2cb3696cde356e Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Mon, 28 Dec 2020 18:28:08 +0200 Subject: habanalabs: remove access to kernel memory using debugfs Accessing kernel allocated memory through debugfs should not be allowed as it introduces a security vulnerability. We remove the option to read/write kernel memory for all asics. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 8 -------- drivers/misc/habanalabs/goya/goya.c | 12 ------------ 2 files changed, 20 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index e105f1c6ab74..3d8414d3afa8 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -5734,8 +5734,6 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) } if (hbm_bar_addr == U64_MAX) rc = -EIO; - } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { - *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE); } else { rc = -EFAULT; } @@ -5781,8 +5779,6 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) } if (hbm_bar_addr == U64_MAX) rc = -EIO; - } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { - *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; } else { rc = -EFAULT; } @@ -5832,8 +5828,6 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) } if (hbm_bar_addr == U64_MAX) rc = -EIO; - } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { - *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE); } else { rc = -EFAULT; } @@ -5882,8 +5876,6 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) } if (hbm_bar_addr == U64_MAX) rc = -EIO; - } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { - *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; } else { rc = -EFAULT; } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 12d9e5205f52..eeade9a45bc0 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -4122,9 +4122,6 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) if (ddr_bar_addr == U64_MAX) rc = -EIO; - } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { - *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE); - } else { rc = -EFAULT; } @@ -4178,9 +4175,6 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) if (ddr_bar_addr == U64_MAX) rc = -EIO; - } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) { - *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val; - } else { 
 		rc = -EFAULT;
 	}
 
@@ -4223,9 +4217,6 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
 
 		if (ddr_bar_addr == U64_MAX)
 			rc = -EIO;
-	} else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
-		*val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
-
 	} else {
 		rc = -EFAULT;
 	}
@@ -4266,9 +4257,6 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
 
 		if (ddr_bar_addr == U64_MAX)
 			rc = -EIO;
-	} else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
-		*(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
-
 	} else {
 		rc = -EFAULT;
 	}
-- cgit v1.2.3-58-ga151


From b19dc67aa8c008661b10b1c2372a29f403bf5dac Mon Sep 17 00:00:00 2001
From: Moti Haimovski
Date: Wed, 18 Nov 2020 20:15:29 +0200
Subject: habanalabs: support non power-of-2 DRAM phys page sizes

DRAM physical page sizes depend on the number of HBMs available in the
device. This number is device-dependent and may also be subject to
binning when one or more of the DRAM controllers are found to be faulty.
Such a configuration may lead to partitioning the DRAM to non-power-of-2
pages. To support this feature we also need to add an infrastructure for
address scrambling.

Signed-off-by: Moti Haimovski
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/common/debugfs.c    |   6 +-
 drivers/misc/habanalabs/common/habanalabs.h |   8 ++
 drivers/misc/habanalabs/common/memory.c     | 176 +++++++++++++++++++++++-----
 drivers/misc/habanalabs/common/mmu.c        |  61 ++++++++--
 drivers/misc/habanalabs/gaudi/gaudi.c       |   3 +-
 drivers/misc/habanalabs/goya/goya.c         |   3 +-
 6 files changed, 213 insertions(+), 44 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index cef716643979..50ca8eea6648 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -333,8 +333,10 @@ static int mmu_show(struct seq_file *s, void *data)
 		return 0;
 	}
 
-	seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
-			dev_entry->mmu_asid, dev_entry->mmu_addr);
+	seq_printf(s,
+		"asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx\n",
+		dev_entry->mmu_asid, dev_entry->mmu_addr,
+		hops_info.scrambled_vaddr);
 
 	for (i = 0 ; i < hops_info.used_hops ; i++) {
 		seq_printf(s, "hop%d_addr: 0x%llx\n",
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index f083487ca563..5da26e81a251 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -848,6 +848,8 @@ enum div_select_defs {
 * @collective_wait_init_cs: Generate collective master/slave packets
 *                           and place them in the relevant cs jobs
 * @collective_wait_create_jobs: allocate collective wait cs jobs
+ * @scramble_vaddr: Routine to scramble the virtual address prior to mapping it
+ *                  in the MMU.
 */
 struct hl_asic_funcs {
 	int (*early_init)(struct hl_device *hdev);
@@ -957,6 +959,7 @@ struct hl_asic_funcs {
 	int (*collective_wait_create_jobs)(struct hl_device *hdev,
 			struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
 			u32 collective_engine_id);
+	u64 (*scramble_vaddr)(struct hl_device *hdev, u64 virt_addr);
 };
 
 
@@ -1690,10 +1693,14 @@ struct hl_mmu_per_hop_info {
 * struct hl_mmu_hop_info - A structure describing the TLB hops and their
 * hop-entries that were created in order to translate a virtual address to a
 * physical one.
+ * @scrambled_vaddr: The value of the virtual address after scrambling.
This + * address replaces the original virtual-address when mapped + * in the MMU tables. * @hop_info: Array holding the per-hop information used for the translation. * @used_hops: The number of hops used for the translation. */ struct hl_mmu_hop_info { + u64 scrambled_vaddr; struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS]; u32 used_hops; }; @@ -2184,6 +2191,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr); int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops); +u64 hl_mmu_scramble_vaddr(struct hl_device *hdev, u64 virt_addr); bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 207fda36cac9..ada977eb136c 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -14,6 +14,9 @@ #define HL_MMU_DEBUG 0 +/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */ +#define DRAM_POOL_PAGE_SIZE SZ_8M + /* * The va ranges in context object contain a list with the available chunks of * device virtual memory. @@ -54,15 +57,14 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, struct hl_vm *vm = &hdev->vm; struct hl_vm_phys_pg_pack *phys_pg_pack; u64 paddr = 0, total_size, num_pgs, i; - u32 num_curr_pgs, page_size, page_shift; + u32 num_curr_pgs, page_size; int handle, rc; bool contiguous; num_curr_pgs = 0; page_size = hdev->asic_prop.dram_page_size; - page_shift = __ffs(page_size); - num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; - total_size = num_pgs << page_shift; + num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size); + total_size = num_pgs * page_size; if (!total_size) { dev_err(hdev->dev, "Cannot allocate 0 bytes\n"); @@ -518,7 +520,8 @@ static inline int add_va_block(struct hl_device *hdev, } /** - * get_va_block() - get a virtual block for the given size and alignment. + * get_va_block_pow2() - get a virtual block for the given size and alignment + * where alignment is a power of 2. * @hdev: pointer to the habanalabs device structure. * @va_range: pointer to the virtual addresses range. * @size: requested block size. @@ -531,12 +534,13 @@ static inline int add_va_block(struct hl_device *hdev, * - Reserve the requested block and update the list. * - Return the start address of the virtual block. 
*/ -static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, - u64 size, u64 hint_addr, u32 va_block_align) +static u64 get_va_block_pow2(struct hl_device *hdev, + struct hl_va_range *va_range, + u64 size, u64 hint_addr, u32 va_block_align) { struct hl_vm_va_block *va_block, *new_va_block = NULL; u64 valid_start, valid_size, prev_start, prev_end, align_mask, - res_valid_start = 0, res_valid_size = 0; + reserved_valid_start = 0, reserved_valid_size = 0; bool add_prev = false; align_mask = ~((u64)va_block_align - 1); @@ -562,34 +566,34 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, valid_size = va_block->end - valid_start; - if (valid_size >= size && - (!new_va_block || valid_size < res_valid_size)) { + if (valid_size >= size && (!new_va_block || + valid_size < reserved_valid_size)) { new_va_block = va_block; - res_valid_start = valid_start; - res_valid_size = valid_size; + reserved_valid_start = valid_start; + reserved_valid_size = valid_size; } if (hint_addr && hint_addr >= valid_start && - ((hint_addr + size) <= va_block->end)) { + (hint_addr + size) <= va_block->end) { new_va_block = va_block; - res_valid_start = hint_addr; - res_valid_size = valid_size; + reserved_valid_start = hint_addr; + reserved_valid_size = valid_size; break; } } if (!new_va_block) { dev_err(hdev->dev, "no available va block for size %llu\n", - size); + size); goto out; } - if (res_valid_start > new_va_block->start) { + if (reserved_valid_start > new_va_block->start) { prev_start = new_va_block->start; - prev_end = res_valid_start - 1; + prev_end = reserved_valid_start - 1; - new_va_block->start = res_valid_start; - new_va_block->size = res_valid_size; + new_va_block->start = reserved_valid_start; + new_va_block->size = reserved_valid_size; add_prev = true; } @@ -610,10 +614,98 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, out: mutex_unlock(&va_range->lock); - return res_valid_start; + return reserved_valid_start; } /** + * get_va_block_non_pow2() - get a virtual block for the given size and + * alignment where alignment is not a power of 2. + * @hdev: pointer to the habanalabs device structure. + * @va_range: pointer to the virtual addresses range. + * @size: requested block size. + * @hint_addr: hint for requested address by the user. + * @va_block_align: required alignment of the virtual block start address. + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * given size and alignment. + * - Reserve the requested block and update the list. + * - Return the start address of the virtual block. + */ +static u64 get_va_block_non_pow2(struct hl_device *hdev, + struct hl_va_range *va_range, + u64 size, u64 hint_addr, u32 va_block_align) +{ + struct hl_vm_va_block *va_block, *new_va_block = NULL; + u64 reserved_valid_start = 0; + + /* + * with non-power-of-2 range we work only with page granularity and the + * start address is page aligned, so no need for alignment checking. 
+ */ + size = DIV_ROUND_UP_ULL(size, va_range->page_size) * + va_range->page_size; + + mutex_lock(&va_range->lock); + + print_va_list_locked(hdev, &va_range->list); + + list_for_each_entry(va_block, &va_range->list, node) { + if ((va_block->start + size) > va_block->end) + continue; + + new_va_block = va_block; + reserved_valid_start = va_block->start; + break; + } + + if (!new_va_block) { + dev_err(hdev->dev, "no available va block for size %llu\n", + size); + goto out; + } + + if (new_va_block->size > size) { + new_va_block->start += size; + new_va_block->size = new_va_block->end - new_va_block->start; + } else { + list_del(&new_va_block->node); + kfree(new_va_block); + } + + print_va_list_locked(hdev, &va_range->list); +out: + mutex_unlock(&va_range->lock); + + return reserved_valid_start; +} + +/* + * get_va_block() - get a virtual block for the given size and alignment. + * @hdev: pointer to the habanalabs device structure. + * @va_range: pointer to the virtual addresses range. + * @size: requested block size. + * @hint_addr: hint for requested address by the user. + * @va_block_align: required alignment of the virtual block start address. + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * given size and alignment. + * - Reserve the requested block and update the list. + * - Return the start address of the virtual block. + */ +static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, + u64 size, u64 hint_addr, u32 va_block_align) +{ + if (is_power_of_2(va_range->page_size)) + return get_va_block_pow2(hdev, va_range, + size, hint_addr, va_block_align); + else + return get_va_block_non_pow2(hdev, va_range, + size, hint_addr, va_block_align); +} + +/* * hl_reserve_va_block() - reserve a virtual block of a given size. * @hdev: pointer to the habanalabs device structure. 
 * @ctx: current context
@@ -1024,7 +1116,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 
 		hint_addr = args->map_device.hint_addr;
 
-		/* DRAM VA alignment is the same as the DRAM page size */
+		/* DRAM VA alignment is the same as the MMU page size */
 		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
 		va_block_align = hdev->asic_prop.dmmu.page_size;
 	}
@@ -1129,6 +1221,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 		bool ctx_free)
 {
 	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 	struct hl_vm_hash_node *hnode = NULL;
 	struct hl_userptr *userptr = NULL;
@@ -1192,7 +1285,13 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 		goto mapping_cnt_err;
 	}
 
-	vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
+	if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
+		vaddr = prop->dram_base_address +
+			DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
+						phys_pg_pack->page_size) *
+							phys_pg_pack->page_size;
+	else
+		vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
 
 	mutex_lock(&ctx->mmu_lock);
 
@@ -1637,16 +1736,22 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
 
 	INIT_LIST_HEAD(&va_range->list);
 
-	/* PAGE_SIZE alignment */
+	/*
+	 * PAGE_SIZE alignment
+	 * It is the caller's responsibility to align the addresses if the
+	 * page size is not a power of 2
+	 */
+
+	if (is_power_of_2(page_size)) {
+		if (start & (PAGE_SIZE - 1)) {
+			start &= PAGE_MASK;
+			start += PAGE_SIZE;
+		}
 
-	if (start & (PAGE_SIZE - 1)) {
-		start &= PAGE_MASK;
-		start += PAGE_SIZE;
+		if (end & (PAGE_SIZE - 1))
+			end &= PAGE_MASK;
 	}
 
-	if (end & (PAGE_SIZE - 1))
-		end &= PAGE_MASK;
-
 	if (start >= end) {
 		dev_err(hdev->dev, "too small vm range for va list\n");
 		return -EFAULT;
@@ -1820,7 +1925,8 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
 
 		dram_range_start = prop->dmmu.start_addr;
 		dram_range_end = prop->dmmu.end_addr;
-		dram_page_size = prop->dmmu.page_size;
+		dram_page_size = prop->dram_page_size ?
+				prop->dram_page_size : prop->dmmu.page_size;
 		host_range_start = prop->pmmu.start_addr;
 		host_range_end = prop->pmmu.end_addr;
 		host_page_size = prop->pmmu.page_size;
@@ -1938,7 +2044,13 @@ int hl_vm_init(struct hl_device *hdev)
 	struct hl_vm *vm = &hdev->vm;
 	int rc;
 
-	vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
+	if (is_power_of_2(prop->dram_page_size))
+		vm->dram_pg_pool =
+			gen_pool_create(__ffs(prop->dram_page_size), -1);
+	else
+		vm->dram_pg_pool =
+			gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
+
 	if (!vm->dram_pg_pool) {
 		dev_err(hdev->dev, "Failed to create dram page pool\n");
 		return -ENOMEM;
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index 62cfa4190fe4..38234c243b21 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -166,7 +166,6 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 		mmu_prop = &prop->pmmu;
 
 	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
-
 	/*
 	 * The H/W handles mapping of specific page sizes. Hence if the page
 	 * size is bigger, we break it to sub-pages and unmap them separately.
@@ -174,11 +173,21 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 	if ((page_size % mmu_prop->page_size) == 0) {
 		real_page_size = mmu_prop->page_size;
 	} else {
-		dev_err(hdev->dev,
-			"page size of %u is not %uKB aligned, can't unmap\n",
-			page_size, mmu_prop->page_size >> 10);
+		/*
+		 * MMU page size may differ from DRAM page size.
+		 * In such case work with the DRAM page size and let the MMU
+		 * scrambling routine handle this mismatch when
+		 * calculating the address to remove from the MMU page table
+		 */
+		if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
+			real_page_size = prop->dram_page_size;
+		} else {
+			dev_err(hdev->dev,
+				"page size of %u is not %uKB aligned, can't unmap\n",
+				page_size, mmu_prop->page_size >> 10);
 
-		return -EFAULT;
+			return -EFAULT;
+		}
 	}
 
 	npages = page_size / real_page_size;
@@ -253,6 +262,17 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
 	 */
 	if ((page_size % mmu_prop->page_size) == 0) {
 		real_page_size = mmu_prop->page_size;
+	} else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
+			(prop->dram_page_size < mmu_prop->page_size)) {
+		/*
+		 * MMU page size may differ from DRAM page size.
+		 * In such case work with the DRAM page size and let the MMU
+		 * scrambling routine handle this mismatch when calculating
+		 * the address to place in the MMU page table. (in that case,
+		 * also make sure that the dram_page_size is smaller than the
+		 * MMU page size)
+		 */
+		real_page_size = prop->dram_page_size;
 	} else {
 		dev_err(hdev->dev,
 			"page size of %u is not %uKB aligned, can't map\n",
@@ -261,10 +281,21 @@
 		return -EFAULT;
 	}
 
-	if (phys_addr & (real_page_size - 1))
+	/*
+	 * Verify that the phys and virt addresses are aligned with the
+	 * MMU page size (in DRAM this means checking the addresses after
+	 * scrambling)
+	 */
+	if ((is_dram_addr &&
+			((hdev->asic_funcs->scramble_vaddr(hdev, phys_addr) &
+				(mmu_prop->page_size - 1)) ||
+			(hdev->asic_funcs->scramble_vaddr(hdev, virt_addr) &
+				(mmu_prop->page_size - 1)))) ||
+			(!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
+				(virt_addr & (real_page_size - 1)))))
 		dev_crit(hdev->dev,
-			"Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
-			phys_addr, real_page_size);
+			"Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
+			phys_addr, virt_addr, real_page_size);
 
 	npages = page_size / real_page_size;
 	real_virt_addr = virt_addr;
@@ -474,6 +505,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
 	if (!hdev->mmu_enable)
 		return -EOPNOTSUPP;
 
+	hops->scrambled_vaddr = virt_addr;	/* assume no scrambling */
+
 	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
 						prop->dmmu.start_addr,
 						prop->dmmu.end_addr);
@@ -513,3 +546,15 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
 
 	return 0;
 }
+
+/**
+ * hl_mmu_scramble_vaddr() - The generic mmu virtual address scrambling routine.
+ * @hdev: pointer to device data.
+ * @virt_addr: The virtual address to scramble.
+ *
+ * Return: The scrambled virtual address.
+ */
+u64 hl_mmu_scramble_vaddr(struct hl_device *hdev, u64 virt_addr)
+{
+	return virt_addr;
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 3d8414d3afa8..66194b7d3806 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -8308,7 +8308,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
 	.set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
 	.get_device_time = gaudi_get_device_time,
 	.collective_wait_init_cs = gaudi_collective_wait_init_cs,
-	.collective_wait_create_jobs = gaudi_collective_wait_create_jobs
+	.collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
+	.scramble_vaddr = hl_mmu_scramble_vaddr
 };
 
 /**
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index eeade9a45bc0..80198c39985a 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -5456,7 +5456,8 @@ static const struct hl_asic_funcs goya_funcs = {
 	.set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
 	.get_device_time = goya_get_device_time,
 	.collective_wait_init_cs = goya_collective_wait_init_cs,
-	.collective_wait_create_jobs = goya_collective_wait_create_jobs
+	.collective_wait_create_jobs = goya_collective_wait_create_jobs,
+	.scramble_vaddr = hl_mmu_scramble_vaddr
 };
 
 /*
-- cgit v1.2.3-58-ga151


From 6769cea8de590920c95603c4f3ce063e38033423 Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Thu, 31 Dec 2020 12:00:55 +0200
Subject: habanalabs: report correct dram size in info ioctl

In case MMU is enabled, we must take the MMU page size into
consideration when reporting the dram size to the user. This is because
the MMU page size can be a value which is NOT a power-of-2 value. As a
result, the total DRAM size (which is always a power-of-2 value) needs
to be rounded down.

Signed-off-by: Ofir Bitton
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/common/habanalabs_ioctl.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 018320e0331b..dfac5c8cadb3 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -62,7 +62,15 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
 			prop->dmmu.start_addr : prop->dram_user_base_address;
 	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
 	hw_ip.sram_size = prop->sram_size - sram_kmd_size;
-	hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+
+	if (hdev->mmu_enable)
+		hw_ip.dram_size =
+			DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
+						prop->dram_page_size) *
+							prop->dram_page_size;
+	else
+		hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+
 	if (hw_ip.dram_size > PAGE_SIZE)
 		hw_ip.dram_enabled = 1;
 	hw_ip.dram_page_size = prop->dram_page_size;
-- cgit v1.2.3-58-ga151


From edb07cb69caacb9be06a299cdf62f266292cc890 Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Sun, 27 Dec 2020 17:09:09 +0200
Subject: habanalabs: read device boot errors after cpucp is up

Boot cpu can report errors in various boot stages. The current
implementation does not take into consideration errors reported in late
stages, hence we now check for errors at the latest stage, when fetching
the cpucp information.
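The late-stage check reuses the existing error-register decoding, which
boils down to the following pattern (condensed from the fw_read_errors()
helper in the diff below):

	err_val = RREG32(boot_err0_reg);
	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
		return 0;	/* error reporting not enabled by the f/w */

	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
	/* ... one check and message per error bit ... */

	/* any bit set besides the "enabled" bit fails the boot */
	if (err_val & ~CPU_BOOT_ERR0_ENABLED)
		return -EIO;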
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 126 ++++++++++++--------- drivers/misc/habanalabs/common/habanalabs.h | 3 +- drivers/misc/habanalabs/gaudi/gaudi.c | 2 +- drivers/misc/habanalabs/goya/goya.c | 2 +- .../misc/habanalabs/include/common/hl_boot_if.h | 3 + 5 files changed, 80 insertions(+), 56 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 3cb3d32aa814..9f4ae21a126f 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -279,8 +279,68 @@ int hl_fw_send_heartbeat(struct hl_device *hdev) return rc; } +static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, + u32 cpu_security_boot_status_reg) +{ + u32 err_val, security_val; + + /* Some of the firmware status codes are deprecated in newer f/w + * versions. In those versions, the errors are reported + * in different registers. Therefore, we need to check those + * registers and print the exact errors. Moreover, there + * may be multiple errors, so we need to report on each error + * separately. Some of the error codes might indicate a state + * that is not an error per-se, but it is an error in production + * environment + */ + err_val = RREG32(boot_err0_reg); + if (!(err_val & CPU_BOOT_ERR0_ENABLED)) + return 0; + + if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) + dev_err(hdev->dev, + "Device boot error - DRAM initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) + dev_err(hdev->dev, "Device boot error - FIT image corrupted\n"); + if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) + dev_err(hdev->dev, + "Device boot error - Thermal Sensor initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) + dev_warn(hdev->dev, + "Device boot warning - Skipped DRAM initialization\n"); + if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) + dev_warn(hdev->dev, + "Device boot error - Skipped waiting for BMC\n"); + if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) + dev_err(hdev->dev, + "Device boot error - Serdes data from BMC not available\n"); + if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) + dev_err(hdev->dev, + "Device boot error - NIC F/W initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) + dev_warn(hdev->dev, + "Device boot warning - security not ready\n"); + if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) + dev_err(hdev->dev, "Device boot error - security failure\n"); + if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) + dev_err(hdev->dev, "Device boot error - eFuse failure\n"); + if (err_val & CPU_BOOT_ERR0_PLL_FAIL) + dev_err(hdev->dev, "Device boot error - PLL failure\n"); + + security_val = RREG32(cpu_security_boot_status_reg); + if (security_val & CPU_BOOT_DEV_STS0_ENABLED) + dev_dbg(hdev->dev, "Device security status %#x\n", + security_val); + + if (err_val & ~CPU_BOOT_ERR0_ENABLED) + return -EIO; + + return 0; +} + int hl_fw_cpucp_info_get(struct hl_device *hdev, - u32 cpu_security_boot_status_reg) + u32 cpu_security_boot_status_reg, + u32 boot_err0_reg) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct cpucp_packet pkt = {}; @@ -314,6 +374,12 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev, goto out; } + rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg); + if (rc) { + dev_err(hdev->dev, "Errors in device boot\n"); + goto out; + } + memcpy(&prop->cpucp_info, cpucp_info_cpu_addr, sizeof(prop->cpucp_info)); @@ -483,58 +549,6 @@ int 
hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index, return rc; } -static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, - u32 cpu_security_boot_status_reg) -{ - u32 err_val, security_val; - - /* Some of the firmware status codes are deprecated in newer f/w - * versions. In those versions, the errors are reported - * in different registers. Therefore, we need to check those - * registers and print the exact errors. Moreover, there - * may be multiple errors, so we need to report on each error - * separately. Some of the error codes might indicate a state - * that is not an error per-se, but it is an error in production - * environment - */ - err_val = RREG32(boot_err0_reg); - if (!(err_val & CPU_BOOT_ERR0_ENABLED)) - return; - - if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) - dev_err(hdev->dev, - "Device boot error - DRAM initialization failed\n"); - if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) - dev_err(hdev->dev, "Device boot error - FIT image corrupted\n"); - if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) - dev_err(hdev->dev, - "Device boot error - Thermal Sensor initialization failed\n"); - if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) - dev_warn(hdev->dev, - "Device boot warning - Skipped DRAM initialization\n"); - if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) - dev_warn(hdev->dev, - "Device boot error - Skipped waiting for BMC\n"); - if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) - dev_err(hdev->dev, - "Device boot error - Serdes data from BMC not available\n"); - if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) - dev_err(hdev->dev, - "Device boot error - NIC F/W initialization failed\n"); - if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) - dev_warn(hdev->dev, - "Device boot warning - security not ready\n"); - if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) - dev_err(hdev->dev, "Device boot error - security failure\n"); - if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) - dev_err(hdev->dev, "Device boot error - eFuse failure\n"); - - security_val = RREG32(cpu_security_boot_status_reg); - if (security_val & CPU_BOOT_DEV_STS0_ENABLED) - dev_dbg(hdev->dev, "Device security status %#x\n", - security_val); -} - static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) { /* Some of the status codes below are deprecated in newer f/w @@ -833,6 +847,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, goto out; } + rc = fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg); + if (rc) + return rc; + /* Clear reset status since we need to read again from app */ prop->hard_reset_done_by_fw = false; @@ -855,6 +873,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, dev_info(hdev->dev, "Successfully loaded firmware to device\n"); + return 0; + out: fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 5da26e81a251..eb43fb3065ce 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2209,7 +2209,8 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr); int hl_fw_send_heartbeat(struct hl_device *hdev); int hl_fw_cpucp_info_get(struct hl_device *hdev, - u32 cpu_security_boot_status_reg); + u32 cpu_security_boot_status_reg, + u32 boot_err0_reg); int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size); int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters); diff --git 
a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 66194b7d3806..f6cfd6e075e2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -7451,7 +7451,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
 	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
 		return 0;
 
-	rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
+	rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
 	if (rc)
 		return rc;
 
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 80198c39985a..cf0496b04044 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -5150,7 +5150,7 @@ int goya_cpucp_info_get(struct hl_device *hdev)
 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
 		return 0;
 
-	rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
+	rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
 	if (rc)
 		return rc;
 
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 93552d6b59b3..e29c77bdea07 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -69,6 +69,8 @@
 *					image has failed to match expected
 *					checksum. Trying to program image again
 *					might solve this.
+ *	CPU_BOOT_ERR0_PLL_FAIL		PLL settings failed, meaning that one
+ *					of the PLLs remained in REF_CLK
 *
 *	CPU_BOOT_ERR0_ENABLED		Error registers enabled.
 *					This is a main indication that the
@@ -88,6 +90,7 @@
 #define CPU_BOOT_ERR0_EFUSE_FAIL		(1 << 9)
 #define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL		(1 << 10)
 #define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL		(1 << 11)
+#define CPU_BOOT_ERR0_PLL_FAIL			(1 << 12)
 #define CPU_BOOT_ERR0_ENABLED			(1 << 31)
 
 /*
-- cgit v1.2.3-58-ga151


From 8563e19159b02ec26a6ec7dc7bac9766a8ac494c Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Mon, 28 Dec 2020 14:36:47 +0200
Subject: habanalabs: separate common code to dedicated folders

We separate some of the common code source files into dedicated folders
for better maintainability and testability.
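The build glue follows the kernel's per-directory Makefile pattern: the
common Makefile includes each sub-folder's Makefile and appends the file
list it defines (condensed from the diff below; each sub-folder Makefile
is expected to only set its HL_COMMON_*_FILES variable):

	include $(src)/common/mmu/Makefile
	habanalabs-y += $(HL_COMMON_MMU_FILES)

	include $(src)/common/pci/Makefile
	habanalabs-y += $(HL_COMMON_PCI_FILES)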
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/Makefile | 10 +- drivers/misc/habanalabs/common/mmu.c | 560 ---------------- drivers/misc/habanalabs/common/mmu/Makefile | 2 + drivers/misc/habanalabs/common/mmu/mmu.c | 560 ++++++++++++++++ drivers/misc/habanalabs/common/mmu/mmu_v1.c | 966 ++++++++++++++++++++++++++++ drivers/misc/habanalabs/common/mmu_v1.c | 966 ---------------------------- drivers/misc/habanalabs/common/pci.c | 407 ------------ drivers/misc/habanalabs/common/pci/Makefile | 2 + drivers/misc/habanalabs/common/pci/pci.c | 407 ++++++++++++ 9 files changed, 1945 insertions(+), 1935 deletions(-) delete mode 100644 drivers/misc/habanalabs/common/mmu.c create mode 100644 drivers/misc/habanalabs/common/mmu/Makefile create mode 100644 drivers/misc/habanalabs/common/mmu/mmu.c create mode 100644 drivers/misc/habanalabs/common/mmu/mmu_v1.c delete mode 100644 drivers/misc/habanalabs/common/mmu_v1.c delete mode 100644 drivers/misc/habanalabs/common/pci.c create mode 100644 drivers/misc/habanalabs/common/pci/Makefile create mode 100644 drivers/misc/habanalabs/common/pci/pci.c (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile index eccd8c7dc62d..5d8b48288cf4 100644 --- a/drivers/misc/habanalabs/common/Makefile +++ b/drivers/misc/habanalabs/common/Makefile @@ -1,7 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only + +include $(src)/common/mmu/Makefile +habanalabs-y += $(HL_COMMON_MMU_FILES) + +include $(src)/common/pci/Makefile +habanalabs-y += $(HL_COMMON_PCI_FILES) + HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \ common/asid.o common/habanalabs_ioctl.o \ common/command_buffer.o common/hw_queue.o common/irq.o \ common/sysfs.o common/hwmon.o common/memory.o \ - common/command_submission.o common/mmu.o common/mmu_v1.o \ - common/firmware_if.o common/pci.o + common/command_submission.o common/firmware_if.o diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c deleted file mode 100644 index 38234c243b21..000000000000 --- a/drivers/misc/habanalabs/common/mmu.c +++ /dev/null @@ -1,560 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Copyright 2016-2020 HabanaLabs, Ltd. - * All Rights Reserved. - */ - -#include - -#include "habanalabs.h" - -bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - - return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); -} - -/** - * hl_mmu_init() - initialize the MMU module. - * @hdev: habanalabs device structure. - * - * Return: 0 for success, non-zero for failure. - */ -int hl_mmu_init(struct hl_device *hdev) -{ - int rc = -EOPNOTSUPP; - - if (!hdev->mmu_enable) - return 0; - - if (hdev->mmu_func[MMU_DR_PGT].init != NULL) { - rc = hdev->mmu_func[MMU_DR_PGT].init(hdev); - if (rc) - return rc; - } - - if (hdev->mmu_func[MMU_HR_PGT].init != NULL) - rc = hdev->mmu_func[MMU_HR_PGT].init(hdev); - - return rc; -} - -/** - * hl_mmu_fini() - release the MMU module. - * @hdev: habanalabs device structure. - * - * This function does the following: - * - Disable MMU in H/W. - * - Free the pgt_infos pool. - * - * All contexts should be freed before calling this function. 
- */ -void hl_mmu_fini(struct hl_device *hdev) -{ - if (!hdev->mmu_enable) - return; - - if (hdev->mmu_func[MMU_DR_PGT].fini != NULL) - hdev->mmu_func[MMU_DR_PGT].fini(hdev); - - if (hdev->mmu_func[MMU_HR_PGT].fini != NULL) - hdev->mmu_func[MMU_HR_PGT].fini(hdev); -} - -/** - * hl_mmu_ctx_init() - initialize a context for using the MMU module. - * @ctx: pointer to the context structure to initialize. - * - * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all - * page tables hops related to this context. - * Return: 0 on success, non-zero otherwise. - */ -int hl_mmu_ctx_init(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - int rc = -EOPNOTSUPP; - - if (!hdev->mmu_enable) - return 0; - - mutex_init(&ctx->mmu_lock); - - if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) { - rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx); - if (rc) - return rc; - } - - if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) - rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx); - - return rc; -} - -/* - * hl_mmu_ctx_fini - disable a ctx from using the mmu module - * - * @ctx: pointer to the context structure - * - * This function does the following: - * - Free any pgts which were not freed yet - * - Free the mutex - * - Free DRAM default page mapping hops - */ -void hl_mmu_ctx_fini(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - - if (!hdev->mmu_enable) - return; - - if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL) - hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx); - - if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL) - hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx); - - mutex_destroy(&ctx->mmu_lock); -} - -/* - * hl_mmu_unmap_page - unmaps a virtual addr - * - * @ctx: pointer to the context structure - * @virt_addr: virt addr to map from - * @page_size: size of the page to unmap - * @flush_pte: whether to do a PCI flush - * - * This function does the following: - * - Check that the virt addr is mapped - * - Unmap the virt addr and frees pgts if possible - * - Returns 0 on success, -EINVAL if the given addr is not mapped - * - * Because this function changes the page tables in the device and because it - * changes the MMU hash, it must be protected by a lock. - * However, because it maps only a single page, the lock should be implemented - * in a higher level in order to protect the entire mapping of the memory area - * - * For optimization reasons PCI flush may be requested once after unmapping of - * large area. - */ -int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, - bool flush_pte) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - u64 real_virt_addr; - u32 real_page_size, npages; - int i, rc = 0, pgt_residency; - bool is_dram_addr; - - if (!hdev->mmu_enable) - return 0; - - is_dram_addr = hl_is_dram_va(hdev, virt_addr); - - if (is_dram_addr) - mmu_prop = &prop->dmmu; - else if ((page_size % prop->pmmu_huge.page_size) == 0) - mmu_prop = &prop->pmmu_huge; - else - mmu_prop = &prop->pmmu; - - pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; - /* - * The H/W handles mapping of specific page sizes. Hence if the page - * size is bigger, we break it to sub-pages and unmap them separately. - */ - if ((page_size % mmu_prop->page_size) == 0) { - real_page_size = mmu_prop->page_size; - } else { - /* - * MMU page size may differ from DRAM page size. 
- * In such case work with the DRAM page size and let the MMU - * scrambling routine to handle this mismatch when - * calculating the address to remove from the MMU page table - */ - if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) { - real_page_size = prop->dram_page_size; - } else { - dev_err(hdev->dev, - "page size of %u is not %uKB aligned, can't unmap\n", - page_size, mmu_prop->page_size >> 10); - - return -EFAULT; - } - } - - npages = page_size / real_page_size; - real_virt_addr = virt_addr; - - for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func[pgt_residency].unmap(ctx, - real_virt_addr, is_dram_addr); - if (rc) - break; - - real_virt_addr += real_page_size; - } - - if (flush_pte) - hdev->mmu_func[pgt_residency].flush(ctx); - - return rc; -} - -/* - * hl_mmu_map_page - maps a virtual addr to physical addr - * - * @ctx: pointer to the context structure - * @virt_addr: virt addr to map from - * @phys_addr: phys addr to map to - * @page_size: physical page size - * @flush_pte: whether to do a PCI flush - * - * This function does the following: - * - Check that the virt addr is not mapped - * - Allocate pgts as necessary in order to map the virt addr to the phys - * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM. - * - * Because this function changes the page tables in the device and because it - * changes the MMU hash, it must be protected by a lock. - * However, because it maps only a single page, the lock should be implemented - * in a higher level in order to protect the entire mapping of the memory area - * - * For optimization reasons PCI flush may be requested once after mapping of - * large area. - */ -int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, - u32 page_size, bool flush_pte) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - u64 real_virt_addr, real_phys_addr; - u32 real_page_size, npages; - int i, rc, pgt_residency, mapped_cnt = 0; - bool is_dram_addr; - - - if (!hdev->mmu_enable) - return 0; - - is_dram_addr = hl_is_dram_va(hdev, virt_addr); - - if (is_dram_addr) - mmu_prop = &prop->dmmu; - else if ((page_size % prop->pmmu_huge.page_size) == 0) - mmu_prop = &prop->pmmu_huge; - else - mmu_prop = &prop->pmmu; - - pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; - - /* - * The H/W handles mapping of specific page sizes. Hence if the page - * size is bigger, we break it to sub-pages and map them separately. - */ - if ((page_size % mmu_prop->page_size) == 0) { - real_page_size = mmu_prop->page_size; - } else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) && - (prop->dram_page_size < mmu_prop->page_size)) { - /* - * MMU page size may differ from DRAM page size. - * In such case work with the DRAM page size and let the MMU - * scrambling routine handle this mismatch when calculating - * the address to place in the MMU page table. 
(in that case - * also make sure that the dram_page_size smaller than the - * mmu page size) - */ - real_page_size = prop->dram_page_size; - } else { - dev_err(hdev->dev, - "page size of %u is not %uKB aligned, can't map\n", - page_size, mmu_prop->page_size >> 10); - - return -EFAULT; - } - - /* - * Verify that the phys and virt addresses are aligned with the - * MMU page size (in dram this means checking the address and MMU - * after scrambling) - */ - if ((is_dram_addr && - ((hdev->asic_funcs->scramble_vaddr(hdev, phys_addr) & - (mmu_prop->page_size - 1)) || - (hdev->asic_funcs->scramble_vaddr(hdev, virt_addr) & - (mmu_prop->page_size - 1)))) || - (!is_dram_addr && ((phys_addr & (real_page_size - 1)) || - (virt_addr & (real_page_size - 1))))) - dev_crit(hdev->dev, - "Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size", - phys_addr, virt_addr, real_page_size); - - npages = page_size / real_page_size; - real_virt_addr = virt_addr; - real_phys_addr = phys_addr; - - for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func[pgt_residency].map(ctx, - real_virt_addr, real_phys_addr, - real_page_size, is_dram_addr); - if (rc) - goto err; - - real_virt_addr += real_page_size; - real_phys_addr += real_page_size; - mapped_cnt++; - } - - if (flush_pte) - hdev->mmu_func[pgt_residency].flush(ctx); - - return 0; - -err: - real_virt_addr = virt_addr; - for (i = 0 ; i < mapped_cnt ; i++) { - if (hdev->mmu_func[pgt_residency].unmap(ctx, - real_virt_addr, is_dram_addr)) - dev_warn_ratelimited(hdev->dev, - "failed to unmap va: 0x%llx\n", real_virt_addr); - - real_virt_addr += real_page_size; - } - - hdev->mmu_func[pgt_residency].flush(ctx); - - return rc; -} - -/* - * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page - * for mapping contiguous physical memory - * - * @ctx: pointer to the context structure - * @virt_addr: virt addr to map from - * @phys_addr: phys addr to map to - * @size: size to map - * - */ -int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, - u64 phys_addr, u32 size) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 curr_va, curr_pa; - u32 page_size; - bool flush_pte; - int rc = 0, off; - - if (hl_mem_area_inside_range(virt_addr, size, - prop->dmmu.start_addr, prop->dmmu.end_addr)) - page_size = prop->dmmu.page_size; - else if (hl_mem_area_inside_range(virt_addr, size, - prop->pmmu.start_addr, prop->pmmu.end_addr)) - page_size = prop->pmmu.page_size; - else if (hl_mem_area_inside_range(virt_addr, size, - prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) - page_size = prop->pmmu_huge.page_size; - else - return -EINVAL; - - for (off = 0 ; off < size ; off += page_size) { - curr_va = virt_addr + off; - curr_pa = phys_addr + off; - flush_pte = (off + page_size) >= size; - rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size, - flush_pte); - if (rc) { - dev_err(hdev->dev, - "Map failed for va 0x%llx to pa 0x%llx\n", - curr_va, curr_pa); - goto unmap; - } - } - - return rc; - -unmap: - for (; off >= 0 ; off -= page_size) { - curr_va = virt_addr + off; - flush_pte = (off - (s32) page_size) < 0; - if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte)) - dev_warn_ratelimited(hdev->dev, - "failed to unmap va 0x%llx\n", curr_va); - } - - return rc; -} - -/* - * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page - * for unmapping contiguous physical memory - * - * @ctx: pointer to the context structure - * @virt_addr: 
virt addr to unmap - * @size: size to unmap - * - */ -int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 curr_va; - u32 page_size; - bool flush_pte; - int rc = 0, off; - - if (hl_mem_area_inside_range(virt_addr, size, - prop->dmmu.start_addr, prop->dmmu.end_addr)) - page_size = prop->dmmu.page_size; - else if (hl_mem_area_inside_range(virt_addr, size, - prop->pmmu.start_addr, prop->pmmu.end_addr)) - page_size = prop->pmmu.page_size; - else if (hl_mem_area_inside_range(virt_addr, size, - prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) - page_size = prop->pmmu_huge.page_size; - else - return -EINVAL; - - for (off = 0 ; off < size ; off += page_size) { - curr_va = virt_addr + off; - flush_pte = (off + page_size) >= size; - rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte); - if (rc) - dev_warn_ratelimited(hdev->dev, - "Unmap failed for va 0x%llx\n", curr_va); - } - - return rc; -} - -/* - * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out - * - * @ctx: pointer to the context structure - * - */ -void hl_mmu_swap_out(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - - if (!hdev->mmu_enable) - return; - - if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL) - hdev->mmu_func[MMU_DR_PGT].swap_out(ctx); - - if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL) - hdev->mmu_func[MMU_HR_PGT].swap_out(ctx); -} - -/* - * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in - * - * @ctx: pointer to the context structure - * - */ -void hl_mmu_swap_in(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - - if (!hdev->mmu_enable) - return; - - if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL) - hdev->mmu_func[MMU_DR_PGT].swap_in(ctx); - - if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL) - hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); -} - -int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) -{ - struct hl_mmu_hop_info hops; - u64 tmp_addr; - int rc; - - rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops); - if (rc) - return rc; - - /* last hop holds the phys address and flags */ - tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val; - *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK); - - return 0; -} - -int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, - struct hl_mmu_hop_info *hops) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - int rc; - bool is_dram_addr; - - if (!hdev->mmu_enable) - return -EOPNOTSUPP; - - hops->scrambled_vaddr = virt_addr; /* assume no scrambling */ - - is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); - - /* host-residency is the same in PMMU and HPMMU, use one of them */ - mmu_prop = is_dram_addr ? 
&prop->dmmu : &prop->pmmu; - - mutex_lock(&ctx->mmu_lock); - - if (mmu_prop->host_resident) - rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx, - virt_addr, hops); - else - rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx, - virt_addr, hops); - - mutex_unlock(&ctx->mmu_lock); - - return rc; -} - -int hl_mmu_if_set_funcs(struct hl_device *hdev) -{ - if (!hdev->mmu_enable) - return 0; - - switch (hdev->asic_type) { - case ASIC_GOYA: - case ASIC_GAUDI: - hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]); - break; - default: - dev_err(hdev->dev, "Unrecognized ASIC type %d\n", - hdev->asic_type); - return -EOPNOTSUPP; - } - - return 0; -} - -/** - * hl_mmu_scramble_vaddr() - The generic mmu virtual address scrambling routine. - * @hdev: pointer to device data. - * @virt_addr: The virtual address to scramble. - * - * Return: The scrambled virtual address. - */ -u64 hl_mmu_scramble_vaddr(struct hl_device *hdev, u64 virt_addr) -{ - return virt_addr; -} diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/misc/habanalabs/common/mmu/Makefile new file mode 100644 index 000000000000..d852c3874658 --- /dev/null +++ b/drivers/misc/habanalabs/common/mmu/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c new file mode 100644 index 000000000000..97c51686fcfe --- /dev/null +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2020 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include + +#include "../habanalabs.h" + +bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + + return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); +} + +/** + * hl_mmu_init() - initialize the MMU module. + * @hdev: habanalabs device structure. + * + * Return: 0 for success, non-zero for failure. + */ +int hl_mmu_init(struct hl_device *hdev) +{ + int rc = -EOPNOTSUPP; + + if (!hdev->mmu_enable) + return 0; + + if (hdev->mmu_func[MMU_DR_PGT].init != NULL) { + rc = hdev->mmu_func[MMU_DR_PGT].init(hdev); + if (rc) + return rc; + } + + if (hdev->mmu_func[MMU_HR_PGT].init != NULL) + rc = hdev->mmu_func[MMU_HR_PGT].init(hdev); + + return rc; +} + +/** + * hl_mmu_fini() - release the MMU module. + * @hdev: habanalabs device structure. + * + * This function does the following: + * - Disable MMU in H/W. + * - Free the pgt_infos pool. + * + * All contexts should be freed before calling this function. + */ +void hl_mmu_fini(struct hl_device *hdev) +{ + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].fini != NULL) + hdev->mmu_func[MMU_DR_PGT].fini(hdev); + + if (hdev->mmu_func[MMU_HR_PGT].fini != NULL) + hdev->mmu_func[MMU_HR_PGT].fini(hdev); +} + +/** + * hl_mmu_ctx_init() - initialize a context for using the MMU module. + * @ctx: pointer to the context structure to initialize. + * + * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all + * page tables hops related to this context. + * Return: 0 on success, non-zero otherwise. 
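+ *
+ * A rough lifecycle sketch (an assumption about typical use, not taken
+ * from this file): hl_mmu_init() once per device, hl_mmu_ctx_init() per
+ * context, map/unmap calls under that context, then hl_mmu_ctx_fini()
+ * and hl_mmu_fini() on teardown.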
+ */ +int hl_mmu_ctx_init(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + int rc = -EOPNOTSUPP; + + if (!hdev->mmu_enable) + return 0; + + mutex_init(&ctx->mmu_lock); + + if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) { + rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx); + if (rc) + return rc; + } + + if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) + rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx); + + return rc; +} + +/* + * hl_mmu_ctx_fini - disable a ctx from using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Free any pgts which were not freed yet + * - Free the mutex + * - Free DRAM default page mapping hops + */ +void hl_mmu_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL) + hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL) + hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx); + + mutex_destroy(&ctx->mmu_lock); +} + +/* + * hl_mmu_unmap_page - unmaps a virtual addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @page_size: size of the page to unmap + * @flush_pte: whether to do a PCI flush + * + * This function does the following: + * - Check that the virt addr is mapped + * - Unmap the virt addr and frees pgts if possible + * - Returns 0 on success, -EINVAL if the given addr is not mapped + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it maps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + * + * For optimization reasons PCI flush may be requested once after unmapping of + * large area. + */ +int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, + bool flush_pte) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 real_virt_addr; + u32 real_page_size, npages; + int i, rc = 0, pgt_residency; + bool is_dram_addr; + + if (!hdev->mmu_enable) + return 0; + + is_dram_addr = hl_is_dram_va(hdev, virt_addr); + + if (is_dram_addr) + mmu_prop = &prop->dmmu; + else if ((page_size % prop->pmmu_huge.page_size) == 0) + mmu_prop = &prop->pmmu_huge; + else + mmu_prop = &prop->pmmu; + + pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; + /* + * The H/W handles mapping of specific page sizes. Hence if the page + * size is bigger, we break it to sub-pages and unmap them separately. + */ + if ((page_size % mmu_prop->page_size) == 0) { + real_page_size = mmu_prop->page_size; + } else { + /* + * MMU page size may differ from DRAM page size. 
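+		 * (For example, the DRAM page size may be smaller than the
+		 * MMU page size.)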
+		 * In such case work with the DRAM page size and let the MMU
+		 * scrambling routine handle this mismatch when
+		 * calculating the address to remove from the MMU page table
+		 */
+		if (is_dram_addr && ((page_size % prop->dram_page_size) == 0)) {
+			real_page_size = prop->dram_page_size;
+		} else {
+			dev_err(hdev->dev,
+				"page size of %u is not %uKB aligned, can't unmap\n",
+				page_size, mmu_prop->page_size >> 10);
+
+			return -EFAULT;
+		}
+	}
+
+	npages = page_size / real_page_size;
+	real_virt_addr = virt_addr;
+
+	for (i = 0 ; i < npages ; i++) {
+		rc = hdev->mmu_func[pgt_residency].unmap(ctx,
+						real_virt_addr, is_dram_addr);
+		if (rc)
+			break;
+
+		real_virt_addr += real_page_size;
+	}
+
+	if (flush_pte)
+		hdev->mmu_func[pgt_residency].flush(ctx);
+
+	return rc;
+}
+
+/*
+ * hl_mmu_map_page - maps a virtual addr to physical addr
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to map from
+ * @phys_addr: phys addr to map to
+ * @page_size: physical page size
+ * @flush_pte: whether to do a PCI flush
+ *
+ * This function does the following:
+ * - Check that the virt addr is not mapped
+ * - Allocate pgts as necessary in order to map the virt addr to the phys
+ * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
+ *
+ * Because this function changes the page tables in the device and because it
+ * changes the MMU hash, it must be protected by a lock.
+ * However, because it maps only a single page, the lock should be implemented
+ * in a higher level in order to protect the entire mapping of the memory area
+ *
+ * For optimization reasons PCI flush may be requested once after mapping of
+ * large area.
+ */
+int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+		u32 page_size, bool flush_pte)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct hl_mmu_properties *mmu_prop;
+	u64 real_virt_addr, real_phys_addr;
+	u32 real_page_size, npages;
+	int i, rc, pgt_residency, mapped_cnt = 0;
+	bool is_dram_addr;
+
+
+	if (!hdev->mmu_enable)
+		return 0;
+
+	is_dram_addr = hl_is_dram_va(hdev, virt_addr);
+
+	if (is_dram_addr)
+		mmu_prop = &prop->dmmu;
+	else if ((page_size % prop->pmmu_huge.page_size) == 0)
+		mmu_prop = &prop->pmmu_huge;
+	else
+		mmu_prop = &prop->pmmu;
+
+	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+
+	/*
+	 * The H/W handles mapping of specific page sizes. Hence if the page
+	 * size is bigger, we break it to sub-pages and map them separately.
+	 */
+	if ((page_size % mmu_prop->page_size) == 0) {
+		real_page_size = mmu_prop->page_size;
+	} else if (is_dram_addr && ((page_size % prop->dram_page_size) == 0) &&
+			(prop->dram_page_size < mmu_prop->page_size)) {
+		/*
+		 * MMU page size may differ from DRAM page size.
+		 * In such case work with the DRAM page size and let the MMU
+		 * scrambling routine handle this mismatch when calculating
+		 * the address to place in the MMU page table. (in that case
+		 * also make sure that the dram_page_size is smaller than the
+		 * mmu page size)
+		 */
+		real_page_size = prop->dram_page_size;
+	} else {
+		dev_err(hdev->dev,
+			"page size of %u is not %uKB aligned, can't map\n",
+			page_size, mmu_prop->page_size >> 10);
+
+		return -EFAULT;
+	}
+
+	/*
+	 * Verify that the phys and virt addresses are aligned with the
+	 * MMU page size (in dram this means checking the address and MMU
+	 * after scrambling)
+	 */
+	if ((is_dram_addr &&
+		((hdev->asic_funcs->scramble_vaddr(hdev, phys_addr) &
+			(mmu_prop->page_size - 1)) ||
+		(hdev->asic_funcs->scramble_vaddr(hdev, virt_addr) &
+			(mmu_prop->page_size - 1)))) ||
+		(!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
+				(virt_addr & (real_page_size - 1)))))
+		dev_crit(hdev->dev,
+			"Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
+			phys_addr, virt_addr, real_page_size);
+
+	npages = page_size / real_page_size;
+	real_virt_addr = virt_addr;
+	real_phys_addr = phys_addr;
+
+	for (i = 0 ; i < npages ; i++) {
+		rc = hdev->mmu_func[pgt_residency].map(ctx,
+						real_virt_addr, real_phys_addr,
+						real_page_size, is_dram_addr);
+		if (rc)
+			goto err;
+
+		real_virt_addr += real_page_size;
+		real_phys_addr += real_page_size;
+		mapped_cnt++;
+	}
+
+	if (flush_pte)
+		hdev->mmu_func[pgt_residency].flush(ctx);
+
+	return 0;
+
+err:
+	real_virt_addr = virt_addr;
+	for (i = 0 ; i < mapped_cnt ; i++) {
+		if (hdev->mmu_func[pgt_residency].unmap(ctx,
+					real_virt_addr, is_dram_addr))
+			dev_warn_ratelimited(hdev->dev,
+				"failed to unmap va: 0x%llx\n", real_virt_addr);
+
+		real_virt_addr += real_page_size;
+	}
+
+	hdev->mmu_func[pgt_residency].flush(ctx);
+
+	return rc;
+}
+
+/*
+ * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page
+ *                         for mapping contiguous physical memory
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to map from
+ * @phys_addr: phys addr to map to
+ * @size: size to map
+ *
+ */
+int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
+					u64 phys_addr, u32 size)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u64 curr_va, curr_pa;
+	u32 page_size;
+	bool flush_pte;
+	int rc = 0, off;
+
+	if (hl_mem_area_inside_range(virt_addr, size,
+			prop->dmmu.start_addr, prop->dmmu.end_addr))
+		page_size = prop->dmmu.page_size;
+	else if (hl_mem_area_inside_range(virt_addr, size,
+			prop->pmmu.start_addr, prop->pmmu.end_addr))
+		page_size = prop->pmmu.page_size;
+	else if (hl_mem_area_inside_range(virt_addr, size,
+			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
+		page_size = prop->pmmu_huge.page_size;
+	else
+		return -EINVAL;
+
+	for (off = 0 ; off < size ; off += page_size) {
+		curr_va = virt_addr + off;
+		curr_pa = phys_addr + off;
+		flush_pte = (off + page_size) >= size;
+		rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
+								flush_pte);
+		if (rc) {
+			dev_err(hdev->dev,
+				"Map failed for va 0x%llx to pa 0x%llx\n",
+				curr_va, curr_pa);
+			goto unmap;
+		}
+	}
+
+	return rc;
+
+unmap:
+	for (; off >= 0 ; off -= page_size) {
+		curr_va = virt_addr + off;
+		flush_pte = (off - (s32) page_size) < 0;
+		if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte))
+			dev_warn_ratelimited(hdev->dev,
+				"failed to unmap va 0x%llx\n", curr_va);
+	}
+
+	return rc;
+}
+
+/*
+ * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page
+ *                           for unmapping contiguous physical memory
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: 
virt addr to unmap + * @size: size to unmap + * + */ +int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 curr_va; + u32 page_size; + bool flush_pte; + int rc = 0, off; + + if (hl_mem_area_inside_range(virt_addr, size, + prop->dmmu.start_addr, prop->dmmu.end_addr)) + page_size = prop->dmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu.start_addr, prop->pmmu.end_addr)) + page_size = prop->pmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) + page_size = prop->pmmu_huge.page_size; + else + return -EINVAL; + + for (off = 0 ; off < size ; off += page_size) { + curr_va = virt_addr + off; + flush_pte = (off + page_size) >= size; + rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte); + if (rc) + dev_warn_ratelimited(hdev->dev, + "Unmap failed for va 0x%llx\n", curr_va); + } + + return rc; +} + +/* + * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out + * + * @ctx: pointer to the context structure + * + */ +void hl_mmu_swap_out(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL) + hdev->mmu_func[MMU_DR_PGT].swap_out(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL) + hdev->mmu_func[MMU_HR_PGT].swap_out(ctx); +} + +/* + * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in + * + * @ctx: pointer to the context structure + * + */ +void hl_mmu_swap_in(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL) + hdev->mmu_func[MMU_DR_PGT].swap_in(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL) + hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); +} + +int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) +{ + struct hl_mmu_hop_info hops; + u64 tmp_addr; + int rc; + + rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops); + if (rc) + return rc; + + /* last hop holds the phys address and flags */ + tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val; + *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK); + + return 0; +} + +int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + int rc; + bool is_dram_addr; + + if (!hdev->mmu_enable) + return -EOPNOTSUPP; + + hops->scrambled_vaddr = virt_addr; /* assume no scrambling */ + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + + /* host-residency is the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? 
&prop->dmmu : &prop->pmmu; + + mutex_lock(&ctx->mmu_lock); + + if (mmu_prop->host_resident) + rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx, + virt_addr, hops); + else + rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx, + virt_addr, hops); + + mutex_unlock(&ctx->mmu_lock); + + return rc; +} + +int hl_mmu_if_set_funcs(struct hl_device *hdev) +{ + if (!hdev->mmu_enable) + return 0; + + switch (hdev->asic_type) { + case ASIC_GOYA: + case ASIC_GAUDI: + hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]); + break; + default: + dev_err(hdev->dev, "Unrecognized ASIC type %d\n", + hdev->asic_type); + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * hl_mmu_scramble_vaddr() - The generic mmu virtual address scrambling routine. + * @hdev: pointer to device data. + * @virt_addr: The virtual address to scramble. + * + * Return: The scrambled virtual address. + */ +u64 hl_mmu_scramble_vaddr(struct hl_device *hdev, u64 virt_addr) +{ + return virt_addr; +} diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c new file mode 100644 index 000000000000..c5e93ff32586 --- /dev/null +++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c @@ -0,0 +1,966 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "../habanalabs.h" +#include "../../include/hw_ip/mmu/mmu_general.h" + +#include + +static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr); + +static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = NULL; + + hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node, + (unsigned long) hop_addr) + if (hop_addr == pgt_info->shadow_addr) + break; + + return pgt_info; +} + +static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info) +{ + struct hl_device *hdev = ctx->hdev; + + gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr, + hdev->asic_prop.mmu_hop_table_size); + hash_del(&pgt_info->node); + kfree((u64 *) (uintptr_t) pgt_info->shadow_addr); + kfree(pgt_info); +} + +static void free_hop(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + + _free_hop(ctx, pgt_info); +} + +static u64 alloc_hop(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pgt_info *pgt_info; + u64 phys_addr, shadow_addr; + + pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL); + if (!pgt_info) + return ULLONG_MAX; + + phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool, + prop->mmu_hop_table_size); + if (!phys_addr) { + dev_err(hdev->dev, "failed to allocate page\n"); + goto pool_add_err; + } + + shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size, + GFP_KERNEL); + if (!shadow_addr) + goto shadow_err; + + pgt_info->phys_addr = phys_addr; + pgt_info->shadow_addr = shadow_addr; + pgt_info->ctx = ctx; + pgt_info->num_of_ptes = 0; + hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr); + + return shadow_addr; + +shadow_err: + gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr, + prop->mmu_hop_table_size); +pool_add_err: + kfree(pgt_info); + + return ULLONG_MAX; +} + +static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx) +{ + return ctx->hdev->asic_prop.mmu_pgt_addr + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static inline u64 get_hop0_addr(struct hl_ctx *ctx) +{ + return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 + + (ctx->asid * 
ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static void flush(struct hl_ctx *ctx) +{ + /* flush all writes from all cores to reach PCI */ + mb(); + ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx)); +} + +/* transform the value to physical address when writing to H/W */ +static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val) +{ + /* + * The value to write is actually the address of the next shadow hop + + * flags at the 12 LSBs. + * Hence in order to get the value to write to the physical PTE, we + * clear the 12 LSBs and translate the shadow hop to its associated + * physical hop, and add back the original 12 LSBs. + */ + u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) | + (val & FLAGS_MASK); + + ctx->hdev->asic_funcs->write_pte(ctx->hdev, + get_phys_addr(ctx, shadow_pte_addr), + phys_val); + + *(u64 *) (uintptr_t) shadow_pte_addr = val; +} + +/* do not transform the value to physical address when writing to H/W */ +static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, + u64 val) +{ + ctx->hdev->asic_funcs->write_pte(ctx->hdev, + get_phys_addr(ctx, shadow_pte_addr), + val); + *(u64 *) (uintptr_t) shadow_pte_addr = val; +} + +/* clear the last and present bits */ +static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr) +{ + /* no need to transform the value to physical address */ + write_final_pte(ctx, pte_addr, 0); +} + +static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + get_pgt_info(ctx, hop_addr)->num_of_ptes++; +} + +/* + * put_pte - decrement the num of ptes and free the hop if possible + * + * @ctx: pointer to the context structure + * @hop_addr: addr of the hop + * + * This function returns the number of ptes left on this hop. If the number is + * 0, it means the pte was freed. 
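+ *
+ * A hypothetical sequence for a hop holding two PTEs:
+ *   get_pte(ctx, hop_addr);	num_of_ptes: 0 -> 1
+ *   get_pte(ctx, hop_addr);	num_of_ptes: 1 -> 2
+ *   put_pte(ctx, hop_addr);	returns 1, hop is kept
+ *   put_pte(ctx, hop_addr);	returns 0, hop is freed via _free_hop()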
+ */ +static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + int num_of_ptes_left; + + pgt_info->num_of_ptes--; + + /* + * Need to save the number of ptes left because free_hop might free + * the pgt_info + */ + num_of_ptes_left = pgt_info->num_of_ptes; + if (!num_of_ptes_left) + _free_hop(ctx, pgt_info); + + return num_of_ptes_left; +} + +static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr, u64 mask, u64 shift) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & mask) >> shift); +} + +static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask, + mmu_prop->hop0_shift); +} + +static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask, + mmu_prop->hop1_shift); +} + +static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask, + mmu_prop->hop2_shift); +} + +static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask, + mmu_prop->hop3_shift); +} + +static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask, + mmu_prop->hop4_shift); +} + +static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte) +{ + if (curr_pte & PAGE_PRESENT_MASK) + return curr_pte & HOP_PHYS_ADDR_MASK; + else + return ULLONG_MAX; +} + +static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, + bool *is_new_hop) +{ + u64 hop_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop_addr == ULLONG_MAX) { + hop_addr = alloc_hop(ctx); + *is_new_hop = (hop_addr != ULLONG_MAX); + } + + return hop_addr; +} + +/* translates shadow address inside hop to a physical address */ +static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr) +{ + u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1); + u64 shadow_hop_addr = shadow_addr & ~page_mask; + u64 pte_offset = shadow_addr & page_mask; + u64 phys_hop_addr; + + if (shadow_hop_addr != get_hop0_addr(ctx)) + phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr; + else + phys_hop_addr = get_phys_hop0_addr(ctx); + + return phys_hop_addr + pte_offset; +} + +static int dram_default_mapping_init(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr, + hop2_pte_addr, hop3_pte_addr, pte_val; + int rc, i, j, hop3_allocated = 0; + + if ((!prop->dram_supports_virtual_memory) || + (!hdev->dram_default_page_mapping) || + (ctx->asid == HL_KERNEL_ASID_ID)) + return 0; + + num_of_hop3 = prop->dram_size_for_default_page_mapping; + do_div(num_of_hop3, prop->dram_page_size); + do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + + /* add hop1 and hop2 */ + total_hops = num_of_hop3 + 2; + + ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL); + if (!ctx->dram_default_hops) + return -ENOMEM; + + hop0_addr = get_hop0_addr(ctx); + + hop1_addr = 
alloc_hop(ctx); + if (hop1_addr == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 1\n"); + rc = -ENOMEM; + goto hop1_err; + } + + ctx->dram_default_hops[total_hops - 1] = hop1_addr; + + hop2_addr = alloc_hop(ctx); + if (hop2_addr == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 2\n"); + rc = -ENOMEM; + goto hop2_err; + } + + ctx->dram_default_hops[total_hops - 2] = hop2_addr; + + for (i = 0 ; i < num_of_hop3 ; i++) { + ctx->dram_default_hops[i] = alloc_hop(ctx); + if (ctx->dram_default_hops[i] == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i); + rc = -ENOMEM; + goto hop3_err; + } + hop3_allocated++; + } + + /* need only pte 0 in hops 0 and 1 */ + pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop0_addr, pte_val); + + pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop1_addr, pte_val); + get_pte(ctx, hop1_addr); + + hop2_pte_addr = hop2_addr; + for (i = 0 ; i < num_of_hop3 ; i++) { + pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + write_pte(ctx, hop2_pte_addr, pte_val); + get_pte(ctx, hop2_addr); + hop2_pte_addr += HL_PTE_SIZE; + } + + pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) | + LAST_MASK | PAGE_PRESENT_MASK; + + for (i = 0 ; i < num_of_hop3 ; i++) { + hop3_pte_addr = ctx->dram_default_hops[i]; + for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + write_final_pte(ctx, hop3_pte_addr, pte_val); + get_pte(ctx, ctx->dram_default_hops[i]); + hop3_pte_addr += HL_PTE_SIZE; + } + } + + flush(ctx); + + return 0; + +hop3_err: + for (i = 0 ; i < hop3_allocated ; i++) + free_hop(ctx, ctx->dram_default_hops[i]); + + free_hop(ctx, hop2_addr); +hop2_err: + free_hop(ctx, hop1_addr); +hop1_err: + kfree(ctx->dram_default_hops); + + return rc; +} + +static void dram_default_mapping_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr, + hop2_pte_addr, hop3_pte_addr; + int i, j; + + if ((!prop->dram_supports_virtual_memory) || + (!hdev->dram_default_page_mapping) || + (ctx->asid == HL_KERNEL_ASID_ID)) + return; + + num_of_hop3 = prop->dram_size_for_default_page_mapping; + do_div(num_of_hop3, prop->dram_page_size); + do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + + hop0_addr = get_hop0_addr(ctx); + /* add hop1 and hop2 */ + total_hops = num_of_hop3 + 2; + hop1_addr = ctx->dram_default_hops[total_hops - 1]; + hop2_addr = ctx->dram_default_hops[total_hops - 2]; + + for (i = 0 ; i < num_of_hop3 ; i++) { + hop3_pte_addr = ctx->dram_default_hops[i]; + for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + clear_pte(ctx, hop3_pte_addr); + put_pte(ctx, ctx->dram_default_hops[i]); + hop3_pte_addr += HL_PTE_SIZE; + } + } + + hop2_pte_addr = hop2_addr; + hop2_pte_addr = hop2_addr; + for (i = 0 ; i < num_of_hop3 ; i++) { + clear_pte(ctx, hop2_pte_addr); + put_pte(ctx, hop2_addr); + hop2_pte_addr += HL_PTE_SIZE; + } + + clear_pte(ctx, hop1_addr); + put_pte(ctx, hop1_addr); + clear_pte(ctx, hop0_addr); + + kfree(ctx->dram_default_hops); + + flush(ctx); +} + +/** + * hl_mmu_v1_init() - initialize the MMU module. + * @hdev: habanalabs device structure. + * + * This function does the following: + * - Create a pool of pages for pgt_infos. + * - Create a shadow table for pgt + * + * Return: 0 for success, non-zero for failure. 
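+ *
+ * A sizing sketch with made-up numbers: with a 4KB hop table and 1024
+ * ASIDs, the first 4MB of the device pgt region stay reserved for the
+ * per-ASID hop0 tables and only the remainder is added to the gen_pool.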
+ */ +static int hl_mmu_v1_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + hdev->mmu_priv.dr.mmu_pgt_pool = + gen_pool_create(__ffs(prop->mmu_hop_table_size), -1); + + if (!hdev->mmu_priv.dr.mmu_pgt_pool) { + dev_err(hdev->dev, "Failed to create page gen pool\n"); + return -ENOMEM; + } + + rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr + + prop->mmu_hop0_tables_total_size, + prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size, + -1); + if (rc) { + dev_err(hdev->dev, "Failed to add memory to page gen pool\n"); + goto err_pool_add; + } + + hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, + prop->mmu_hop_table_size, + GFP_KERNEL | __GFP_ZERO); + if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) { + rc = -ENOMEM; + goto err_pool_add; + } + + /* MMU H/W init will be done in device hw_init() */ + + return 0; + +err_pool_add: + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); + + return rc; +} + +/** + * hl_mmu_fini() - release the MMU module. + * @hdev: habanalabs device structure. + * + * This function does the following: + * - Disable MMU in H/W. + * - Free the pgt_infos pool. + * + * All contexts should be freed before calling this function. + */ +static void hl_mmu_v1_fini(struct hl_device *hdev) +{ + /* MMU H/W fini was already done in device hw_fini() */ + + if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) { + kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0); + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); + } + + /* Make sure that if we arrive here again without init was called we + * won't cause kernel panic. This can happen for example if we fail + * during hard reset code at certain points + */ + hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL; +} + +/** + * hl_mmu_ctx_init() - initialize a context for using the MMU module. + * @ctx: pointer to the context structure to initialize. + * + * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all + * page tables hops related to this context. + * Return: 0 on success, non-zero otherwise. 
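+ *
+ * Note: for this v1 implementation the mutex is initialized by the
+ * common hl_mmu_ctx_init() wrapper; this callback only seeds the shadow
+ * pgt hash and, when enabled, the DRAM default page mapping.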
+ */ +static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx) +{ + hash_init(ctx->mmu_shadow_hash); + return dram_default_mapping_init(ctx); +} + +/* + * hl_mmu_ctx_fini - disable a ctx from using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Free any pgts which were not freed yet + * - Free the mutex + * - Free DRAM default page mapping hops + */ +static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct pgt_info *pgt_info; + struct hlist_node *tmp; + int i; + + dram_default_mapping_fini(ctx); + + if (!hash_empty(ctx->mmu_shadow_hash)) + dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n", + ctx->asid); + + hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) { + dev_err_ratelimited(hdev->dev, + "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n", + pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes); + _free_hop(ctx, pgt_info); + } +} + +static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, + u64 virt_addr, bool is_dram_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + u64 hop0_addr = 0, hop0_pte_addr = 0, + hop1_addr = 0, hop1_pte_addr = 0, + hop2_addr = 0, hop2_pte_addr = 0, + hop3_addr = 0, hop3_pte_addr = 0, + hop4_addr = 0, hop4_pte_addr = 0, + curr_pte; + bool is_huge, clear_hop3 = true; + + /* shifts and masks are the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; + + hop0_addr = get_hop0_addr(ctx); + hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; + + hop1_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop1_addr == ULLONG_MAX) + goto not_mapped; + + hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; + + hop2_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop2_addr == ULLONG_MAX) + goto not_mapped; + + hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; + + hop3_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop3_addr == ULLONG_MAX) + goto not_mapped; + + hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; + + is_huge = curr_pte & LAST_MASK; + + if (is_dram_addr && !is_huge) { + dev_err(hdev->dev, + "DRAM unmapping should use huge pages only\n"); + return -EFAULT; + } + + if (!is_huge) { + hop4_addr = get_next_hop_addr(ctx, curr_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, + virt_addr); + + curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; + + clear_hop3 = false; + } + + if (hdev->dram_default_page_mapping && is_dram_addr) { + u64 default_pte = (prop->mmu_dram_default_page_addr & + HOP_PHYS_ADDR_MASK) | LAST_MASK | + PAGE_PRESENT_MASK; + if (curr_pte == default_pte) { + dev_err(hdev->dev, + "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n", + virt_addr); + goto not_mapped; + } + + if (!(curr_pte & PAGE_PRESENT_MASK)) { + dev_err(hdev->dev, + "DRAM: hop3 PTE is cleared! 
 can't unmap, va: 0x%llx\n",
+				virt_addr);
+			goto not_mapped;
+		}
+
+		write_final_pte(ctx, hop3_pte_addr, default_pte);
+		put_pte(ctx, hop3_addr);
+	} else {
+		if (!(curr_pte & PAGE_PRESENT_MASK))
+			goto not_mapped;
+
+		if (hop4_addr)
+			clear_pte(ctx, hop4_pte_addr);
+		else
+			clear_pte(ctx, hop3_pte_addr);
+
+		if (hop4_addr && !put_pte(ctx, hop4_addr))
+			clear_hop3 = true;
+
+		if (!clear_hop3)
+			goto mapped;
+
+		clear_pte(ctx, hop3_pte_addr);
+
+		if (put_pte(ctx, hop3_addr))
+			goto mapped;
+
+		clear_pte(ctx, hop2_pte_addr);
+
+		if (put_pte(ctx, hop2_addr))
+			goto mapped;
+
+		clear_pte(ctx, hop1_pte_addr);
+
+		if (put_pte(ctx, hop1_addr))
+			goto mapped;
+
+		clear_pte(ctx, hop0_pte_addr);
+	}
+
+mapped:
+	return 0;
+
+not_mapped:
+	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+		virt_addr);
+
+	return -EINVAL;
+}
+
+static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+			u32 page_size, bool is_dram_addr)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct hl_mmu_properties *mmu_prop;
+	u64 hop0_addr = 0, hop0_pte_addr = 0,
+		hop1_addr = 0, hop1_pte_addr = 0,
+		hop2_addr = 0, hop2_pte_addr = 0,
+		hop3_addr = 0, hop3_pte_addr = 0,
+		hop4_addr = 0, hop4_pte_addr = 0,
+		curr_pte = 0;
+	bool hop1_new = false, hop2_new = false, hop3_new = false,
+		hop4_new = false, is_huge;
+	int rc = -ENOMEM;
+
+	/*
+	 * This mapping function can map a page or a huge page. For huge page
+	 * there are only 3 hops rather than 4. Currently the DRAM allocation
+	 * uses huge pages only but user memory could have been allocated with
+	 * one of the two page sizes. Since this is common code for all three
+	 * cases, we need this huge page check.
+	 */
+	if (is_dram_addr) {
+		mmu_prop = &prop->dmmu;
+		is_huge = true;
+	} else if (page_size == prop->pmmu_huge.page_size) {
+		mmu_prop = &prop->pmmu_huge;
+		is_huge = true;
+	} else {
+		mmu_prop = &prop->pmmu;
+		is_huge = false;
+	}
+
+	hop0_addr = get_hop0_addr(ctx);
+	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
+	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
+
+	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
+	if (hop1_addr == ULLONG_MAX)
+		goto err;
+
+	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
+	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
+
+	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
+	if (hop2_addr == ULLONG_MAX)
+		goto err;
+
+	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
+	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
+
+	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
+	if (hop3_addr == ULLONG_MAX)
+		goto err;
+
+	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
+
+	if (!is_huge) {
+		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
+		if (hop4_addr == ULLONG_MAX)
+			goto err;
+
+		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+							virt_addr);
+		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
+	}
+
+	if (hdev->dram_default_page_mapping && is_dram_addr) {
+		u64 default_pte = (prop->mmu_dram_default_page_addr &
+					HOP_PHYS_ADDR_MASK) | LAST_MASK |
+						PAGE_PRESENT_MASK;
+
+		if (curr_pte != default_pte) {
+			dev_err(hdev->dev,
+				"DRAM: mapping already exists for virt_addr 0x%llx\n",
+				virt_addr);
+			rc = -EINVAL;
+			goto err;
+		}
+
+		if (hop1_new || hop2_new || hop3_new || hop4_new) {
+			dev_err(hdev->dev,
+				"DRAM mapping should not allocate more 
hops\n"); + rc = -EFAULT; + goto err; + } + } else if (curr_pte & PAGE_PRESENT_MASK) { + dev_err(hdev->dev, + "mapping already exists for virt_addr 0x%llx\n", + virt_addr); + + dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr); + dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr); + dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr); + dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr); + + if (!is_huge) + dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n", + *(u64 *) (uintptr_t) hop4_pte_addr, + hop4_pte_addr); + + rc = -EINVAL; + goto err; + } + + curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK + | PAGE_PRESENT_MASK; + + if (is_huge) + write_final_pte(ctx, hop3_pte_addr, curr_pte); + else + write_final_pte(ctx, hop4_pte_addr, curr_pte); + + if (hop1_new) { + curr_pte = + (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop0_pte_addr, curr_pte); + } + if (hop2_new) { + curr_pte = + (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop1_pte_addr, curr_pte); + get_pte(ctx, hop1_addr); + } + if (hop3_new) { + curr_pte = + (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + write_pte(ctx, hop2_pte_addr, curr_pte); + get_pte(ctx, hop2_addr); + } + + if (!is_huge) { + if (hop4_new) { + curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + write_pte(ctx, hop3_pte_addr, curr_pte); + get_pte(ctx, hop3_addr); + } + + get_pte(ctx, hop4_addr); + } else { + get_pte(ctx, hop3_addr); + } + + return 0; + +err: + if (hop4_new) + free_hop(ctx, hop4_addr); + if (hop3_new) + free_hop(ctx, hop3_addr); + if (hop2_new) + free_hop(ctx, hop2_addr); + if (hop1_new) + free_hop(ctx, hop1_addr); + + return rc; +} + +/* + * hl_mmu_v1_swap_out - marks all mapping of the given ctx as swapped out + * + * @ctx: pointer to the context structure + * + */ +static void hl_mmu_v1_swap_out(struct hl_ctx *ctx) +{ + +} + +/* + * hl_mmu_v1_swap_in - marks all mapping of the given ctx as swapped in + * + * @ctx: pointer to the context structure + * + */ +static void hl_mmu_v1_swap_in(struct hl_ctx *ctx) +{ + +} + +static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + int hop_num, u64 hop_addr, u64 virt_addr) +{ + switch (hop_num) { + case 0: + return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 1: + return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 2: + return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 3: + return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 4: + return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + default: + break; + } + return U64_MAX; +} + +static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge; + int i, used_hops; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size, + prop->pmmu.start_addr, + prop->pmmu.end_addr); + is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr, + prop->pmmu_huge.page_size, + prop->pmmu_huge.start_addr, + 
prop->pmmu_huge.end_addr); + if (is_dram_addr) { + mmu_prop = &prop->dmmu; + is_huge = true; + } else if (is_pmmu_addr) { + mmu_prop = &prop->pmmu; + is_huge = false; + } else if (is_pmmu_h_addr) { + mmu_prop = &prop->pmmu_huge; + is_huge = true; + } else { + return -EINVAL; + } + + used_hops = mmu_prop->num_hops; + + /* huge pages use lesser hops */ + if (is_huge) + used_hops--; + + hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx); + hops->hop_info[0].hop_pte_addr = + get_hop_pte_addr(ctx, mmu_prop, 0, + hops->hop_info[0].hop_addr, virt_addr); + hops->hop_info[0].hop_pte_val = + hdev->asic_funcs->read_pte(hdev, + hops->hop_info[0].hop_pte_addr); + + for (i = 1 ; i < used_hops ; i++) { + hops->hop_info[i].hop_addr = + get_next_hop_addr(ctx, + hops->hop_info[i - 1].hop_pte_val); + if (hops->hop_info[i].hop_addr == ULLONG_MAX) + return -EFAULT; + + hops->hop_info[i].hop_pte_addr = + get_hop_pte_addr(ctx, mmu_prop, i, + hops->hop_info[i].hop_addr, + virt_addr); + hops->hop_info[i].hop_pte_val = + hdev->asic_funcs->read_pte(hdev, + hops->hop_info[i].hop_pte_addr); + + if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) + return -EFAULT; + + if (hops->hop_info[i].hop_pte_val & LAST_MASK) + break; + } + + /* if passed over all hops then no last hop was found */ + if (i == mmu_prop->num_hops) + return -EFAULT; + + if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) + return -EFAULT; + + hops->used_hops = i + 1; + + return 0; +} + +/* + * hl_mmu_v1_prepare - prepare mmu for working with mmu v1 + * + * @hdev: pointer to the device structure + */ +void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu) +{ + mmu->init = hl_mmu_v1_init; + mmu->fini = hl_mmu_v1_fini; + mmu->ctx_init = hl_mmu_v1_ctx_init; + mmu->ctx_fini = hl_mmu_v1_ctx_fini; + mmu->map = _hl_mmu_v1_map; + mmu->unmap = _hl_mmu_v1_unmap; + mmu->flush = flush; + mmu->swap_out = hl_mmu_v1_swap_out; + mmu->swap_in = hl_mmu_v1_swap_in; + mmu->get_tlb_info = hl_mmu_v1_get_tlb_info; +} diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c deleted file mode 100644 index 06d8a44dd5d4..000000000000 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ /dev/null @@ -1,966 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Copyright 2016-2019 HabanaLabs, Ltd. - * All Rights Reserved. 
- */ - -#include "habanalabs.h" -#include "../include/hw_ip/mmu/mmu_general.h" - -#include - -static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr); - -static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr) -{ - struct pgt_info *pgt_info = NULL; - - hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node, - (unsigned long) hop_addr) - if (hop_addr == pgt_info->shadow_addr) - break; - - return pgt_info; -} - -static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info) -{ - struct hl_device *hdev = ctx->hdev; - - gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr, - hdev->asic_prop.mmu_hop_table_size); - hash_del(&pgt_info->node); - kfree((u64 *) (uintptr_t) pgt_info->shadow_addr); - kfree(pgt_info); -} - -static void free_hop(struct hl_ctx *ctx, u64 hop_addr) -{ - struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); - - _free_hop(ctx, pgt_info); -} - -static u64 alloc_hop(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct pgt_info *pgt_info; - u64 phys_addr, shadow_addr; - - pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL); - if (!pgt_info) - return ULLONG_MAX; - - phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool, - prop->mmu_hop_table_size); - if (!phys_addr) { - dev_err(hdev->dev, "failed to allocate page\n"); - goto pool_add_err; - } - - shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size, - GFP_KERNEL); - if (!shadow_addr) - goto shadow_err; - - pgt_info->phys_addr = phys_addr; - pgt_info->shadow_addr = shadow_addr; - pgt_info->ctx = ctx; - pgt_info->num_of_ptes = 0; - hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr); - - return shadow_addr; - -shadow_err: - gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr, - prop->mmu_hop_table_size); -pool_add_err: - kfree(pgt_info); - - return ULLONG_MAX; -} - -static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx) -{ - return ctx->hdev->asic_prop.mmu_pgt_addr + - (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); -} - -static inline u64 get_hop0_addr(struct hl_ctx *ctx) -{ - return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 + - (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); -} - -static void flush(struct hl_ctx *ctx) -{ - /* flush all writes from all cores to reach PCI */ - mb(); - ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx)); -} - -/* transform the value to physical address when writing to H/W */ -static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val) -{ - /* - * The value to write is actually the address of the next shadow hop + - * flags at the 12 LSBs. - * Hence in order to get the value to write to the physical PTE, we - * clear the 12 LSBs and translate the shadow hop to its associated - * physical hop, and add back the original 12 LSBs. 
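- *
- * A sketch with invented values: for val == 0xabc000 | 0x3, the shadow
- * hop at 0xabc000 is looked up in the pgt hash and its physical twin,
- * with the 0x3 flag bits restored, is what is written to the device.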
- */ - u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) | - (val & FLAGS_MASK); - - ctx->hdev->asic_funcs->write_pte(ctx->hdev, - get_phys_addr(ctx, shadow_pte_addr), - phys_val); - - *(u64 *) (uintptr_t) shadow_pte_addr = val; -} - -/* do not transform the value to physical address when writing to H/W */ -static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, - u64 val) -{ - ctx->hdev->asic_funcs->write_pte(ctx->hdev, - get_phys_addr(ctx, shadow_pte_addr), - val); - *(u64 *) (uintptr_t) shadow_pte_addr = val; -} - -/* clear the last and present bits */ -static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr) -{ - /* no need to transform the value to physical address */ - write_final_pte(ctx, pte_addr, 0); -} - -static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr) -{ - get_pgt_info(ctx, hop_addr)->num_of_ptes++; -} - -/* - * put_pte - decrement the num of ptes and free the hop if possible - * - * @ctx: pointer to the context structure - * @hop_addr: addr of the hop - * - * This function returns the number of ptes left on this hop. If the number is - * 0, it means the pte was freed. - */ -static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr) -{ - struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); - int num_of_ptes_left; - - pgt_info->num_of_ptes--; - - /* - * Need to save the number of ptes left because free_hop might free - * the pgt_info - */ - num_of_ptes_left = pgt_info->num_of_ptes; - if (!num_of_ptes_left) - _free_hop(ctx, pgt_info); - - return num_of_ptes_left; -} - -static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, - u64 virt_addr, u64 mask, u64 shift) -{ - return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * - ((virt_addr & mask) >> shift); -} - -static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask, - mmu_prop->hop0_shift); -} - -static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask, - mmu_prop->hop1_shift); -} - -static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask, - mmu_prop->hop2_shift); -} - -static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask, - mmu_prop->hop3_shift); -} - -static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask, - mmu_prop->hop4_shift); -} - -static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte) -{ - if (curr_pte & PAGE_PRESENT_MASK) - return curr_pte & HOP_PHYS_ADDR_MASK; - else - return ULLONG_MAX; -} - -static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, - bool *is_new_hop) -{ - u64 hop_addr = get_next_hop_addr(ctx, curr_pte); - - if (hop_addr == ULLONG_MAX) { - hop_addr = alloc_hop(ctx); - *is_new_hop = (hop_addr != ULLONG_MAX); - } - - return hop_addr; -} - -/* translates shadow address inside hop to a physical address */ -static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr) -{ - u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size 
- 1); - u64 shadow_hop_addr = shadow_addr & ~page_mask; - u64 pte_offset = shadow_addr & page_mask; - u64 phys_hop_addr; - - if (shadow_hop_addr != get_hop0_addr(ctx)) - phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr; - else - phys_hop_addr = get_phys_hop0_addr(ctx); - - return phys_hop_addr + pte_offset; -} - -static int dram_default_mapping_init(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr, - hop2_pte_addr, hop3_pte_addr, pte_val; - int rc, i, j, hop3_allocated = 0; - - if ((!prop->dram_supports_virtual_memory) || - (!hdev->dram_default_page_mapping) || - (ctx->asid == HL_KERNEL_ASID_ID)) - return 0; - - num_of_hop3 = prop->dram_size_for_default_page_mapping; - do_div(num_of_hop3, prop->dram_page_size); - do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); - - /* add hop1 and hop2 */ - total_hops = num_of_hop3 + 2; - - ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL); - if (!ctx->dram_default_hops) - return -ENOMEM; - - hop0_addr = get_hop0_addr(ctx); - - hop1_addr = alloc_hop(ctx); - if (hop1_addr == ULLONG_MAX) { - dev_err(hdev->dev, "failed to alloc hop 1\n"); - rc = -ENOMEM; - goto hop1_err; - } - - ctx->dram_default_hops[total_hops - 1] = hop1_addr; - - hop2_addr = alloc_hop(ctx); - if (hop2_addr == ULLONG_MAX) { - dev_err(hdev->dev, "failed to alloc hop 2\n"); - rc = -ENOMEM; - goto hop2_err; - } - - ctx->dram_default_hops[total_hops - 2] = hop2_addr; - - for (i = 0 ; i < num_of_hop3 ; i++) { - ctx->dram_default_hops[i] = alloc_hop(ctx); - if (ctx->dram_default_hops[i] == ULLONG_MAX) { - dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i); - rc = -ENOMEM; - goto hop3_err; - } - hop3_allocated++; - } - - /* need only pte 0 in hops 0 and 1 */ - pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop0_addr, pte_val); - - pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop1_addr, pte_val); - get_pte(ctx, hop1_addr); - - hop2_pte_addr = hop2_addr; - for (i = 0 ; i < num_of_hop3 ; i++) { - pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) | - PAGE_PRESENT_MASK; - write_pte(ctx, hop2_pte_addr, pte_val); - get_pte(ctx, hop2_addr); - hop2_pte_addr += HL_PTE_SIZE; - } - - pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) | - LAST_MASK | PAGE_PRESENT_MASK; - - for (i = 0 ; i < num_of_hop3 ; i++) { - hop3_pte_addr = ctx->dram_default_hops[i]; - for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { - write_final_pte(ctx, hop3_pte_addr, pte_val); - get_pte(ctx, ctx->dram_default_hops[i]); - hop3_pte_addr += HL_PTE_SIZE; - } - } - - flush(ctx); - - return 0; - -hop3_err: - for (i = 0 ; i < hop3_allocated ; i++) - free_hop(ctx, ctx->dram_default_hops[i]); - - free_hop(ctx, hop2_addr); -hop2_err: - free_hop(ctx, hop1_addr); -hop1_err: - kfree(ctx->dram_default_hops); - - return rc; -} - -static void dram_default_mapping_fini(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr, - hop2_pte_addr, hop3_pte_addr; - int i, j; - - if ((!prop->dram_supports_virtual_memory) || - (!hdev->dram_default_page_mapping) || - (ctx->asid == HL_KERNEL_ASID_ID)) - return; - - num_of_hop3 = prop->dram_size_for_default_page_mapping; - do_div(num_of_hop3, prop->dram_page_size); - do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); - - hop0_addr = get_hop0_addr(ctx); 
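-	/*
-	 * dram_default_hops layout, mirroring dram_default_mapping_init():
-	 * entries [0 .. num_of_hop3 - 1] hold the hop3 tables, entry
-	 * total_hops - 2 holds hop2 and entry total_hops - 1 holds hop1.
-	 */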
-	/* add hop1 and hop2 */
-	total_hops = num_of_hop3 + 2;
-	hop1_addr = ctx->dram_default_hops[total_hops - 1];
-	hop2_addr = ctx->dram_default_hops[total_hops - 2];
-
-	for (i = 0 ; i < num_of_hop3 ; i++) {
-		hop3_pte_addr = ctx->dram_default_hops[i];
-		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
-			clear_pte(ctx, hop3_pte_addr);
-			put_pte(ctx, ctx->dram_default_hops[i]);
-			hop3_pte_addr += HL_PTE_SIZE;
-		}
-	}
-
-	hop2_pte_addr = hop2_addr;
-	for (i = 0 ; i < num_of_hop3 ; i++) {
-		clear_pte(ctx, hop2_pte_addr);
-		put_pte(ctx, hop2_addr);
-		hop2_pte_addr += HL_PTE_SIZE;
-	}
-
-	clear_pte(ctx, hop1_addr);
-	put_pte(ctx, hop1_addr);
-	clear_pte(ctx, hop0_addr);
-
-	kfree(ctx->dram_default_hops);
-
-	flush(ctx);
-}
-
-/**
- * hl_mmu_v1_init() - initialize the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Create a pool of pages for pgt_infos.
- * - Create a shadow table for pgt
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int hl_mmu_v1_init(struct hl_device *hdev)
-{
-	struct asic_fixed_properties *prop = &hdev->asic_prop;
-	int rc;
-
-	hdev->mmu_priv.dr.mmu_pgt_pool =
-			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
-	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
-		dev_err(hdev->dev, "Failed to create page gen pool\n");
-		return -ENOMEM;
-	}
-
-	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
-			prop->mmu_hop0_tables_total_size,
-			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
-			-1);
-	if (rc) {
-		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
-		goto err_pool_add;
-	}
-
-	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
-						prop->mmu_hop_table_size,
-						GFP_KERNEL | __GFP_ZERO);
-	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
-		rc = -ENOMEM;
-		goto err_pool_add;
-	}
-
-	/* MMU H/W init will be done in device hw_init() */
-
-	return 0;
-
-err_pool_add:
-	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
-	return rc;
-}
-
-/**
- * hl_mmu_fini() - release the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Disable MMU in H/W.
- * - Free the pgt_infos pool.
- *
- * All contexts should be freed before calling this function.
- */
-static void hl_mmu_v1_fini(struct hl_device *hdev)
-{
-	/* MMU H/W fini was already done in device hw_fini() */
-
-	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
-		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
-		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-	}
-
-	/* Make sure that if we arrive here again without init was called we
-	 * won't cause kernel panic. This can happen for example if we fail
-	 * during hard reset code at certain points
-	 */
-	hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
-}
-
-/**
- * hl_mmu_ctx_init() - initialize a context for using the MMU module.
- * @ctx: pointer to the context structure to initialize.
- *
- * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
- * page tables hops related to this context.
- * Return: 0 on success, non-zero otherwise.
- */ -static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx) -{ - hash_init(ctx->mmu_shadow_hash); - return dram_default_mapping_init(ctx); -} - -/* - * hl_mmu_ctx_fini - disable a ctx from using the mmu module - * - * @ctx: pointer to the context structure - * - * This function does the following: - * - Free any pgts which were not freed yet - * - Free the mutex - * - Free DRAM default page mapping hops - */ -static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx) -{ - struct hl_device *hdev = ctx->hdev; - struct pgt_info *pgt_info; - struct hlist_node *tmp; - int i; - - dram_default_mapping_fini(ctx); - - if (!hash_empty(ctx->mmu_shadow_hash)) - dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n", - ctx->asid); - - hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) { - dev_err_ratelimited(hdev->dev, - "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n", - pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes); - _free_hop(ctx, pgt_info); - } -} - -static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, - u64 virt_addr, bool is_dram_addr) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - u64 hop0_addr = 0, hop0_pte_addr = 0, - hop1_addr = 0, hop1_pte_addr = 0, - hop2_addr = 0, hop2_pte_addr = 0, - hop3_addr = 0, hop3_pte_addr = 0, - hop4_addr = 0, hop4_pte_addr = 0, - curr_pte; - bool is_huge, clear_hop3 = true; - - /* shifts and masks are the same in PMMU and HPMMU, use one of them */ - mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; - - hop0_addr = get_hop0_addr(ctx); - hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; - - hop1_addr = get_next_hop_addr(ctx, curr_pte); - - if (hop1_addr == ULLONG_MAX) - goto not_mapped; - - hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; - - hop2_addr = get_next_hop_addr(ctx, curr_pte); - - if (hop2_addr == ULLONG_MAX) - goto not_mapped; - - hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; - - hop3_addr = get_next_hop_addr(ctx, curr_pte); - - if (hop3_addr == ULLONG_MAX) - goto not_mapped; - - hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; - - is_huge = curr_pte & LAST_MASK; - - if (is_dram_addr && !is_huge) { - dev_err(hdev->dev, - "DRAM unmapping should use huge pages only\n"); - return -EFAULT; - } - - if (!is_huge) { - hop4_addr = get_next_hop_addr(ctx, curr_pte); - - if (hop4_addr == ULLONG_MAX) - goto not_mapped; - - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, - virt_addr); - - curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; - - clear_hop3 = false; - } - - if (hdev->dram_default_page_mapping && is_dram_addr) { - u64 default_pte = (prop->mmu_dram_default_page_addr & - HOP_PHYS_ADDR_MASK) | LAST_MASK | - PAGE_PRESENT_MASK; - if (curr_pte == default_pte) { - dev_err(hdev->dev, - "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n", - virt_addr); - goto not_mapped; - } - - if (!(curr_pte & PAGE_PRESENT_MASK)) { - dev_err(hdev->dev, - "DRAM: hop3 PTE is cleared! 
can't unmap, va: 0x%llx\n", - virt_addr); - goto not_mapped; - } - - write_final_pte(ctx, hop3_pte_addr, default_pte); - put_pte(ctx, hop3_addr); - } else { - if (!(curr_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - - if (hop4_addr) - clear_pte(ctx, hop4_pte_addr); - else - clear_pte(ctx, hop3_pte_addr); - - if (hop4_addr && !put_pte(ctx, hop4_addr)) - clear_hop3 = true; - - if (!clear_hop3) - goto mapped; - - clear_pte(ctx, hop3_pte_addr); - - if (put_pte(ctx, hop3_addr)) - goto mapped; - - clear_pte(ctx, hop2_pte_addr); - - if (put_pte(ctx, hop2_addr)) - goto mapped; - - clear_pte(ctx, hop1_pte_addr); - - if (put_pte(ctx, hop1_addr)) - goto mapped; - - clear_pte(ctx, hop0_pte_addr); - } - -mapped: - return 0; - -not_mapped: - dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", - virt_addr); - - return -EINVAL; -} - -static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, - u32 page_size, bool is_dram_addr) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - u64 hop0_addr = 0, hop0_pte_addr = 0, - hop1_addr = 0, hop1_pte_addr = 0, - hop2_addr = 0, hop2_pte_addr = 0, - hop3_addr = 0, hop3_pte_addr = 0, - hop4_addr = 0, hop4_pte_addr = 0, - curr_pte = 0; - bool hop1_new = false, hop2_new = false, hop3_new = false, - hop4_new = false, is_huge; - int rc = -ENOMEM; - - /* - * This mapping function can map a page or a huge page. For huge page - * there are only 3 hops rather than 4. Currently the DRAM allocation - * uses huge pages only but user memory could have been allocated with - * one of the two page sizes. Since this is a common code for all the - * three cases, we need this hugs page check. - */ - if (is_dram_addr) { - mmu_prop = &prop->dmmu; - is_huge = true; - } else if (page_size == prop->pmmu_huge.page_size) { - mmu_prop = &prop->pmmu_huge; - is_huge = true; - } else { - mmu_prop = &prop->pmmu; - is_huge = false; - } - - hop0_addr = get_hop0_addr(ctx); - hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr; - - hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new); - if (hop1_addr == ULLONG_MAX) - goto err; - - hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr; - - hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new); - if (hop2_addr == ULLONG_MAX) - goto err; - - hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr; - - hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new); - if (hop3_addr == ULLONG_MAX) - goto err; - - hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr; - - if (!is_huge) { - hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new); - if (hop4_addr == ULLONG_MAX) - goto err; - - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr, - virt_addr); - curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr; - } - - if (hdev->dram_default_page_mapping && is_dram_addr) { - u64 default_pte = (prop->mmu_dram_default_page_addr & - HOP_PHYS_ADDR_MASK) | LAST_MASK | - PAGE_PRESENT_MASK; - - if (curr_pte != default_pte) { - dev_err(hdev->dev, - "DRAM: mapping already exists for virt_addr 0x%llx\n", - virt_addr); - rc = -EINVAL; - goto err; - } - - if (hop1_new || hop2_new || hop3_new || hop4_new) { - dev_err(hdev->dev, - "DRAM mapping should not allocate more 
hops\n"); - rc = -EFAULT; - goto err; - } - } else if (curr_pte & PAGE_PRESENT_MASK) { - dev_err(hdev->dev, - "mapping already exists for virt_addr 0x%llx\n", - virt_addr); - - dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr); - dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr); - dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr); - dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr); - - if (!is_huge) - dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n", - *(u64 *) (uintptr_t) hop4_pte_addr, - hop4_pte_addr); - - rc = -EINVAL; - goto err; - } - - curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK - | PAGE_PRESENT_MASK; - - if (is_huge) - write_final_pte(ctx, hop3_pte_addr, curr_pte); - else - write_final_pte(ctx, hop4_pte_addr, curr_pte); - - if (hop1_new) { - curr_pte = - (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop0_pte_addr, curr_pte); - } - if (hop2_new) { - curr_pte = - (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop1_pte_addr, curr_pte); - get_pte(ctx, hop1_addr); - } - if (hop3_new) { - curr_pte = - (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; - write_pte(ctx, hop2_pte_addr, curr_pte); - get_pte(ctx, hop2_addr); - } - - if (!is_huge) { - if (hop4_new) { - curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) | - PAGE_PRESENT_MASK; - write_pte(ctx, hop3_pte_addr, curr_pte); - get_pte(ctx, hop3_addr); - } - - get_pte(ctx, hop4_addr); - } else { - get_pte(ctx, hop3_addr); - } - - return 0; - -err: - if (hop4_new) - free_hop(ctx, hop4_addr); - if (hop3_new) - free_hop(ctx, hop3_addr); - if (hop2_new) - free_hop(ctx, hop2_addr); - if (hop1_new) - free_hop(ctx, hop1_addr); - - return rc; -} - -/* - * hl_mmu_v1_swap_out - marks all mapping of the given ctx as swapped out - * - * @ctx: pointer to the context structure - * - */ -static void hl_mmu_v1_swap_out(struct hl_ctx *ctx) -{ - -} - -/* - * hl_mmu_v1_swap_in - marks all mapping of the given ctx as swapped in - * - * @ctx: pointer to the context structure - * - */ -static void hl_mmu_v1_swap_in(struct hl_ctx *ctx) -{ - -} - -static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_prop, - int hop_num, u64 hop_addr, u64 virt_addr) -{ - switch (hop_num) { - case 0: - return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - case 1: - return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - case 2: - return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - case 3: - return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - case 4: - return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - default: - break; - } - return U64_MAX; -} - -static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, - struct hl_mmu_hop_info *hops) -{ - struct hl_device *hdev = ctx->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge; - int i, used_hops; - - is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); - is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size, - prop->pmmu.start_addr, - prop->pmmu.end_addr); - is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr, - prop->pmmu_huge.page_size, - prop->pmmu_huge.start_addr, - 
prop->pmmu_huge.end_addr); - if (is_dram_addr) { - mmu_prop = &prop->dmmu; - is_huge = true; - } else if (is_pmmu_addr) { - mmu_prop = &prop->pmmu; - is_huge = false; - } else if (is_pmmu_h_addr) { - mmu_prop = &prop->pmmu_huge; - is_huge = true; - } else { - return -EINVAL; - } - - used_hops = mmu_prop->num_hops; - - /* huge pages use lesser hops */ - if (is_huge) - used_hops--; - - hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx); - hops->hop_info[0].hop_pte_addr = - get_hop_pte_addr(ctx, mmu_prop, 0, - hops->hop_info[0].hop_addr, virt_addr); - hops->hop_info[0].hop_pte_val = - hdev->asic_funcs->read_pte(hdev, - hops->hop_info[0].hop_pte_addr); - - for (i = 1 ; i < used_hops ; i++) { - hops->hop_info[i].hop_addr = - get_next_hop_addr(ctx, - hops->hop_info[i - 1].hop_pte_val); - if (hops->hop_info[i].hop_addr == ULLONG_MAX) - return -EFAULT; - - hops->hop_info[i].hop_pte_addr = - get_hop_pte_addr(ctx, mmu_prop, i, - hops->hop_info[i].hop_addr, - virt_addr); - hops->hop_info[i].hop_pte_val = - hdev->asic_funcs->read_pte(hdev, - hops->hop_info[i].hop_pte_addr); - - if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) - return -EFAULT; - - if (hops->hop_info[i].hop_pte_val & LAST_MASK) - break; - } - - /* if passed over all hops then no last hop was found */ - if (i == mmu_prop->num_hops) - return -EFAULT; - - if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) - return -EFAULT; - - hops->used_hops = i + 1; - - return 0; -} - -/* - * hl_mmu_v1_prepare - prepare mmu for working with mmu v1 - * - * @hdev: pointer to the device structure - */ -void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu) -{ - mmu->init = hl_mmu_v1_init; - mmu->fini = hl_mmu_v1_fini; - mmu->ctx_init = hl_mmu_v1_ctx_init; - mmu->ctx_fini = hl_mmu_v1_ctx_fini; - mmu->map = _hl_mmu_v1_map; - mmu->unmap = _hl_mmu_v1_unmap; - mmu->flush = flush; - mmu->swap_out = hl_mmu_v1_swap_out; - mmu->swap_in = hl_mmu_v1_swap_in; - mmu->get_tlb_info = hl_mmu_v1_get_tlb_info; -} diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c deleted file mode 100644 index b4725e6101f6..000000000000 --- a/drivers/misc/habanalabs/common/pci.c +++ /dev/null @@ -1,407 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Copyright 2016-2019 HabanaLabs, Ltd. - * All Rights Reserved. - */ - -#include "habanalabs.h" -#include "../include/hw_ip/pci/pci_general.h" - -#include - -#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10) - -#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31) -#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30) -#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19) -#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8) - -/** - * hl_pci_bars_map() - Map PCI BARs. - * @hdev: Pointer to hl_device structure. - * @name: Array of BAR names. - * @is_wc: Array with flag per BAR whether a write-combined mapping is needed. - * - * Request PCI regions and map them to kernel virtual addresses. - * - * Return: 0 on success, non-zero for failure. - */ -int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], - bool is_wc[3]) -{ - struct pci_dev *pdev = hdev->pdev; - int rc, i, bar; - - rc = pci_request_regions(pdev, HL_NAME); - if (rc) { - dev_err(hdev->dev, "Cannot obtain PCI resources\n"); - return rc; - } - - for (i = 0 ; i < 3 ; i++) { - bar = i * 2; /* 64-bit BARs */ - hdev->pcie_bar[bar] = is_wc[i] ? 
- pci_ioremap_wc_bar(pdev, bar) : - pci_ioremap_bar(pdev, bar); - if (!hdev->pcie_bar[bar]) { - dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n", - is_wc[i] ? "_wc" : "", name[i]); - rc = -ENODEV; - goto err; - } - } - - return 0; - -err: - for (i = 2 ; i >= 0 ; i--) { - bar = i * 2; /* 64-bit BARs */ - if (hdev->pcie_bar[bar]) - iounmap(hdev->pcie_bar[bar]); - } - - pci_release_regions(pdev); - - return rc; -} - -/** - * hl_pci_bars_unmap() - Unmap PCI BARS. - * @hdev: Pointer to hl_device structure. - * - * Release all PCI BARs and unmap their virtual addresses. - */ -static void hl_pci_bars_unmap(struct hl_device *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - int i, bar; - - for (i = 2 ; i >= 0 ; i--) { - bar = i * 2; /* 64-bit BARs */ - iounmap(hdev->pcie_bar[bar]); - } - - pci_release_regions(pdev); -} - -/** - * hl_pci_elbi_write() - Write through the ELBI interface. - * @hdev: Pointer to hl_device structure. - * @addr: Address to write to - * @data: Data to write - * - * Return: 0 on success, negative value for failure. - */ -static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data) -{ - struct pci_dev *pdev = hdev->pdev; - ktime_t timeout; - u64 msec; - u32 val; - - if (hdev->pldm) - msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC; - else - msec = HL_PCI_ELBI_TIMEOUT_MSEC; - - /* Clear previous status */ - pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0); - - pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr); - pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); - pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, - PCI_CONFIG_ELBI_CTRL_WRITE); - - timeout = ktime_add_ms(ktime_get(), msec); - for (;;) { - pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val); - if (val & PCI_CONFIG_ELBI_STS_MASK) - break; - if (ktime_compare(ktime_get(), timeout) > 0) { - pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, - &val); - break; - } - - usleep_range(300, 500); - } - - if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) - return 0; - - if (val & PCI_CONFIG_ELBI_STS_ERR) - return -EIO; - - if (!(val & PCI_CONFIG_ELBI_STS_MASK)) { - dev_err(hdev->dev, "ELBI write didn't finish in time\n"); - return -EIO; - } - - dev_err(hdev->dev, "ELBI write has undefined bits in status\n"); - return -EIO; -} - -/** - * hl_pci_iatu_write() - iatu write routine. - * @hdev: Pointer to hl_device structure. - * @addr: Address to write to - * @data: Data to write - * - * Return: 0 on success, negative value for failure. - */ -int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 dbi_offset; - int rc; - - dbi_offset = addr & 0xFFF; - - /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail - * in case the firmware security is enabled - */ - hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000); - - rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset, - data); - - if (rc) - return -EIO; - - return 0; -} - -/** - * hl_pci_reset_link_through_bridge() - Reset PCI link. - * @hdev: Pointer to hl_device structure. 
- */ -static void hl_pci_reset_link_through_bridge(struct hl_device *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - struct pci_dev *parent_port; - u16 val; - - parent_port = pdev->bus->self; - pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val); - val |= PCI_BRIDGE_CTL_BUS_RESET; - pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); - ssleep(1); - - val &= ~(PCI_BRIDGE_CTL_BUS_RESET); - pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); - ssleep(3); -} - -/** - * hl_pci_set_inbound_region() - Configure inbound region - * @hdev: Pointer to hl_device structure. - * @region: Inbound region number. - * @pci_region: Inbound region parameters. - * - * Configure the iATU inbound region. - * - * Return: 0 on success, negative value for failure. - */ -int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, - struct hl_inbound_pci_region *pci_region) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 bar_phys_base, region_base, region_end_address; - u32 offset, ctrl_reg_val; - int rc = 0; - - /* region offset */ - offset = (0x200 * region) + 0x100; - - if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) { - bar_phys_base = hdev->pcie_bar_phys[pci_region->bar]; - region_base = bar_phys_base + pci_region->offset_in_bar; - region_end_address = region_base + pci_region->size - 1; - - rc |= hl_pci_iatu_write(hdev, offset + 0x8, - lower_32_bits(region_base)); - rc |= hl_pci_iatu_write(hdev, offset + 0xC, - upper_32_bits(region_base)); - rc |= hl_pci_iatu_write(hdev, offset + 0x10, - lower_32_bits(region_end_address)); - } - - /* Point to the specified address */ - rc |= hl_pci_iatu_write(hdev, offset + 0x14, - lower_32_bits(pci_region->addr)); - rc |= hl_pci_iatu_write(hdev, offset + 0x18, - upper_32_bits(pci_region->addr)); - rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0); - - /* Enable + bar/address match + match enable + bar number */ - ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1); - ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, - pci_region->mode); - ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1); - - if (pci_region->mode == PCI_BAR_MATCH_MODE) - ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, - pci_region->bar); - - rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val); - - /* Return the DBI window to the default location - * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail - * in case the firmware security is enabled - */ - hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); - - if (rc) - dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n", - pci_region->bar, pci_region->addr); - - return rc; -} - -/** - * hl_pci_set_outbound_region() - Configure outbound region 0 - * @hdev: Pointer to hl_device structure. - * @pci_region: Outbound region parameters. - * - * Configure the iATU outbound region 0. - * - * Return: 0 on success, negative value for failure. 
- */ -int hl_pci_set_outbound_region(struct hl_device *hdev, - struct hl_outbound_pci_region *pci_region) -{ - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 outbound_region_end_address; - int rc = 0; - - /* Outbound Region 0 */ - outbound_region_end_address = - pci_region->addr + pci_region->size - 1; - rc |= hl_pci_iatu_write(hdev, 0x008, - lower_32_bits(pci_region->addr)); - rc |= hl_pci_iatu_write(hdev, 0x00C, - upper_32_bits(pci_region->addr)); - rc |= hl_pci_iatu_write(hdev, 0x010, - lower_32_bits(outbound_region_end_address)); - rc |= hl_pci_iatu_write(hdev, 0x014, 0); - - if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64)) - rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000); - else - rc |= hl_pci_iatu_write(hdev, 0x018, 0); - - rc |= hl_pci_iatu_write(hdev, 0x020, - upper_32_bits(outbound_region_end_address)); - /* Increase region size */ - rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000); - /* Enable */ - rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000); - - /* Return the DBI window to the default location - * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail - * in case the firmware security is enabled - */ - hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); - - return rc; -} - -/** - * hl_pci_set_dma_mask() - Set DMA masks for the device. - * @hdev: Pointer to hl_device structure. - * - * This function sets the DMA masks (regular and consistent) for a specified - * value. If it doesn't succeed, it tries to set it to a fall-back value - * - * Return: 0 on success, non-zero for failure. - */ -static int hl_pci_set_dma_mask(struct hl_device *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - int rc; - - /* set DMA mask */ - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask)); - if (rc) { - dev_err(hdev->dev, - "Failed to set pci dma mask to %d bits, error %d\n", - hdev->dma_mask, rc); - return rc; - } - - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask)); - if (rc) { - dev_err(hdev->dev, - "Failed to set pci consistent dma mask to %d bits, error %d\n", - hdev->dma_mask, rc); - return rc; - } - - return 0; -} - -/** - * hl_pci_init() - PCI initialization code. - * @hdev: Pointer to hl_device structure. - * - * Set DMA masks, initialize the PCI controller and map the PCI BARs. - * - * Return: 0 on success, non-zero for failure. - */ -int hl_pci_init(struct hl_device *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - int rc; - - if (hdev->reset_pcilink) - hl_pci_reset_link_through_bridge(hdev); - - rc = pci_enable_device_mem(pdev); - if (rc) { - dev_err(hdev->dev, "can't enable PCI device\n"); - return rc; - } - - pci_set_master(pdev); - - rc = hdev->asic_funcs->pci_bars_map(hdev); - if (rc) { - dev_err(hdev->dev, "Failed to initialize PCI BARs\n"); - goto disable_device; - } - - rc = hdev->asic_funcs->init_iatu(hdev); - if (rc) { - dev_err(hdev->dev, "Failed to initialize iATU\n"); - goto unmap_pci_bars; - } - - rc = hl_pci_set_dma_mask(hdev); - if (rc) - goto unmap_pci_bars; - - return 0; - -unmap_pci_bars: - hl_pci_bars_unmap(hdev); -disable_device: - pci_clear_master(pdev); - pci_disable_device(pdev); - - return rc; -} - -/** - * hl_fw_fini() - PCI finalization code. - * @hdev: Pointer to hl_device structure - * - * Unmap PCI bars and disable PCI device. 
- */ -void hl_pci_fini(struct hl_device *hdev) -{ - hl_pci_bars_unmap(hdev); - - pci_clear_master(hdev->pdev); - pci_disable_device(hdev->pdev); -} diff --git a/drivers/misc/habanalabs/common/pci/Makefile b/drivers/misc/habanalabs/common/pci/Makefile new file mode 100644 index 000000000000..dc922a686683 --- /dev/null +++ b/drivers/misc/habanalabs/common/pci/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +HL_COMMON_PCI_FILES := common/pci/pci.o diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c new file mode 100644 index 000000000000..c56ec1574127 --- /dev/null +++ b/drivers/misc/habanalabs/common/pci/pci.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "../habanalabs.h" +#include "../../include/hw_ip/pci/pci_general.h" + +#include + +#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10) + +#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31) +#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30) +#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19) +#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8) + +/** + * hl_pci_bars_map() - Map PCI BARs. + * @hdev: Pointer to hl_device structure. + * @name: Array of BAR names. + * @is_wc: Array with flag per BAR whether a write-combined mapping is needed. + * + * Request PCI regions and map them to kernel virtual addresses. + * + * Return: 0 on success, non-zero for failure. + */ +int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], + bool is_wc[3]) +{ + struct pci_dev *pdev = hdev->pdev; + int rc, i, bar; + + rc = pci_request_regions(pdev, HL_NAME); + if (rc) { + dev_err(hdev->dev, "Cannot obtain PCI resources\n"); + return rc; + } + + for (i = 0 ; i < 3 ; i++) { + bar = i * 2; /* 64-bit BARs */ + hdev->pcie_bar[bar] = is_wc[i] ? + pci_ioremap_wc_bar(pdev, bar) : + pci_ioremap_bar(pdev, bar); + if (!hdev->pcie_bar[bar]) { + dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n", + is_wc[i] ? "_wc" : "", name[i]); + rc = -ENODEV; + goto err; + } + } + + return 0; + +err: + for (i = 2 ; i >= 0 ; i--) { + bar = i * 2; /* 64-bit BARs */ + if (hdev->pcie_bar[bar]) + iounmap(hdev->pcie_bar[bar]); + } + + pci_release_regions(pdev); + + return rc; +} + +/** + * hl_pci_bars_unmap() - Unmap PCI BARS. + * @hdev: Pointer to hl_device structure. + * + * Release all PCI BARs and unmap their virtual addresses. + */ +static void hl_pci_bars_unmap(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int i, bar; + + for (i = 2 ; i >= 0 ; i--) { + bar = i * 2; /* 64-bit BARs */ + iounmap(hdev->pcie_bar[bar]); + } + + pci_release_regions(pdev); +} + +/** + * hl_pci_elbi_write() - Write through the ELBI interface. + * @hdev: Pointer to hl_device structure. + * @addr: Address to write to + * @data: Data to write + * + * Return: 0 on success, negative value for failure. 
+ */ +static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data) +{ + struct pci_dev *pdev = hdev->pdev; + ktime_t timeout; + u64 msec; + u32 val; + + if (hdev->pldm) + msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC; + else + msec = HL_PCI_ELBI_TIMEOUT_MSEC; + + /* Clear previous status */ + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0); + + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, + PCI_CONFIG_ELBI_CTRL_WRITE); + + timeout = ktime_add_ms(ktime_get(), msec); + for (;;) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val); + if (val & PCI_CONFIG_ELBI_STS_MASK) + break; + if (ktime_compare(ktime_get(), timeout) > 0) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, + &val); + break; + } + + usleep_range(300, 500); + } + + if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) + return 0; + + if (val & PCI_CONFIG_ELBI_STS_ERR) + return -EIO; + + if (!(val & PCI_CONFIG_ELBI_STS_MASK)) { + dev_err(hdev->dev, "ELBI write didn't finish in time\n"); + return -EIO; + } + + dev_err(hdev->dev, "ELBI write has undefined bits in status\n"); + return -EIO; +} + +/** + * hl_pci_iatu_write() - iatu write routine. + * @hdev: Pointer to hl_device structure. + * @addr: Address to write to + * @data: Data to write + * + * Return: 0 on success, negative value for failure. + */ +int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 dbi_offset; + int rc; + + dbi_offset = addr & 0xFFF; + + /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail + * in case the firmware security is enabled + */ + hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000); + + rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset, + data); + + if (rc) + return -EIO; + + return 0; +} + +/** + * hl_pci_reset_link_through_bridge() - Reset PCI link. + * @hdev: Pointer to hl_device structure. + */ +static void hl_pci_reset_link_through_bridge(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct pci_dev *parent_port; + u16 val; + + parent_port = pdev->bus->self; + pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val); + val |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(1); + + val &= ~(PCI_BRIDGE_CTL_BUS_RESET); + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(3); +} + +/** + * hl_pci_set_inbound_region() - Configure inbound region + * @hdev: Pointer to hl_device structure. + * @region: Inbound region number. + * @pci_region: Inbound region parameters. + * + * Configure the iATU inbound region. + * + * Return: 0 on success, negative value for failure. 
+ */ +int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, + struct hl_inbound_pci_region *pci_region) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 bar_phys_base, region_base, region_end_address; + u32 offset, ctrl_reg_val; + int rc = 0; + + /* region offset */ + offset = (0x200 * region) + 0x100; + + if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) { + bar_phys_base = hdev->pcie_bar_phys[pci_region->bar]; + region_base = bar_phys_base + pci_region->offset_in_bar; + region_end_address = region_base + pci_region->size - 1; + + rc |= hl_pci_iatu_write(hdev, offset + 0x8, + lower_32_bits(region_base)); + rc |= hl_pci_iatu_write(hdev, offset + 0xC, + upper_32_bits(region_base)); + rc |= hl_pci_iatu_write(hdev, offset + 0x10, + lower_32_bits(region_end_address)); + } + + /* Point to the specified address */ + rc |= hl_pci_iatu_write(hdev, offset + 0x14, + lower_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, offset + 0x18, + upper_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0); + + /* Enable + bar/address match + match enable + bar number */ + ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1); + ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, + pci_region->mode); + ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1); + + if (pci_region->mode == PCI_BAR_MATCH_MODE) + ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, + pci_region->bar); + + rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val); + + /* Return the DBI window to the default location + * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail + * in case the firmware security is enabled + */ + hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); + + if (rc) + dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n", + pci_region->bar, pci_region->addr); + + return rc; +} + +/** + * hl_pci_set_outbound_region() - Configure outbound region 0 + * @hdev: Pointer to hl_device structure. + * @pci_region: Outbound region parameters. + * + * Configure the iATU outbound region 0. + * + * Return: 0 on success, negative value for failure. + */ +int hl_pci_set_outbound_region(struct hl_device *hdev, + struct hl_outbound_pci_region *pci_region) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 outbound_region_end_address; + int rc = 0; + + /* Outbound Region 0 */ + outbound_region_end_address = + pci_region->addr + pci_region->size - 1; + rc |= hl_pci_iatu_write(hdev, 0x008, + lower_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, 0x00C, + upper_32_bits(pci_region->addr)); + rc |= hl_pci_iatu_write(hdev, 0x010, + lower_32_bits(outbound_region_end_address)); + rc |= hl_pci_iatu_write(hdev, 0x014, 0); + + if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64)) + rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000); + else + rc |= hl_pci_iatu_write(hdev, 0x018, 0); + + rc |= hl_pci_iatu_write(hdev, 0x020, + upper_32_bits(outbound_region_end_address)); + /* Increase region size */ + rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000); + /* Enable */ + rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000); + + /* Return the DBI window to the default location + * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail + * in case the firmware security is enabled + */ + hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); + + return rc; +} + +/** + * hl_pci_set_dma_mask() - Set DMA masks for the device. + * @hdev: Pointer to hl_device structure. 
+ *
+ * This function sets the DMA masks (regular and consistent) for a specified
+ * value. If it doesn't succeed, it tries to set it to a fall-back value
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int hl_pci_set_dma_mask(struct hl_device *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int rc;
+
+	/* set DMA mask */
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to set pci dma mask to %d bits, error %d\n",
+			hdev->dma_mask, rc);
+		return rc;
+	}
+
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to set pci consistent dma mask to %d bits, error %d\n",
+			hdev->dma_mask, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * hl_pci_init() - PCI initialization code.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Set DMA masks, initialize the PCI controller and map the PCI BARs.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int hl_pci_init(struct hl_device *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int rc;
+
+	if (hdev->reset_pcilink)
+		hl_pci_reset_link_through_bridge(hdev);
+
+	rc = pci_enable_device_mem(pdev);
+	if (rc) {
+		dev_err(hdev->dev, "can't enable PCI device\n");
+		return rc;
+	}
+
+	pci_set_master(pdev);
+
+	rc = hdev->asic_funcs->pci_bars_map(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
+		goto disable_device;
+	}
+
+	rc = hdev->asic_funcs->init_iatu(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to initialize iATU\n");
+		goto unmap_pci_bars;
+	}
+
+	rc = hl_pci_set_dma_mask(hdev);
+	if (rc)
+		goto unmap_pci_bars;
+
+	return 0;
+
+unmap_pci_bars:
+	hl_pci_bars_unmap(hdev);
+disable_device:
+	pci_clear_master(pdev);
+	pci_disable_device(pdev);
+
+	return rc;
+}
+
+/**
+ * hl_fw_fini() - PCI finalization code.
+ * @hdev: Pointer to hl_device structure
+ *
+ * Unmap PCI bars and disable PCI device.
+ */
+void hl_pci_fini(struct hl_device *hdev)
+{
+	hl_pci_bars_unmap(hdev);
+
+	pci_clear_master(hdev->pdev);
+	pci_disable_device(hdev->pdev);
+}
-- 
cgit v1.2.3-58-ga151


From 1e3f2536a8fc12d5b82164349715861ccc7968a7 Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Sun, 3 Jan 2021 14:13:06 +0200
Subject: habanalabs: increment ctx ref from within a cs allocation

A CS must increment the relevant context reference count. We want to
increment the reference inside the CS allocation function, as opposed to
today, where we increment it outside. This is logical since we want to
avoid explicitly incrementing the context every time we call the CS
allocation function.
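
In rough terms, the call site becomes (an illustrative sketch distilled
from the diff below, not verbatim driver code):

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
	if (rc)
		/* no hl_ctx_put() here - allocate_cs() already dropped it */
		goto free_cs_chunk_array;

allocate_cs() now calls hl_ctx_get(hdev, ctx) right after the CS object
is allocated and hl_ctx_put(ctx) on its own error path, so the get/put
pair lives in one function instead of being split across every caller.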
Signed-off-by: Ofir Bitton
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/common/command_submission.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index f7fac82ac41d..3affb350070c 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -479,6 +479,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
 		return -ENOMEM;
 	}

+	/* increment refcnt for context */
+	hl_ctx_get(hdev, ctx);
+
 	cs->ctx = ctx;
 	cs->submitted = false;
 	cs->completed = false;
@@ -550,6 +553,7 @@ free_fence:
 	kfree(cs_cmpl);
 free_cs:
 	kfree(cs);
+	hl_ctx_put(ctx);
 	return rc;
 }

@@ -827,14 +831,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 	if (rc)
 		goto out;

-	/* increment refcnt for context */
-	hl_ctx_get(hdev, hpriv->ctx);
-
 	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
-	if (rc) {
-		hl_ctx_put(hpriv->ctx);
+	if (rc)
 		goto free_cs_chunk_array;
-	}

 	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
 	*cs_seq = cs->sequence;
@@ -1276,15 +1275,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 		}
 	}

-	/* increment refcnt for context */
-	hl_ctx_get(hdev, ctx);
-
 	rc = allocate_cs(hdev, ctx, cs_type, &cs);
 	if (rc) {
 		if (cs_type == CS_TYPE_WAIT ||
 			cs_type == CS_TYPE_COLLECTIVE_WAIT)
 			hl_fence_put(sig_fence);
-		hl_ctx_put(ctx);
 		goto free_cs_chunk_array;
 	}

-- 
cgit v1.2.3-58-ga151


From d3f139c46280035509600900a6ead3d7e680218f Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Wed, 18 Nov 2020 15:46:57 +0200
Subject: habanalabs: add driver support for internal cb scheduling

In order to support scenarios in which the driver needs access to HW
components but cannot access them directly, we add support for
scheduling command buffers internally. These command buffers will be
transmitted upon the next user command submission.
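
The intended flow, sketched with the helpers this patch introduces
(abbreviated; error handling omitted):

	/* producer: queue a CB that cannot be submitted right now */
	spin_lock(&ctx->pending_cb_lock);
	list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
	spin_unlock(&ctx->pending_cb_lock);

	/* consumer: the next user CS ioctl drains the list into a new CS */
	rc = hl_submit_pending_cb(hpriv);

Only a single thread drains the list at a time, gated by the
thread_pending_cb_token atomic, and a failed drain re-queues the local
list so the CBs are retried on the next submission.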
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 139 +++++++++++++++++++++ drivers/misc/habanalabs/common/context.c | 8 ++ drivers/misc/habanalabs/common/habanalabs.h | 26 ++++ 3 files changed, 173 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 3affb350070c..a5e9bb0a4855 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -585,6 +585,18 @@ void hl_cs_rollback_all(struct hl_device *hdev) } } +void hl_pending_cb_list_flush(struct hl_ctx *ctx) +{ + struct hl_pending_cb *pending_cb, *tmp; + + list_for_each_entry_safe(pending_cb, tmp, + &ctx->pending_cb_list, cb_node) { + list_del(&pending_cb->cb_node); + hl_cb_put(pending_cb->cb); + kfree(pending_cb); + } +} + static void job_wq_completion(struct work_struct *work) { struct hl_cs_job *job = container_of(work, struct hl_cs_job, @@ -954,6 +966,129 @@ out: return rc; } +static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx, + struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id) +{ + struct hw_queue_properties *hw_queue_prop; + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + + hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id]; + cntr = &hdev->aggregated_cs_counters; + + job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true); + if (!job) { + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); + atomic64_inc(&cntr->out_of_mem_drop_cnt); + dev_err(hdev->dev, "Failed to allocate a new job\n"); + return -ENOMEM; + } + + job->id = 0; + job->cs = cs; + job->user_cb = cb; + atomic_inc(&job->user_cb->cs_cnt); + job->user_cb_size = size; + job->hw_queue_id = hw_queue_id; + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size; + + /* increment refcount as for external queues we get completion */ + cs_get(cs); + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + hl_debugfs_add_job(hdev, job); + + return 0; +} + +static int hl_submit_pending_cb(struct hl_fpriv *hpriv) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + struct hl_pending_cb *pending_cb, *tmp; + struct list_head local_cb_list; + struct hl_cs *cs; + struct hl_cb *cb; + u32 hw_queue_id; + u32 cb_size; + int process_list, rc = 0; + + if (list_empty(&ctx->pending_cb_list)) + return 0; + + process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0); + + /* Only a single thread is allowed to process the list */ + if (!process_list) + return 0; + + if (list_empty(&ctx->pending_cb_list)) + goto free_pending_cb_token; + + /* move all list elements to a local list */ + INIT_LIST_HEAD(&local_cb_list); + spin_lock(&ctx->pending_cb_lock); + list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list, + cb_node) + list_move_tail(&pending_cb->cb_node, &local_cb_list); + spin_unlock(&ctx->pending_cb_lock); + + rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, &cs); + if (rc) + goto add_list_elements; + + hl_debugfs_add_cs(cs); + + /* Iterate through pending cb list, create jobs and add to CS */ + list_for_each_entry(pending_cb, &local_cb_list, cb_node) { + cb = pending_cb->cb; + cb_size = pending_cb->cb_size; + hw_queue_id = pending_cb->hw_queue_id; + + rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size, + hw_queue_id); + if (rc) + goto free_cs_object; + } + + rc = 
hl_hw_queue_schedule_cs(cs); + if (rc) { + if (rc != -EAGAIN) + dev_err(hdev->dev, + "Failed to submit CS %d.%llu (%d)\n", + ctx->asid, cs->sequence, rc); + goto free_cs_object; + } + + /* pending cb was scheduled successfully */ + list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) { + list_del(&pending_cb->cb_node); + kfree(pending_cb); + } + + cs_put(cs); + + goto free_pending_cb_token; + +free_cs_object: + cs_rollback(hdev, cs); + cs_put(cs); +add_list_elements: + spin_lock(&ctx->pending_cb_lock); + list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list, + cb_node) + list_move(&pending_cb->cb_node, &ctx->pending_cb_list); + spin_unlock(&ctx->pending_cb_lock); +free_pending_cb_token: + atomic_set(&ctx->thread_pending_cb_token, 1); + + return rc; +} + static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, u64 *cs_seq) { @@ -1353,6 +1488,10 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) if (rc) goto out; + rc = hl_submit_pending_cb(hpriv); + if (rc) + goto out; + cs_type = hl_cs_get_cs_type(args->in.cs_flags & ~HL_CS_FLAGS_FORCE_RESTORE); chunks = (void __user *) (uintptr_t) args->in.chunks_execute; diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index 3d86b83f4ca6..829fe98eed61 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -15,6 +15,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx) u64 idle_mask = 0; int i; + /* Release all allocated pending cb's, those cb's were never + * scheduled so it is safe to release them here + */ + hl_pending_cb_list_flush(ctx); + /* * If we arrived here, there are no jobs waiting for this context * on its queues so we can safely remove it. @@ -142,8 +147,11 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) kref_init(&ctx->refcount); ctx->cs_sequence = 1; + INIT_LIST_HEAD(&ctx->pending_cb_list); + spin_lock_init(&ctx->pending_cb_lock); spin_lock_init(&ctx->cs_lock); atomic_set(&ctx->thread_ctx_switch_token, 1); + atomic_set(&ctx->thread_pending_cb_token, 1); ctx->thread_ctx_switch_wait_token = 0; ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs, sizeof(struct hl_fence *), diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index eb43fb3065ce..8e0553bf3e0e 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1016,6 +1016,20 @@ struct hl_cs_counters_atomic { atomic64_t validation_drop_cnt; }; +/** + * struct hl_pending_cb - pending command buffer structure + * @cb_node: cb node in pending cb list + * @cb: command buffer to send in next submission + * @cb_size: command buffer size + * @hw_queue_id: destination queue id + */ +struct hl_pending_cb { + struct list_head cb_node; + struct hl_cb *cb; + u32 cb_size; + u32 hw_queue_id; +}; + /** * struct hl_ctx - user/kernel context. * @mem_hash: holds mapping from virtual address to virtual memory area @@ -1031,6 +1045,8 @@ struct hl_cs_counters_atomic { * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the * MMU hash or walking the PGT requires talking this lock. * @debugfs_list: node in debugfs list of contexts. + * pending_cb_list: list of pending command buffers waiting to be sent upon + * next user command submission context. * @cs_counters: context command submission counters. * @cb_va_pool: device VA pool for command buffers which are mapped to the * device's MMU. 
@@ -1039,11 +1055,17 @@ struct hl_cs_counters_atomic {
  *			index to cs_pending array.
  * @dram_default_hops: array that holds all hops addresses needed for default
  *                     DRAM mapping.
+ * @pending_cb_lock: spinlock to protect pending cb list
  * @cs_lock: spinlock to protect cs_sequence.
  * @dram_phys_mem: amount of used physical DRAM memory by this context.
  * @thread_ctx_switch_token: token to prevent multiple threads of the same
  *				context	from running the context switch phase.
  *				Only a single thread should run it.
+ * @thread_pending_cb_token: token to prevent multiple threads from processing
+ *				the pending CB list. Only a single thread should
+ *				process the list since it is protected by a
+ *				spinlock and we don't want to halt the entire
+ *				command submission sequence.
  * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
  *				the context switch phase from moving to their
  *				execution phase before the context switch phase
@@ -1062,13 +1084,16 @@ struct hl_ctx {
 	struct mutex			mem_hash_lock;
 	struct mutex			mmu_lock;
 	struct list_head		debugfs_list;
+	struct list_head		pending_cb_list;
 	struct hl_cs_counters_atomic	cs_counters;
 	struct gen_pool			*cb_va_pool;
 	u64				cs_sequence;
 	u64				*dram_default_hops;
+	spinlock_t			pending_cb_lock;
 	spinlock_t			cs_lock;
 	atomic64_t			dram_phys_mem;
 	atomic_t			thread_ctx_switch_token;
+	atomic_t			thread_pending_cb_token;
 	u32				thread_ctx_switch_wait_token;
 	u32				asid;
 	u32				handle;
@@ -2143,6 +2168,7 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx);
 void hl_cb_va_pool_fini(struct hl_ctx *ctx);

 void hl_cs_rollback_all(struct hl_device *hdev);
+void hl_pending_cb_list_flush(struct hl_ctx *ctx);
 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
 		enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
 void hl_sob_reset_error(struct kref *ref);
-- 
cgit v1.2.3-58-ga151


From 423815bf02e257091d5337be5c63b57fc29e4254 Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Tue, 5 Jan 2021 09:04:07 +0200
Subject: habanalabs/gaudi: remove PCI access to SM block

Due to a HW limitation we must remove all direct access to SM registers;
instead, we access the SM registers through the HW QMANs. When possible,
i.e. when no user context is present, the driver uses the HW QMANs
directly. Whenever there is an active user, the driver prepares a pending
command buffer list which is sent upon the next user submission.
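
The dispatch decision is sketched below (abbreviated from
gaudi_schedule_register_memset() in the diff that follows):

	if (!ctx || kref_read(&ctx->refcount) == 0)
		/* no active user - send the memset job on QMAN0 directly */
		return gaudi_memset_registers(hdev, reg_base, num_regs, val);

	/* active user - build a CB of MSG_LONG packets and add it to the
	 * context's pending CB list, to be sent on an external queue with
	 * the next user submission
	 */

As the patch itself notes, the queue ID stored in the pending CB must be
an external queue ID, otherwise behavior is undefined.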
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 248 ++++++++++++++++++--- drivers/misc/habanalabs/gaudi/gaudiP.h | 2 + .../misc/habanalabs/include/gaudi/gaudi_packets.h | 3 + 3 files changed, 227 insertions(+), 26 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index f6cfd6e075e2..0d2edb25c92a 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -354,6 +354,10 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job); static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size, u64 val); +static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base, + u32 num_regs, u32 val); +static int gaudi_schedule_register_memset(struct hl_device *hdev, + u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val); static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, u32 tpc_id); static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev); @@ -913,11 +917,17 @@ static void gaudi_sob_group_hw_reset(struct kref *ref) struct gaudi_hw_sob_group *hw_sob_group = container_of(ref, struct gaudi_hw_sob_group, kref); struct hl_device *hdev = hw_sob_group->hdev; - int i; + u64 base_addr; + int rc; - for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++) - WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + - (hw_sob_group->base_sob_id + i) * 4, 0); + base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + + hw_sob_group->base_sob_id * 4; + rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id, + base_addr, NUMBER_OF_SOBS_IN_GRP, 0); + if (rc) + dev_err(hdev->dev, + "failed resetting sob group - sob base %u, count %u", + hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP); kref_init(&hw_sob_group->kref); } @@ -1008,6 +1018,8 @@ static void gaudi_collective_master_init_job(struct hl_device *hdev, cprop->hw_sob_group[sob_group_offset].base_sob_id; master_monitor = prop->collective_mstr_mon_id[0]; + cprop->hw_sob_group[sob_group_offset].queue_id = queue_id; + dev_dbg(hdev->dev, "Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n", master_sob_base, cprop->mstr_sob_mask[0], @@ -5583,31 +5595,206 @@ release_cb: return rc; } -static void gaudi_restore_sm_registers(struct hl_device *hdev) +static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base, + u32 num_regs, u32 val) +{ + struct packet_msg_long *pkt; + struct hl_cs_job *job; + u32 cb_size, ctl; + struct hl_cb *cb; + int i, rc; + + cb_size = (sizeof(*pkt) * num_regs) + sizeof(struct packet_msg_prot); + + if (cb_size > SZ_2M) { + dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M); + return -ENOMEM; + } + + cb = hl_cb_kernel_create(hdev, cb_size, false); + if (!cb) + return -EFAULT; + + pkt = cb->kernel_address; + + ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */ + ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + + for (i = 0; i < num_regs ; i++, pkt++) { + pkt->ctl = cpu_to_le32(ctl); + pkt->value = cpu_to_le32(val); + pkt->addr = cpu_to_le64(reg_base + (i * 4)); + } + + job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto release_cb; + } + + job->id = 0; + job->user_cb = cb; + 
atomic_inc(&job->user_cb->cs_cnt); + job->user_cb_size = cb_size; + job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; + job->patched_cb = job->user_cb; + job->job_cb_size = cb_size; + + hl_debugfs_add_job(hdev, job); + + rc = gaudi_send_job_on_qman0(hdev, job); + hl_debugfs_remove_job(hdev, job); + kfree(job); + atomic_dec(&cb->cs_cnt); + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +static int gaudi_schedule_register_memset(struct hl_device *hdev, + u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val) { + struct hl_ctx *ctx = hdev->compute_ctx; + struct hl_pending_cb *pending_cb; + struct packet_msg_long *pkt; + u32 cb_size, ctl; + struct hl_cb *cb; int i; - for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) { - WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); - WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); - WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); + /* If no compute context available or context is going down + * memset registers directly + */ + if (!ctx || kref_read(&ctx->refcount) == 0) + return gaudi_memset_registers(hdev, reg_base, num_regs, val); + + cb_size = (sizeof(*pkt) * num_regs) + + sizeof(struct packet_msg_prot) * 2; + + if (cb_size > SZ_2M) { + dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M); + return -ENOMEM; + } + + pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL); + if (!pending_cb) + return -ENOMEM; + + cb = hl_cb_kernel_create(hdev, cb_size, false); + if (!cb) { + kfree(pending_cb); + return -EFAULT; + } + + pkt = cb->kernel_address; + + ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */ + ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1); + ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1); + + for (i = 0; i < num_regs ; i++, pkt++) { + pkt->ctl = cpu_to_le32(ctl); + pkt->value = cpu_to_le32(val); + pkt->addr = cpu_to_le64(reg_base + (i * 4)); + } + + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + pending_cb->cb = cb; + pending_cb->cb_size = cb_size; + /* The queue ID MUST be an external queue ID. 
Otherwise, we will + * have undefined behavior + */ + pending_cb->hw_queue_id = hw_queue_id; + + spin_lock(&ctx->pending_cb_lock); + list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list); + spin_unlock(&ctx->pending_cb_lock); + + return 0; +} + +static int gaudi_restore_sm_registers(struct hl_device *hdev) +{ + u64 base_addr; + u32 num_regs; + int rc; + + base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0; + num_regs = NUM_OF_SOB_IN_BLOCK; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; + } + + base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0; + num_regs = NUM_OF_SOB_IN_BLOCK; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; + } + + base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0; + num_regs = NUM_OF_SOB_IN_BLOCK; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; } - for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) { - WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); - WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); - WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); + base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0; + num_regs = NUM_OF_MONITORS_IN_BLOCK; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; } - i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4; + base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0; + num_regs = NUM_OF_MONITORS_IN_BLOCK; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; + } - for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) - WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0); + base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0; + num_regs = NUM_OF_MONITORS_IN_BLOCK; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; + } + + base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + + (GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4); + num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; + } - i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4; + base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + + (GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4); + num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR; + rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0); + if (rc) { + dev_err(hdev->dev, "failed resetting SM registers"); + return -ENOMEM; + } - for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) - WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0); + return 0; } static void gaudi_restore_dma_registers(struct hl_device *hdev) @@ -5664,18 +5851,23 @@ static void gaudi_restore_qm_registers(struct hl_device *hdev) } } -static void gaudi_restore_user_registers(struct hl_device *hdev) +static int gaudi_restore_user_registers(struct hl_device *hdev) { - gaudi_restore_sm_registers(hdev); + int rc; + + rc = gaudi_restore_sm_registers(hdev); + if (rc) + return rc; + 
gaudi_restore_dma_registers(hdev); gaudi_restore_qm_registers(hdev); + + return 0; } static int gaudi_context_switch(struct hl_device *hdev, u32 asid) { - gaudi_restore_user_registers(hdev); - - return 0; + return gaudi_restore_user_registers(hdev); } static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev) @@ -8203,12 +8395,16 @@ static u32 gaudi_gen_wait_cb(struct hl_device *hdev, static void gaudi_reset_sob(struct hl_device *hdev, void *data) { struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data; + int rc; dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, hw_sob->sob_id); - WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, - 0); + rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx, + CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + + hw_sob->sob_id * 4, 1, 0); + if (rc) + dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id); kref_init(&hw_sob->kref); } diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index a7ab2d7e57d4..78830443341d 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -251,11 +251,13 @@ enum gaudi_nic_mask { * @hdev: habanalabs device structure. * @kref: refcount of this SOB group. group will reset once refcount is zero. * @base_sob_id: base sob id of this SOB group. + * @queue_id: id of the queue that waits on this sob group */ struct gaudi_hw_sob_group { struct hl_device *hdev; struct kref kref; u32 base_sob_id; + u32 queue_id; }; #define NUM_SOB_GROUPS (HL_RSVD_SOBS * QMAN_STREAMS) diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h index 784b5bc8d0ba..6e097ace2e96 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h @@ -78,6 +78,9 @@ struct packet_wreg_bulk { __le64 values[0]; /* data starts here */ }; +#define GAUDI_PKT_LONG_CTL_OP_SHIFT 20 +#define GAUDI_PKT_LONG_CTL_OP_MASK 0x00300000 + struct packet_msg_long { __le32 value; __le32 ctl; -- cgit v1.2.3-58-ga151 From 825b30c4f37bd4a6846916a18379dcd211ea3fbf Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 4 Jan 2021 17:08:46 +0200 Subject: habanalabs: Use 'dma_set_mask_and_coherent()' Axe 'hl_pci_set_dma_mask()' and replace it with an equivalent 'dma_set_mask_and_coherent()' call. This makes the code a bit less verbose. It also removes an erroneous comment, because 'hl_pci_set_dma_mask()' does not try to use a fall-back value. Signed-off-by: Christophe JAILLET Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/pci/pci.c | 43 ++++++-------------------------- 1 file changed, 7 insertions(+), 36 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c index c56ec1574127..b799f9258fb0 100644 --- a/drivers/misc/habanalabs/common/pci/pci.c +++ b/drivers/misc/habanalabs/common/pci/pci.c @@ -307,40 +307,6 @@ int hl_pci_set_outbound_region(struct hl_device *hdev, return rc; } -/** - * hl_pci_set_dma_mask() - Set DMA masks for the device. - * @hdev: Pointer to hl_device structure. - * - * This function sets the DMA masks (regular and consistent) for a specified - * value. If it doesn't succeed, it tries to set it to a fall-back value - * - * Return: 0 on success, non-zero for failure. 
- */ -static int hl_pci_set_dma_mask(struct hl_device *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - int rc; - - /* set DMA mask */ - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask)); - if (rc) { - dev_err(hdev->dev, - "Failed to set pci dma mask to %d bits, error %d\n", - hdev->dma_mask, rc); - return rc; - } - - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask)); - if (rc) { - dev_err(hdev->dev, - "Failed to set pci consistent dma mask to %d bits, error %d\n", - hdev->dma_mask, rc); - return rc; - } - - return 0; -} - /** * hl_pci_init() - PCI initialization code. * @hdev: Pointer to hl_device structure. @@ -377,9 +343,14 @@ int hl_pci_init(struct hl_device *hdev) goto unmap_pci_bars; } - rc = hl_pci_set_dma_mask(hdev); - if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(hdev->dma_mask)); + if (rc) { + dev_err(hdev->dev, + "Failed to set dma mask to %d bits, error %d\n", + hdev->dma_mask, rc); goto unmap_pci_bars; + } return 0; -- cgit v1.2.3-58-ga151 From f8bc7f091cc0ef115bd284e68df247b56c422d93 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 3 Jan 2021 20:52:40 +0200 Subject: habanalabs/gaudi: print sync manager SEI interrupt info Driver must print sync manager SEI information upon receiving interrupt from FW. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 41 ++++++++++++++++++++++ drivers/misc/habanalabs/include/common/cpucp_if.h | 7 ++++ .../habanalabs/include/gaudi/gaudi_async_events.h | 4 +++ 3 files changed, 52 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 0d2edb25c92a..d9f1b646aac9 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -225,6 +225,12 @@ gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = { "MSG AXI LBW returned with error" }; +enum gaudi_sm_sei_cause { + GAUDI_SM_SEI_SO_OVERFLOW, + GAUDI_SM_SEI_LBW_4B_UNALIGNED, + GAUDI_SM_SEI_AXI_RESPONSE_ERR +}; + static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = { QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */ @@ -6846,6 +6852,34 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev, } } +static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type, + struct hl_eq_sm_sei_data *sei_data) +{ + u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0; + + switch (sei_data->sei_cause) { + case GAUDI_SM_SEI_SO_OVERFLOW: + dev_err(hdev->dev, + "SM %u SEI Error: SO %u overflow/underflow", + index, le16_to_cpu(sei_data->sei_log)); + break; + case GAUDI_SM_SEI_LBW_4B_UNALIGNED: + dev_err(hdev->dev, + "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x", + index, le16_to_cpu(sei_data->sei_log)); + break; + case GAUDI_SM_SEI_AXI_RESPONSE_ERR: + dev_err(hdev->dev, + "SM %u SEI Error: AXI ID %u response error", + index, le16_to_cpu(sei_data->sei_log)); + break; + default: + dev_err(hdev->dev, "Unknown SM SEI cause %u", + le16_to_cpu(sei_data->sei_log)); + break; + } +} + static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type, struct hl_eq_ecc_data *ecc_data) { @@ -7469,6 +7503,13 @@ static void gaudi_handle_eqe(struct hl_device *hdev, hl_fw_unmask_irq(hdev, event_type); break; + case GAUDI_EVENT_DMA_IF_SEI_0 ... 
GAUDI_EVENT_DMA_IF_SEI_3: + gaudi_print_irq_info(hdev, event_type, false); + gaudi_print_sm_sei_info(hdev, event_type, + &eq_entry->sm_sei_data); + hl_fw_unmask_irq(hdev, event_type); + break; + case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E: gaudi_print_clk_change_info(hdev, event_type); hl_fw_unmask_irq(hdev, event_type); diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 00bd9b392f93..d75d1077461b 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -58,11 +58,18 @@ struct hl_eq_ecc_data { __u8 pad[7]; }; +struct hl_eq_sm_sei_data { + __le16 sei_log; + __u8 sei_cause; + __u8 pad[5]; +}; + struct hl_eq_entry { struct hl_eq_header hdr; union { struct hl_eq_ecc_data ecc_data; struct hl_eq_hbm_ecc_data hbm_ecc_data; + struct hl_eq_sm_sei_data sm_sei_data; __le64 data[7]; }; }; diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h index 9ccba8437ec9..49335e8334b4 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h @@ -212,6 +212,10 @@ enum gaudi_async_event_id { GAUDI_EVENT_NIC_SEI_2 = 266, GAUDI_EVENT_NIC_SEI_3 = 267, GAUDI_EVENT_NIC_SEI_4 = 268, + GAUDI_EVENT_DMA_IF_SEI_0 = 277, + GAUDI_EVENT_DMA_IF_SEI_1 = 278, + GAUDI_EVENT_DMA_IF_SEI_2 = 279, + GAUDI_EVENT_DMA_IF_SEI_3 = 280, GAUDI_EVENT_PCIE_FLR = 290, GAUDI_EVENT_TPC0_BMON_SPMU = 300, GAUDI_EVENT_TPC0_KRN_ERR = 301, -- cgit v1.2.3-58-ga151 From eea4c2557cb986584c4960eb15dd17cb3f4f99a2 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 10 Jan 2021 10:34:42 +0200 Subject: habanalabs: ignore F/W BMC errors in case no BMC present In order to support operation mode in which BMC is not active, driver must not take BMC errors into consideration. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 9f4ae21a126f..ba6920f2b4ab 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -308,9 +308,15 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) dev_warn(hdev->dev, "Device boot warning - Skipped DRAM initialization\n"); - if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) - dev_warn(hdev->dev, - "Device boot error - Skipped waiting for BMC\n"); + + if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) { + if (hdev->bmc_enable) + dev_warn(hdev->dev, + "Device boot error - Skipped waiting for BMC\n"); + else + err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED; + } + if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) dev_err(hdev->dev, "Device boot error - Serdes data from BMC not available\n"); -- cgit v1.2.3-58-ga151 From d2b980f3299e9bdef55399c9cde6a72e0b6d446c Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 7 Jan 2021 12:14:17 +0200 Subject: habanalabs: add security violations dump to debugfs In order to improve driver security debuggability, we add security violations dump to debugfs. 
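For illustration, a minimal userspace sketch of how the new node is used (the "hl0" device index and the error handling are assumptions, not part of this patch; per the read handler below, the read returns no data and its only effect is the ack-and-dump to the kernel log):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	/* Device index "hl0" is assumed; adjust to the actual device */
	int fd = open("/sys/kernel/debug/habanalabs/hl0/dump_security_violations",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* Returns 0 bytes; the violations are dumped to dmesg as a side effect */
	(void)read(fd, buf, sizeof(buf));
	close(fd);
	return 0;
}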
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../ABI/testing/debugfs-driver-habanalabs | 8 ++++++++ drivers/misc/habanalabs/common/debugfs.c | 22 ++++++++++++++++++++++ drivers/misc/habanalabs/common/habanalabs.h | 2 ++ drivers/misc/habanalabs/gaudi/gaudi.c | 3 ++- drivers/misc/habanalabs/gaudi/gaudiP.h | 1 + drivers/misc/habanalabs/gaudi/gaudi_security.c | 5 +++++ drivers/misc/habanalabs/goya/goya.c | 3 ++- drivers/misc/habanalabs/goya/goyaP.h | 1 + drivers/misc/habanalabs/goya/goya_security.c | 5 +++++ 9 files changed, 48 insertions(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs index c5d678d39144..3979bfdaa080 100644 --- a/Documentation/ABI/testing/debugfs-driver-habanalabs +++ b/Documentation/ABI/testing/debugfs-driver-habanalabs @@ -182,3 +182,11 @@ KernelVersion: 5.6 Contact: oded.gabbay@gmail.com Description: Sets the stop-on_error option for the device engines. Value of "0" is for disable, otherwise enable. + +What: /sys/kernel/debug/habanalabs/hl<n>/dump_security_violations +Date: Jan 2021 +KernelVersion: 5.12 +Contact: oded.gabbay@gmail.com +Description: Dumps all security violations to dmesg. This will also ack + all security violations, meaning those violations will not be + dumped the next time the user calls this API diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 50ca8eea6648..323d0381a60a 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -867,6 +867,17 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf, return count; } +static ssize_t hl_security_violations_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + + hdev->asic_funcs->ack_protection_bits_errors(hdev); + + return 0; +} + static const struct file_operations hl_data32b_fops = { .owner = THIS_MODULE, .read = hl_data_read32, @@ -924,6 +935,11 @@ static const struct file_operations hl_stop_on_err_fops = { .write = hl_stop_on_err_write }; +static const struct file_operations hl_security_violations_fops = { + .owner = THIS_MODULE, + .read = hl_security_violations_read +}; + static const struct hl_info_list hl_debugfs_list[] = { {"command_buffers", command_buffers_show, NULL}, {"command_submission", command_submission_show, NULL}, @@ -1073,6 +1089,12 @@ void hl_debugfs_add_device(struct hl_device *hdev) dev_entry, &hl_stop_on_err_fops); + debugfs_create_file("dump_security_violations", + 0644, + dev_entry->root, + dev_entry, + &hl_security_violations_fops); + for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { ent = debugfs_create_file(hl_debugfs_list[i].name, diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 8e0553bf3e0e..3923b03e99aa 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -850,6 +850,7 @@ enum div_select_defs { * @collective_wait_create_jobs: allocate collective wait cs jobs * @scramble_vaddr: Routine to scramble the virtual address prior of mapping it * in the MMU.
+ * @ack_protection_bits_errors: ack and dump all security violations */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -960,6 +961,7 @@ struct hl_asic_funcs { struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, u32 collective_engine_id); u64 (*scramble_vaddr)(struct hl_device *hdev, u64 virt_addr); + void (*ack_protection_bits_errors)(struct hl_device *hdev); }; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index d9f1b646aac9..2b01c081404a 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -8546,7 +8546,8 @@ static const struct hl_asic_funcs gaudi_funcs = { .get_device_time = gaudi_get_device_time, .collective_wait_init_cs = gaudi_collective_wait_init_cs, .collective_wait_create_jobs = gaudi_collective_wait_create_jobs, - .scramble_vaddr = hl_mmu_scramble_vaddr + .scramble_vaddr = hl_mmu_scramble_vaddr, + .ack_protection_bits_errors = gaudi_ack_protection_bits_errors }; /** diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 78830443341d..50bb4ad570fd 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -335,6 +335,7 @@ struct gaudi_device { }; void gaudi_init_security(struct hl_device *hdev); +void gaudi_ack_protection_bits_errors(struct hl_device *hdev); void gaudi_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_attr_grp); void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq); diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c index e10181692d0b..7085f45814ae 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_security.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c @@ -13052,3 +13052,8 @@ void gaudi_init_security(struct hl_device *hdev) gaudi_init_protection_bits(hdev); } + +void gaudi_ack_protection_bits_errors(struct hl_device *hdev) +{ + +} diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index cf0496b04044..50dcefc02cdd 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5457,7 +5457,8 @@ static const struct hl_asic_funcs goya_funcs = { .get_device_time = goya_get_device_time, .collective_wait_init_cs = goya_collective_wait_init_cs, .collective_wait_create_jobs = goya_collective_wait_create_jobs, - .scramble_vaddr = hl_mmu_scramble_vaddr + .scramble_vaddr = hl_mmu_scramble_vaddr, + .ack_protection_bits_errors = goya_ack_protection_bits_errors }; /* diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index 8b3408211af6..23fe099ed218 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -173,6 +173,7 @@ void goya_init_mme_qmans(struct hl_device *hdev); void goya_init_tpc_qmans(struct hl_device *hdev); int goya_init_cpu_queues(struct hl_device *hdev); void goya_init_security(struct hl_device *hdev); +void goya_ack_protection_bits_errors(struct hl_device *hdev); int goya_late_init(struct hl_device *hdev); void goya_late_fini(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c index 14701836f92b..14c3bae3ccdc 100644 --- a/drivers/misc/habanalabs/goya/goya_security.c +++ b/drivers/misc/habanalabs/goya/goya_security.c @@ -3120,3 +3120,8 @@ void goya_init_security(struct hl_device *hdev) goya_init_protection_bits(hdev); } + +void 
goya_ack_protection_bits_errors(struct hl_device *hdev) +{ + +} -- cgit v1.2.3-58-ga151 From 8d79ce162e2b9bc57d8dca814f784654efe179da Mon Sep 17 00:00:00 2001 From: farah kassabri Date: Mon, 11 Jan 2021 10:10:00 +0200 Subject: habanalabs: always try to use the hint address Currently the hint address is ignored in case the va block page size is not a power of 2. We need to support the user hint address also in this case, but only if the hint address is aligned to the page size. Signed-off-by: farah kassabri Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/memory.c | 139 +++++++++----------------------- 1 file changed, 37 insertions(+), 102 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index ada977eb136c..36de0d05d3a2 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -520,8 +520,8 @@ static inline int add_va_block(struct hl_device *hdev, } /** - * get_va_block_pow2() - get a virtual block for the given size and alignment - * where alignment is a power of 2. + * get_va_block() - get a virtual block for the given size and alignment. + * * @hdev: pointer to the habanalabs device structure. * @va_range: pointer to the virtual addresses range. * @size: requested block size. @@ -530,34 +530,51 @@ static inline int add_va_block(struct hl_device *hdev, * * This function does the following: * - Iterate on the virtual block list to find a suitable virtual block for the - * given size and alignment. + * given size, hint address and alignment. * - Reserve the requested block and update the list. * - Return the start address of the virtual block. */ -static u64 get_va_block_pow2(struct hl_device *hdev, +static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, u64 size, u64 hint_addr, u32 va_block_align) { struct hl_vm_va_block *va_block, *new_va_block = NULL; - u64 valid_start, valid_size, prev_start, prev_end, align_mask, - reserved_valid_start = 0, reserved_valid_size = 0; + u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end, + align_mask, reserved_valid_start = 0, reserved_valid_size = 0; bool add_prev = false; + bool is_align_pow_2 = is_power_of_2(va_range->page_size); - align_mask = ~((u64)va_block_align - 1); + if (is_align_pow_2) + align_mask = ~((u64)va_block_align - 1); + else + /* + * with non-power-of-2 range we work only with page granularity + * and the start address is page aligned, + * so no need for alignment checking.
+ */ + size = DIV_ROUND_UP_ULL(size, va_range->page_size) * + va_range->page_size; + + tmp_hint_addr = hint_addr; - /* check if hint_addr is aligned */ - if (hint_addr & (va_block_align - 1)) + /* Check if we need to ignore hint address */ + if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) || + (!is_align_pow_2 && + do_div(tmp_hint_addr, va_range->page_size))) { + dev_info(hdev->dev, "Hint address 0x%llx will be ignored\n", + hint_addr); hint_addr = 0; + } mutex_lock(&va_range->lock); print_va_list_locked(hdev, &va_range->list); list_for_each_entry(va_block, &va_range->list, node) { - /* calc the first possible aligned addr */ + /* Calc the first possible aligned addr */ valid_start = va_block->start; - if (valid_start & (va_block_align - 1)) { + if (is_align_pow_2 && (valid_start & (va_block_align - 1))) { valid_start &= align_mask; valid_start += va_block_align; if (valid_start > va_block->end) @@ -565,9 +582,11 @@ static u64 get_va_block_pow2(struct hl_device *hdev, } valid_size = va_block->end - valid_start; + if (valid_size < size) + continue; - if (valid_size >= size && (!new_va_block || - valid_size < reserved_valid_size)) { + /* Pick the minimal length block which has the required size */ + if (!new_va_block || (valid_size < reserved_valid_size)) { new_va_block = va_block; reserved_valid_start = valid_start; reserved_valid_size = valid_size; @@ -584,10 +603,14 @@ static u64 get_va_block_pow2(struct hl_device *hdev, if (!new_va_block) { dev_err(hdev->dev, "no available va block for size %llu\n", - size); + size); goto out; } + /* + * Check if there is some leftover range due to reserving the new + * va block, then return it to the main virtual addresses list. + */ if (reserved_valid_start > new_va_block->start) { prev_start = new_va_block->start; prev_end = reserved_valid_start - 1; @@ -617,94 +640,6 @@ out: return reserved_valid_start; } -/** - * get_va_block_non_pow2() - get a virtual block for the given size and - * alignment where alignment is not a power of 2. - * @hdev: pointer to the habanalabs device structure. - * @va_range: pointer to the virtual addresses range. - * @size: requested block size. - * @hint_addr: hint for requested address by the user. - * @va_block_align: required alignment of the virtual block start address. - * - * This function does the following: - * - Iterate on the virtual block list to find a suitable virtual block for the - * given size and alignment. - * - Reserve the requested block and update the list. - * - Return the start address of the virtual block. - */ -static u64 get_va_block_non_pow2(struct hl_device *hdev, - struct hl_va_range *va_range, - u64 size, u64 hint_addr, u32 va_block_align) -{ - struct hl_vm_va_block *va_block, *new_va_block = NULL; - u64 reserved_valid_start = 0; - - /* - * with non-power-of-2 range we work only with page granularity and the - * start address is page aligned, so no need for alignment checking. 
- */ - size = DIV_ROUND_UP_ULL(size, va_range->page_size) * - va_range->page_size; - - mutex_lock(&va_range->lock); - - print_va_list_locked(hdev, &va_range->list); - - list_for_each_entry(va_block, &va_range->list, node) { - if ((va_block->start + size) > va_block->end) - continue; - - new_va_block = va_block; - reserved_valid_start = va_block->start; - break; - } - - if (!new_va_block) { - dev_err(hdev->dev, "no available va block for size %llu\n", - size); - goto out; - } - - if (new_va_block->size > size) { - new_va_block->start += size; - new_va_block->size = new_va_block->end - new_va_block->start; - } else { - list_del(&new_va_block->node); - kfree(new_va_block); - } - - print_va_list_locked(hdev, &va_range->list); -out: - mutex_unlock(&va_range->lock); - - return reserved_valid_start; -} - -/* - * get_va_block() - get a virtual block for the given size and alignment. - * @hdev: pointer to the habanalabs device structure. - * @va_range: pointer to the virtual addresses range. - * @size: requested block size. - * @hint_addr: hint for requested address by the user. - * @va_block_align: required alignment of the virtual block start address. - * - * This function does the following: - * - Iterate on the virtual block list to find a suitable virtual block for the - * given size and alignment. - * - Reserve the requested block and update the list. - * - Return the start address of the virtual block. - */ -static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range, - u64 size, u64 hint_addr, u32 va_block_align) -{ - if (is_power_of_2(va_range->page_size)) - return get_va_block_pow2(hdev, va_range, - size, hint_addr, va_block_align); - else - return get_va_block_non_pow2(hdev, va_range, - size, hint_addr, va_block_align); -} - /* * hl_reserve_va_block() - reserve a virtual block of a given size. * @hdev: pointer to the habanalabs device structure. -- cgit v1.2.3-58-ga151 From e1fa724dd17a6a9b9934636226e683912d12c876 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Wed, 6 Jan 2021 15:40:37 +0200 Subject: habanalabs: add user available interrupt to hw_ip In order to support completions that arrive directly to the user, the driver needs to supply the user with the first available msix interrupt available. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 3 +++ drivers/misc/habanalabs/common/habanalabs_ioctl.c | 2 ++ drivers/misc/habanalabs/gaudi/gaudi.c | 2 ++ drivers/misc/habanalabs/goya/goya.c | 2 ++ include/uapi/misc/habanalabs.h | 6 ++++-- 5 files changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 3923b03e99aa..ce1a1e70a6d5 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -408,6 +408,8 @@ struct hl_mmu_properties { * @sync_stream_first_mon: first monitor available for sync stream use * @first_available_user_sob: first sob available for the user * @first_available_user_mon: first monitor available for the user + * @first_available_user_msix_interrupt: first available msix interrupt + * reserved for the user * @tpc_enabled_mask: which TPCs are enabled. * @completion_queues_count: number of completion queues. 
* @fw_security_disabled: true if security measures are disabled in firmware, @@ -469,6 +471,7 @@ struct asic_fixed_properties { u16 sync_stream_first_mon; u16 first_available_user_sob[HL_MAX_DCORES]; u16 first_available_user_mon[HL_MAX_DCORES]; + u16 first_available_user_msix_interrupt; u8 tpc_enabled_mask; u8 completion_queues_count; u8 fw_security_disabled; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index dfac5c8cadb3..628bdc56dca3 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -90,6 +90,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od; hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor; + hw_ip.first_available_interrupt_id = + prop->first_available_user_msix_interrupt; return copy_to_user(out, &hw_ip, min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0; } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 2b01c081404a..69b3867bc151 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -527,6 +527,8 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->sync_stream_first_mon + (num_sync_stream_queues * HL_RSVD_MONS); + prop->first_available_user_msix_interrupt = USHRT_MAX; + /* disable fw security for now, set it in a later stage */ prop->fw_security_disabled = true; prop->fw_security_status_valid = false; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 50dcefc02cdd..82f69274def7 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -455,6 +455,8 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->max_pending_cs = GOYA_MAX_PENDING_CS; + prop->first_available_user_msix_interrupt = USHRT_MAX; + /* disable fw security for now, set it in a later stage */ prop->fw_security_disabled = true; prop->fw_security_status_valid = false; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index b431a70e1b8b..866355a53188 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -309,7 +309,9 @@ struct hl_info_hw_ip_info { __u32 num_of_events; __u32 device_id; /* PCI Device ID */ __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */ - __u32 reserved[2]; + __u32 reserved; + __u16 first_available_interrupt_id; + __u16 reserved2; __u32 cpld_version; __u32 psoc_pci_pll_nr; __u32 psoc_pci_pll_nf; @@ -320,7 +322,7 @@ struct hl_info_hw_ip_info { __u8 pad[2]; __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN]; __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN]; - __u64 reserved2; + __u64 reserved3; __u64 dram_page_size; }; -- cgit v1.2.3-58-ga151 From 89473a1fc3607d9ee5a4f859a2684d0abd0c4ded Mon Sep 17 00:00:00 2001 From: farah kassabri Date: Tue, 12 Jan 2021 17:24:00 +0200 Subject: habanalabs: fix MMU debugfs related nodes In the mmu debugfs node, show un-scrambled physical addresses. Before reading or writing through the data nodes, the driver needs to unscramble the physical address before using it for the PCI transaction.
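The composition the fix performs can be sketched in isolation as follows (a simplified model of hl_mmu_pa_page_with_offset() from the diff below; the standalone names are illustrative): the page-frame bits come from the last hop PTE (or its un-scrambled copy) and the in-page offset bits come from the virtual address, with the split point given by the hop shift:

#include <stdint.h>

static uint64_t pa_with_page_offset(uint64_t last_hop_pte, uint64_t virt_addr,
				    unsigned int hop_shift)
{
	uint64_t offset_mask = ((uint64_t)1 << hop_shift) - 1;

	/* Frame bits from the PTE, offset bits from the virtual address */
	return (last_hop_pte & ~offset_mask) | (virt_addr & offset_mask);
}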
Signed-off-by: farah kassabri Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/debugfs.c | 21 +++++--- drivers/misc/habanalabs/common/habanalabs.h | 14 ++++-- drivers/misc/habanalabs/common/mmu/mmu.c | 74 ++++++++++++++++++++++++----- drivers/misc/habanalabs/gaudi/gaudi.c | 3 +- drivers/misc/habanalabs/goya/goya.c | 3 +- 5 files changed, 93 insertions(+), 22 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 323d0381a60a..9e3c1efe56ba 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -310,8 +310,8 @@ static int mmu_show(struct seq_file *s, void *data) struct hl_dbg_device_entry *dev_entry = entry->dev_entry; struct hl_device *hdev = dev_entry->hdev; struct hl_ctx *ctx; - struct hl_mmu_hop_info hops_info; - u64 virt_addr = dev_entry->mmu_addr; + struct hl_mmu_hop_info hops_info = {0}; + u64 virt_addr = dev_entry->mmu_addr, phys_addr; int i; if (!hdev->mmu_enable) @@ -333,10 +333,19 @@ static int mmu_show(struct seq_file *s, void *data) return 0; } - seq_printf(s, - "asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx\n", - dev_entry->mmu_asid, dev_entry->mmu_addr, - hops_info.scrambled_vaddr); + phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val; + + if (hops_info.scrambled_vaddr && + (dev_entry->mmu_addr != hops_info.scrambled_vaddr)) + seq_printf(s, + "asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n", + dev_entry->mmu_asid, dev_entry->mmu_addr, + hops_info.scrambled_vaddr, + hops_info.unscrambled_paddr, phys_addr); + else + seq_printf(s, + "asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n", + dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr); for (i = 0 ; i < hops_info.used_hops ; i++) { seq_printf(s, "hop%d_addr: 0x%llx\n", diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index ce1a1e70a6d5..4129e4e6a7b5 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -851,8 +851,10 @@ enum div_select_defs { * @collective_wait_init_cs: Generate collective master/slave packets * and place them in the relevant cs jobs * @collective_wait_create_jobs: allocate collective wait cs jobs - * @scramble_vaddr: Routine to scramble the virtual address prior of mapping it + * @scramble_addr: Routine to scramble the address prior of mapping it * in the MMU. + * @descramble_addr: Routine to de-scramble the address prior of + * showing it to users. * @ack_protection_bits_errors: ack and dump all security violations */ struct hl_asic_funcs { @@ -963,7 +965,8 @@ struct hl_asic_funcs { int (*collective_wait_create_jobs)(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, u32 collective_engine_id); - u64 (*scramble_vaddr)(struct hl_device *hdev, u64 virt_addr); + u64 (*scramble_addr)(struct hl_device *hdev, u64 addr); + u64 (*descramble_addr)(struct hl_device *hdev, u64 addr); void (*ack_protection_bits_errors)(struct hl_device *hdev); }; @@ -1726,13 +1729,17 @@ struct hl_mmu_per_hop_info { * @scrambled_vaddr: The value of the virtual address after scrambling. This * address replaces the original virtual-address when mapped * in the MMU tables. + * @unscrambled_paddr: The un-scrambled physical address. * @hop_info: Array holding the per-hop information used for the translation. 
* @used_hops: The number of hops used for the translation. + * @range_type: virtual address range type. */ struct hl_mmu_hop_info { u64 scrambled_vaddr; + u64 unscrambled_paddr; struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS]; u32 used_hops; + enum hl_va_range_type range_type; }; /** @@ -2222,7 +2229,8 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr); int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops); -u64 hl_mmu_scramble_vaddr(struct hl_device *hdev, u64 virt_addr); +u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr); +u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr); bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index 97c51686fcfe..27dc0d116db5 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -287,9 +287,9 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, * after scrambling) */ if ((is_dram_addr && - ((hdev->asic_funcs->scramble_vaddr(hdev, phys_addr) & + ((hdev->asic_funcs->scramble_addr(hdev, phys_addr) & (mmu_prop->page_size - 1)) || - (hdev->asic_funcs->scramble_vaddr(hdev, virt_addr) & + (hdev->asic_funcs->scramble_addr(hdev, virt_addr) & (mmu_prop->page_size - 1)))) || (!is_dram_addr && ((phys_addr & (real_page_size - 1)) || (virt_addr & (real_page_size - 1))))) @@ -476,19 +476,53 @@ void hl_mmu_swap_in(struct hl_ctx *ctx) hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); } +static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops, + u64 *phys_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr; + u32 hop0_shift_off; + void *p; + + /* last hop holds the phys address and flags */ + if (hops->unscrambled_paddr) + tmp_phys_addr = hops->unscrambled_paddr; + else + tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val; + + if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE) + p = &prop->pmmu_huge; + else if (hops->range_type == HL_VA_RANGE_TYPE_HOST) + p = &prop->pmmu; + else /* HL_VA_RANGE_TYPE_DRAM */ + p = &prop->dmmu; + + /* + * find the correct hop shift field in hl_mmu_properties structure + * in order to determine the right mask for the page offset.
+ */ + hop0_shift_off = offsetof(struct hl_mmu_properties, hop0_shift); + p = (char *)p + hop0_shift_off; + p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); + hop_shift = *(u64 *)p; + offset_mask = (1 << hop_shift) - 1; + addr_mask = ~(offset_mask); + *phys_addr = (tmp_phys_addr & addr_mask) | + (virt_addr & offset_mask); +} + int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) { struct hl_mmu_hop_info hops; - u64 tmp_addr; int rc; rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops); if (rc) return rc; - /* last hop holds the phys address and flags */ - tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val; - *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK); + hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr); return 0; } @@ -525,6 +559,11 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, mutex_unlock(&ctx->mmu_lock); + /* add page offset to physical address */ + if (hops->unscrambled_paddr) + hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, + &hops->unscrambled_paddr); + return rc; } @@ -548,13 +587,26 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev) } /** - * hl_mmu_scramble_vaddr() - The generic mmu virtual address scrambling routine. + * hl_mmu_scramble_addr() - The generic mmu address scrambling routine. + * @hdev: pointer to device data. + * @addr: The address to scramble. + * + * Return: The scrambled address. + */ +u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr) +{ + return addr; +} + +/** + * hl_mmu_descramble_addr() - The generic mmu address descrambling + * routine. * @hdev: pointer to device data. - * @virt_addr: The virtual address to scramble. + * @addr: The address to descramble. * - * Return: The scrambled virtual address. + * Return: The un-scrambled address. 
*/ -u64 hl_mmu_scramble_vaddr(struct hl_device *hdev, u64 virt_addr) +u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr) { - return virt_addr; + return addr; } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 69b3867bc151..b49e10394ed4 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -8548,7 +8548,8 @@ static const struct hl_asic_funcs gaudi_funcs = { .get_device_time = gaudi_get_device_time, .collective_wait_init_cs = gaudi_collective_wait_init_cs, .collective_wait_create_jobs = gaudi_collective_wait_create_jobs, - .scramble_vaddr = hl_mmu_scramble_vaddr, + .scramble_addr = hl_mmu_scramble_addr, + .descramble_addr = hl_mmu_descramble_addr, .ack_protection_bits_errors = gaudi_ack_protection_bits_errors }; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 82f69274def7..db951d622ad5 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5459,7 +5459,8 @@ static const struct hl_asic_funcs goya_funcs = { .get_device_time = goya_get_device_time, .collective_wait_init_cs = goya_collective_wait_init_cs, .collective_wait_create_jobs = goya_collective_wait_create_jobs, - .scramble_vaddr = hl_mmu_scramble_vaddr, + .scramble_addr = hl_mmu_scramble_addr, + .descramble_addr = hl_mmu_descramble_addr, .ack_protection_bits_errors = goya_ack_protection_bits_errors }; -- cgit v1.2.3-58-ga151 From d00697fbe13ccc1dfb7adb82204e1469a7783b07 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Tue, 5 Jan 2021 12:55:06 +0200 Subject: habanalabs: add new mem ioctl op for mapping hw blocks For future ASIC support the driver allows user to map certain regions in the device's configuration space for direct access from userspace. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/device.c | 3 + drivers/misc/habanalabs/common/habanalabs.h | 16 +++-- drivers/misc/habanalabs/common/memory.c | 93 ++++++++++++++++++++++++++++- drivers/misc/habanalabs/gaudi/gaudi.c | 17 +++++- drivers/misc/habanalabs/goya/goya.c | 16 ++++- include/uapi/misc/habanalabs.h | 18 +++++- 6 files changed, 153 insertions(+), 10 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 13405d47d843..59219c862ca0 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -142,6 +142,9 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma) switch (vm_pgoff & HL_MMAP_TYPE_MASK) { case HL_MMAP_TYPE_CB: return hl_cb_mmap(hpriv, vma); + + case HL_MMAP_TYPE_BLOCK: + return hl_hw_block_mmap(hpriv, vma); } return -EINVAL; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 4129e4e6a7b5..e105612ed577 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -28,17 +28,18 @@ #define HL_NAME "habanalabs" /* Use upper bits of mmap offset to store habana driver specific information. - * bits[63:62] - Encode mmap type + * bits[63:61] - Encode mmap type * bits[45:0] - mmap offset value * * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. 
Hence, these * defines are w.r.t to PAGE_SIZE */ -#define HL_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT) -#define HL_MMAP_TYPE_MASK (0x3ull << HL_MMAP_TYPE_SHIFT) +#define HL_MMAP_TYPE_SHIFT (61 - PAGE_SHIFT) +#define HL_MMAP_TYPE_MASK (0x7ull << HL_MMAP_TYPE_SHIFT) +#define HL_MMAP_TYPE_BLOCK (0x4ull << HL_MMAP_TYPE_SHIFT) #define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT) -#define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT) +#define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT) #define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK) #define HL_PENDING_RESET_PER_SEC 10 @@ -856,6 +857,8 @@ enum div_select_defs { * @descramble_addr: Routine to de-scramble the address prior of * showing it to users. * @ack_protection_bits_errors: ack and dump all security violations + * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it. + * @hw_block_mmap: mmap a HW block with a given id. */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -968,6 +971,10 @@ struct hl_asic_funcs { u64 (*scramble_addr)(struct hl_device *hdev, u64 addr); u64 (*descramble_addr)(struct hl_device *hdev, u64 addr); void (*ack_protection_bits_errors)(struct hl_device *hdev); + int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr, + u32 *block_id); + int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, + u32 block_id, u32 block_size); }; @@ -2167,6 +2174,7 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, bool map_cb, u64 *handle); int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle); int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); +int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 handle); void hl_cb_put(struct hl_cb *cb); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 36de0d05d3a2..7171e8820a2d 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1289,11 +1289,88 @@ vm_type_err: return rc; } +static int map_block(struct hl_device *hdev, u64 address, u64 *handle) +{ + u32 block_id = 0; + int rc; + + rc = hdev->asic_funcs->get_hw_block_id(hdev, address, &block_id); + + *handle = block_id | HL_MMAP_TYPE_BLOCK; + *handle <<= PAGE_SHIFT; + + return rc; +} + +static void hw_block_vm_close(struct vm_area_struct *vma) +{ + struct hl_ctx *ctx = (struct hl_ctx *) vma->vm_private_data; + + hl_ctx_put(ctx); + vma->vm_private_data = NULL; +} + +static const struct vm_operations_struct hw_block_vm_ops = { + .close = hw_block_vm_close +}; + +/** + * hl_hw_block_mmap() - mmap a hw block to user. 
+ * @hpriv: pointer to the private data of the fd + * @vma: pointer to vm_area_struct of the process + * + * Driver increments context reference for every HW block mapped in order + * to prevent user from closing FD without unmapping first + */ +int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) +{ + struct hl_device *hdev = hpriv->hdev; + u32 block_id, block_size; + int rc; + + /* We use the page offset to hold the block id and thus we need to clear + * it before doing the mmap itself + */ + block_id = vma->vm_pgoff; + vma->vm_pgoff = 0; + + /* Driver only allows mapping of a complete HW block */ + block_size = vma->vm_end - vma->vm_start; + +#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK + if (!access_ok(VERIFY_WRITE, + (void __user *) (uintptr_t) vma->vm_start, block_size)) { +#else + if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) { +#endif + dev_err(hdev->dev, + "user pointer is invalid - 0x%lx\n", + vma->vm_start); + + return -EINVAL; + } + + vma->vm_ops = &hw_block_vm_ops; + vma->vm_private_data = hpriv->ctx; + + hl_ctx_get(hdev, hpriv->ctx); + + rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size); + if (rc) { + hl_ctx_put(hpriv->ctx); + return rc; + } + + vma->vm_pgoff = block_id; + + return 0; +} + static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) { struct hl_device *hdev = hpriv->hdev; struct hl_ctx *ctx = hpriv->ctx; - u64 device_addr = 0; + u64 block_handle, device_addr = 0; u32 handle = 0; int rc; @@ -1337,6 +1414,12 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) rc = 0; break; + case HL_MEM_OP_MAP_BLOCK: + rc = map_block(hdev, args->in.map_block.block_addr, + &block_handle); + args->out.handle = block_handle; + break; + default: dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); rc = -ENOTTY; @@ -1353,7 +1436,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) union hl_mem_args *args = data; struct hl_device *hdev = hpriv->hdev; struct hl_ctx *ctx = hpriv->ctx; - u64 device_addr = 0; + u64 block_handle, device_addr = 0; u32 handle = 0; int rc; @@ -1439,6 +1522,12 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) rc = unmap_device_va(ctx, &args->in, false); break; + case HL_MEM_OP_MAP_BLOCK: + rc = map_block(hdev, args->in.map_block.block_addr, + &block_handle); + args->out.handle = block_handle; + break; + default: dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); rc = -ENOTTY; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index b49e10394ed4..e1c6072e5fb3 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -8471,6 +8471,19 @@ static u64 gaudi_get_device_time(struct hl_device *hdev) return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); } +static int gaudi_get_hw_block_id(struct hl_device *hdev, u64 block_addr, + u32 *block_id) +{ + return -EPERM; +} + +static int gaudi_block_mmap(struct hl_device *hdev, + struct vm_area_struct *vma, + u32 block_id, u32 block_size) +{ + return -EPERM; +} + static const struct hl_asic_funcs gaudi_funcs = { .early_init = gaudi_early_init, .early_fini = gaudi_early_fini, @@ -8550,7 +8563,9 @@ static const struct hl_asic_funcs gaudi_funcs = { .collective_wait_create_jobs = gaudi_collective_wait_create_jobs, .scramble_addr = hl_mmu_scramble_addr, .descramble_addr = hl_mmu_descramble_addr, - .ack_protection_bits_errors = gaudi_ack_protection_bits_errors + .ack_protection_bits_errors = gaudi_ack_protection_bits_errors, + 
.get_hw_block_id = gaudi_get_hw_block_id, + .hw_block_mmap = gaudi_block_mmap }; /** diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index db951d622ad5..6b4c41188495 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5382,6 +5382,18 @@ static void goya_ctx_fini(struct hl_ctx *ctx) } +static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr, + u32 *block_id) +{ + return -EPERM; +} + +static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + u32 block_id, u32 block_size) +{ + return -EPERM; +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5461,7 +5473,9 @@ static const struct hl_asic_funcs goya_funcs = { .collective_wait_create_jobs = goya_collective_wait_create_jobs, .scramble_addr = hl_mmu_scramble_addr, .descramble_addr = hl_mmu_descramble_addr, - .ack_protection_bits_errors = goya_ack_protection_bits_errors + .ack_protection_bits_errors = goya_ack_protection_bits_errors, + .get_hw_block_id = goya_get_hw_block_id, + .hw_block_mmap = goya_block_mmap }; /* diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 866355a53188..b1c09eba8ac2 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -718,6 +718,8 @@ union hl_wait_cs_args { #define HL_MEM_OP_MAP 2 /* Opcode to unmap previously mapped host and device memory */ #define HL_MEM_OP_UNMAP 3 +/* Opcode to map a hw block */ +#define HL_MEM_OP_MAP_BLOCK 4 /* Memory flags */ #define HL_MEM_CONTIGUOUS 0x1 @@ -772,6 +774,17 @@ struct hl_mem_in { __u64 mem_size; } map_host; + /* HL_MEM_OP_MAP_BLOCK - map a hw block */ + struct { + /* + * HW block address to map, a handle will be returned + * to the user and will be used to mmap the relevant + * block. Only addresses from configuration space are + * allowed. + */ + __u64 block_addr; + } map_block; + /* HL_MEM_OP_UNMAP - unmap host memory */ struct { /* Virtual address returned from HL_MEM_OP_MAP */ @@ -798,8 +811,9 @@ struct hl_mem_out { __u64 device_virt_addr; /* - * Used for HL_MEM_OP_ALLOC. This is the assigned - * handle for the allocated memory + * Used for HL_MEM_OP_ALLOC and HL_MEM_OP_MAP_BLOCK. + * This is the assigned handle for the allocated memory + * or mapped block */ __u64 handle; }; -- cgit v1.2.3-58-ga151 From 0811b391469510447a2263f706389b610e454177 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 6 Dec 2020 17:18:02 +0200 Subject: habanalabs: add CS completion and timeout properties In order to support the staged submission feature, we need to distinguish for which command submission we want to receive a timeout and for which we want to receive a completion.
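The rule can be condensed as follows (a sketch mirroring the cs_needs_completion() and cs_needs_timeout() helpers added in the diff below): in a staged submission only the CS marked 'staged_last' gets a completion and only the one marked 'staged_first' gets a timeout, while any non-staged CS gets both:

#include <stdbool.h>

static bool needs_completion(bool staged_cs, bool staged_last)
{
	/* Only the last CS of a staged submission gets a completion */
	return !staged_cs || staged_last;
}

static bool needs_timeout(bool staged_cs, bool staged_first)
{
	/* Only the first CS of a staged submission gets a timeout */
	return !staged_cs || staged_first;
}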
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 104 +++++++++++++++++---- drivers/misc/habanalabs/common/habanalabs.h | 15 ++- drivers/misc/habanalabs/common/hw_queue.c | 24 ++++- 3 files changed, 117 insertions(+), 26 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index a5e9bb0a4855..57daff0e59ae 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -149,9 +149,10 @@ void hl_fence_get(struct hl_fence *fence) kref_get(&fence->refcount); } -static void hl_fence_init(struct hl_fence *fence) +static void hl_fence_init(struct hl_fence *fence, u64 sequence) { kref_init(&fence->refcount); + fence->cs_sequence = sequence; fence->error = 0; fence->timestamp = ktime_set(0, 0); init_completion(&fence->completion); @@ -184,6 +185,28 @@ static void cs_job_put(struct hl_cs_job *job) kref_put(&job->refcount, cs_job_do_release); } +bool cs_needs_completion(struct hl_cs *cs) +{ + /* In case this is a staged CS, only the last CS in sequence should + * get a completion, any non staged CS will always get a completion + */ + if (cs->staged_cs && !cs->staged_last) + return false; + + return true; +} + +bool cs_needs_timeout(struct hl_cs *cs) +{ + /* In case this is a staged CS, only the first CS in sequence should + * get a timeout, any non staged CS will always get a timeout + */ + if (cs->staged_cs && !cs->staged_first) + return false; + + return true; +} + static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) { /* @@ -225,7 +248,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) parser.queue_type = job->queue_type; parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb; job->patched_cb = NULL; - parser.completion = true; + parser.completion = cs_needs_completion(job->cs); rc = hdev->asic_funcs->cs_parser(hdev, &parser); @@ -291,8 +314,21 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job) hl_debugfs_remove_job(hdev, job); - if (job->queue_type == QUEUE_TYPE_EXT || - job->queue_type == QUEUE_TYPE_HW) + /* We decrement reference only for a CS that gets completion + * because the reference was incremented only for this kind of CS + * right before it was scheduled. + * + * In staged submission, only the last CS marked as 'staged_last' + * gets completion, hence its release function will be called from here. + * As for all the rest CS's in the staged submission which do not get + * completion, their CS reference will be decremented by the + * 'staged_last' CS during the CS release flow. + * All relevant PQ CI counters will be incremented during the CS release + * flow by calling 'hl_hw_queue_update_ci'. 
+ */ + if (cs_needs_completion(cs) && + (job->queue_type == QUEUE_TYPE_EXT || + job->queue_type == QUEUE_TYPE_HW)) cs_put(cs); cs_job_put(job); @@ -347,8 +383,8 @@ static void cs_do_release(struct kref *ref) hdev->asic_funcs->hw_queues_unlock(hdev); - /* Need to update CI for internal queues */ - hl_int_hw_queue_update_ci(cs); + /* Need to update CI for all queue jobs that does not get completion */ + hl_hw_queue_update_ci(cs); /* remove CS from CS mirror list */ spin_lock(&hdev->cs_mirror_lock); @@ -359,6 +395,7 @@ static void cs_do_release(struct kref *ref) * running from the TDR context */ if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { + bool next_entry_found = false; struct hl_cs *next; if (cs->tdr_active) @@ -367,10 +404,13 @@ static void cs_do_release(struct kref *ref) spin_lock(&hdev->cs_mirror_lock); /* queue TDR for next CS */ - next = list_first_entry_or_null(&hdev->cs_mirror_list, - struct hl_cs, mirror_node); + list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node) + if (cs_needs_timeout(next)) { + next_entry_found = true; + break; + } - if (next && !next->tdr_active) { + if (next_entry_found && !next->tdr_active) { next->tdr_active = true; schedule_delayed_work(&next->work_tdr, hdev->timeout_jiffies); @@ -462,7 +502,8 @@ static void cs_timedout(struct work_struct *work) } static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, - enum hl_cs_type cs_type, struct hl_cs **cs_new) + enum hl_cs_type cs_type, u64 user_sequence, + struct hl_cs **cs_new) { struct hl_cs_counters_atomic *cntr; struct hl_fence *other = NULL; @@ -511,6 +552,18 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, (hdev->asic_prop.max_pending_cs - 1)]; if (other && !completion_done(&other->completion)) { + /* If the following statement is true, it means we have reached + * a point in which only part of the staged submission was + * submitted and we don't have enough room in the 'cs_pending' + * array for the rest of the submission. + * This causes a deadlock because this CS will never be + * completed as it depends on future CS's for completion. 
+ */ + if (other->cs_sequence == user_sequence) + dev_crit_ratelimited(hdev->dev, + "Staged CS %llu deadlock due to lack of resources", + user_sequence); + dev_dbg_ratelimited(hdev->dev, "Rejecting CS because of too many in-flights CS\n"); atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt); @@ -529,7 +582,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, } /* init hl_fence */ - hl_fence_init(&cs_cmpl->base_fence); + hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq); cs->sequence = cs_cmpl->cs_seq; @@ -825,7 +878,7 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev, static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, u32 num_chunks, u64 *cs_seq, u32 flags) { - bool int_queues_only = true; + bool staged_mid, int_queues_only = true; struct hl_device *hdev = hpriv->hdev; struct hl_cs_chunk *cs_chunk_array; struct hl_cs_counters_atomic *cntr; @@ -833,9 +886,11 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, struct hl_cs_job *job; struct hl_cs *cs; struct hl_cb *cb; + u64 user_sequence; int rc, i; cntr = &hdev->aggregated_cs_counters; + user_sequence = *cs_seq; *cs_seq = ULLONG_MAX; rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks, @@ -843,7 +898,14 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, if (rc) goto out; - rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs); + if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) && + !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST)) + staged_mid = true; + else + staged_mid = false; + + rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, + staged_mid ? user_sequence : ULLONG_MAX, &cs); if (rc) goto free_cs_chunk_array; @@ -911,8 +973,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, * Only increment for JOB on external or H/W queues, because * only for those JOBs we get completion */ - if (job->queue_type == QUEUE_TYPE_EXT || - job->queue_type == QUEUE_TYPE_HW) + if (cs_needs_completion(cs) && + (job->queue_type == QUEUE_TYPE_EXT || + job->queue_type == QUEUE_TYPE_HW)) cs_get(cs); hl_debugfs_add_job(hdev, job); @@ -928,11 +991,14 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, } } - if (int_queues_only) { + /* We allow a CS with any queue type combination as long as it does + * not get a completion + */ + if (int_queues_only && cs_needs_completion(cs)) { atomic64_inc(&ctx->cs_counters.validation_drop_cnt); atomic64_inc(&cntr->validation_drop_cnt); dev_err(hdev->dev, - "Reject CS %d.%llu because only internal queues jobs are present\n", + "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n", cs->ctx->asid, cs->sequence); rc = -EINVAL; goto free_cs_object; @@ -1037,7 +1103,7 @@ static int hl_submit_pending_cb(struct hl_fpriv *hpriv) list_move_tail(&pending_cb->cb_node, &local_cb_list); spin_unlock(&ctx->pending_cb_lock); - rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, &cs); + rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs); if (rc) goto add_list_elements; @@ -1410,7 +1476,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, } } - rc = allocate_cs(hdev, ctx, cs_type, &cs); + rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs); if (rc) { if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index e105612ed577..fd2fffd20ba1 100644 --- 
a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -486,6 +486,7 @@ struct asic_fixed_properties { * struct hl_fence - software synchronization primitive * @completion: fence is implemented using completion * @refcount: refcount for this fence + * @cs_sequence: sequence of the corresponding command submission * @error: mark this fence with error * @timestamp: timestamp upon completion * @@ -493,6 +494,7 @@ struct asic_fixed_properties { struct hl_fence { struct completion completion; struct kref refcount; + u64 cs_sequence; int error; ktime_t timestamp; }; @@ -1176,7 +1178,11 @@ struct hl_userptr { * @tdr_active: true if TDR was activated for this CS (to prevent * double TDR activation). * @aborted: true if CS was aborted due to some device error. - * @timestamp: true if a timestmap must be captured upon completion + * @timestamp: true if a timestamp must be captured upon completion. + * @staged_last: true if this is the last staged CS and needs completion. + * @staged_first: true if this is the first staged CS and we need to receive + * timeout for this CS. + * @staged_cs: true if this CS is part of a staged submission. */ struct hl_cs { u16 *jobs_in_queue_cnt; @@ -1198,6 +1204,9 @@ struct hl_cs { u8 tdr_active; u8 aborted; u8 timestamp; + u8 staged_last; + u8 staged_first; + u8 staged_cs; }; /** @@ -2118,7 +2127,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, int hl_hw_queue_schedule_cs(struct hl_cs *cs); u32 hl_hw_queue_add_ptr(u32 ptr, u16 val); void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id); -void hl_int_hw_queue_update_ci(struct hl_cs *cs); +void hl_hw_queue_update_ci(struct hl_cs *cs); void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset); #define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1) @@ -2196,6 +2205,8 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask); void hl_fence_put(struct hl_fence *fence); void hl_fence_get(struct hl_fence *fence); void cs_get(struct hl_cs *cs); +bool cs_needs_completion(struct hl_cs *cs); +bool cs_needs_timeout(struct hl_cs *cs); void goya_set_asic_funcs(struct hl_device *hdev); void gaudi_set_asic_funcs(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index 76217258780a..ad440ae785a3 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -38,7 +38,7 @@ static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) return (abs(delta) - queue_len); } -void hl_int_hw_queue_update_ci(struct hl_cs *cs) +void hl_hw_queue_update_ci(struct hl_cs *cs) { struct hl_device *hdev = cs->ctx->hdev; struct hl_hw_queue *q; @@ -53,8 +53,13 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs) if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW) return; + /* We must increment CI for every queue that will never get a + * completion, there are 2 scenarios in which this can happen: + * 1. All queues of a non completion CS will never get a completion. + * 2. Internal queues never get a completion. 
+ */ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { - if (q->queue_type == QUEUE_TYPE_INT) + if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT) atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); } } @@ -292,6 +297,10 @@ static void ext_queue_schedule_job(struct hl_cs_job *job) len = job->job_cb_size; ptr = cb->bus_address; + /* Skip completion flow in case this is a non completion CS */ + if (!cs_needs_completion(job->cs)) + goto submit_bd; + cq_pkt.data = cpu_to_le32( ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) & CQ_ENTRY_SHADOW_INDEX_MASK) | @@ -318,6 +327,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job) cq->pi = hl_cq_inc_ptr(cq->pi); +submit_bd: ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); } @@ -525,6 +535,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) struct hl_cs_job *job, *tmp; struct hl_hw_queue *q; int rc = 0, i, cq_cnt; + bool first_entry; u32 max_queues; cntr = &hdev->aggregated_cs_counters; @@ -548,7 +559,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) switch (q->queue_type) { case QUEUE_TYPE_EXT: rc = ext_queue_sanity_checks(hdev, q, - cs->jobs_in_queue_cnt[i], true); + cs->jobs_in_queue_cnt[i], + cs_needs_completion(cs) ? + true : false); break; case QUEUE_TYPE_INT: rc = int_queue_sanity_checks(hdev, q, @@ -586,9 +599,10 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list); /* Queue TDR if the CS is the first entry and if timeout is wanted */ + first_entry = list_first_entry(&hdev->cs_mirror_list, + struct hl_cs, mirror_node) == cs; if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) && - (list_first_entry(&hdev->cs_mirror_list, - struct hl_cs, mirror_node) == cs)) { + first_entry && cs_needs_timeout(cs)) { cs->tdr_active = true; schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies); -- cgit v1.2.3-58-ga151 From 157576d552339edc7988c79f37f4a335eaa0eaf2 Mon Sep 17 00:00:00 2001 From: Ohad Sharabi Date: Sun, 17 Jan 2021 16:01:56 +0200 Subject: habanalabs: modify device_idle interface Currently this API uses a single 64-bit mask for engine idle indication. Recently, it was observed that more bits are needed for some ASICs. This patch modifies the use of the idle mask and the idle_extensions mask. 
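To illustrate the move from a single shift-and-OR mask to a bitmap spread over an array of words, here is a minimal standalone C sketch of the same bookkeeping. The engine IDs, MASK_EXT_SIZE and mark_busy() are hypothetical stand-ins for the driver's engine enums and for the kernel's set_bit(); this is not code from the patch itself.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical engine IDs; the real ones live in the uapi enums. */
enum { ENGINE_ID_DMA_0 = 0, ENGINE_ID_NIC_0 = 70 };

#define MASK_EXT_SIZE 2	/* two u64 words, like HL_BUSY_ENGINES_MASK_EXT_SIZE */

/* Stand-in for set_bit() on a u64-backed bitmap: pick the word, then the bit. */
static void mark_busy(uint64_t *mask_arr, unsigned int engine_id)
{
	mask_arr[engine_id / 64] |= (uint64_t)1 << (engine_id % 64);
}

int main(void)
{
	uint64_t idle_mask[MASK_EXT_SIZE] = { 0 };

	mark_busy(idle_mask, ENGINE_ID_NIC_0);	/* bit 70 lands in word 1 */
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)idle_mask[0],
	       (unsigned long long)idle_mask[1]);	/* prints: 0x0 0x40 */
	return 0;
}

In the patch below the same effect is obtained by casting the u64 array to unsigned long * and calling set_bit(), which removes the open-coded shifts that could not address engine IDs above 63.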
Signed-off-by: Ohad Sharabi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/context.c | 9 +++--- drivers/misc/habanalabs/common/debugfs.c | 2 +- drivers/misc/habanalabs/common/habanalabs.h | 4 +-- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 5 +-- drivers/misc/habanalabs/gaudi/gaudi.c | 37 ++++++++++------------- drivers/misc/habanalabs/goya/goya.c | 21 ++++++------- include/uapi/misc/habanalabs.h | 4 ++- 7 files changed, 40 insertions(+), 42 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index 829fe98eed61..cda871afb8f4 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -12,7 +12,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - u64 idle_mask = 0; + u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; int i; /* Release all allocated pending cb's, those cb's were never @@ -55,10 +55,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx) if ((!hdev->pldm) && (hdev->pdev) && (!hdev->asic_funcs->is_device_idle(hdev, - &idle_mask, NULL))) + idle_mask, + HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) dev_notice(hdev->dev, - "device not idle after user context is closed (0x%llx)\n", - idle_mask); + "device not idle after user context is closed (0x%llx, 0x%llx)\n", + idle_mask[0], idle_mask[1]); } else { dev_dbg(hdev->dev, "closing kernel context\n"); hdev->asic_funcs->ctx_fini(ctx); diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 9e3c1efe56ba..df847a6d19f4 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -414,7 +414,7 @@ static int engines_show(struct seq_file *s, void *data) return 0; } - hdev->asic_funcs->is_device_idle(hdev, NULL, s); + hdev->asic_funcs->is_device_idle(hdev, NULL, 0, s); return 0; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index fd2fffd20ba1..be7947d69dfa 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -933,8 +933,8 @@ struct hl_asic_funcs { void (*set_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); int (*debug_coresight)(struct hl_device *hdev, void *data); - bool (*is_device_idle)(struct hl_device *hdev, u64 *mask, - struct seq_file *s); + bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr, + u8 mask_len, struct seq_file *s); int (*soft_reset_late_init)(struct hl_device *hdev); void (*hw_queues_lock)(struct hl_device *hdev); void (*hw_queues_unlock)(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 628bdc56dca3..e86f46d4b613 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -145,9 +145,10 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) return -EINVAL; hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, - &hw_idle.busy_engines_mask_ext, NULL); + hw_idle.busy_engines_mask_ext, + HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL); hw_idle.busy_engines_mask = - lower_32_bits(hw_idle.busy_engines_mask_ext); + lower_32_bits(hw_idle.busy_engines_mask_ext[0]); return copy_to_user(out, &hw_idle, min((size_t) max_size, sizeof(hw_idle))) ? 
-EFAULT : 0; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index e1c6072e5fb3..9a3d2fb477a8 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -4538,7 +4538,6 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct gaudi_device *gaudi = hdev->asic_specific; - u64 idle_mask = 0; int rc = 0; u64 val = 0; @@ -4551,8 +4550,8 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) hdev, mmDMA0_CORE_STS0/* dummy */, val/* dummy */, - (hdev->asic_funcs->is_device_idle(hdev, - &idle_mask, NULL)), + (hdev->asic_funcs->is_device_idle(hdev, NULL, + 0, NULL)), 1000, HBM_SCRUBBING_TIMEOUT_US); if (rc) { @@ -6423,7 +6422,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev, else timeout = HL_DEVICE_TIMEOUT_USEC; - if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) { + if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { dev_err_ratelimited(hdev->dev, "Can't send driver job on QMAN0 because the device is not idle\n"); return -EBUSY; @@ -7706,13 +7705,14 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev) return 0; } -static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, - struct seq_file *s) +static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr, + u8 mask_len, struct seq_file *s) { struct gaudi_device *gaudi = hdev->asic_specific; const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; const char *nic_fmt = "%-5d%-9s%#-14x%#x\n"; + unsigned long *mask = (unsigned long *)mask_arr; u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts; bool is_idle = true, is_eng_idle, is_slave; u64 offset; @@ -7738,9 +7738,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, IS_DMA_IDLE(dma_core_sts0); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GAUDI_ENGINE_ID_DMA_0 + dma_id); + if (mask && !is_eng_idle) + set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask); if (s) seq_printf(s, fmt, dma_id, is_eng_idle ? "Y" : "N", qm_glbl_sts0, @@ -7761,9 +7760,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, IS_TPC_IDLE(tpc_cfg_sts); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GAUDI_ENGINE_ID_TPC_0 + i); + if (mask && !is_eng_idle) + set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask); if (s) seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N", @@ -7790,9 +7788,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GAUDI_ENGINE_ID_MME_0 + i); + if (mask && !is_eng_idle) + set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask); if (s) { if (!is_slave) seq_printf(s, fmt, i, @@ -7818,9 +7815,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GAUDI_ENGINE_ID_NIC_0 + port); + if (mask && !is_eng_idle) + set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask); if (s) seq_printf(s, nic_fmt, port, is_eng_idle ? 
"Y" : "N", @@ -7834,9 +7830,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GAUDI_ENGINE_ID_NIC_0 + port); + if (mask && !is_eng_idle) + set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask); if (s) seq_printf(s, nic_fmt, port, is_eng_idle ? "Y" : "N", diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 6b4c41188495..a954e7c02375 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2916,7 +2916,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) else timeout = HL_DEVICE_TIMEOUT_USEC; - if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) { + if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { dev_err_ratelimited(hdev->dev, "Can't send driver job on QMAN0 because the device is not idle\n"); return -EBUSY; @@ -5187,11 +5187,12 @@ static void goya_disable_clock_gating(struct hl_device *hdev) /* clock gating not supported in Goya */ } -static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask, - struct seq_file *s) +static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr, + u8 mask_len, struct seq_file *s) { const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n"; const char *dma_fmt = "%-5d%-9s%#-14x%#x\n"; + unsigned long *mask = (unsigned long *)mask_arr; u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts, mme_arch_sts; bool is_idle = true, is_eng_idle; @@ -5211,9 +5212,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask, IS_DMA_IDLE(dma_core_sts0); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GOYA_ENGINE_ID_DMA_0 + i); + if (mask && !is_eng_idle) + set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask); if (s) seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0, dma_core_sts0); @@ -5235,9 +5235,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask, IS_TPC_IDLE(tpc_cfg_sts); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << - (GOYA_ENGINE_ID_TPC_0 + i); + if (mask && !is_eng_idle) + set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask); if (s) seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts); @@ -5256,8 +5255,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask, IS_MME_IDLE(mme_arch_sts); is_idle &= is_eng_idle; - if (mask) - *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0; + if (mask && !is_eng_idle) + set_bit(GOYA_ENGINE_ID_MME_0, mask); if (s) { seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0, cmdq_glbl_sts0, mme_arch_sts); diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index b1c09eba8ac2..ebde42b37b43 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -331,6 +331,8 @@ struct hl_info_dram_usage { __u64 ctx_dram_mem; }; +#define HL_BUSY_ENGINES_MASK_EXT_SIZE 2 + struct hl_info_hw_idle { __u32 is_idle; /* @@ -343,7 +345,7 @@ struct hl_info_hw_idle { * Extended Bitmask of busy engines. * Bits definition is according to `enum _enging_id'. 
*/ - __u64 busy_engines_mask_ext; + __u64 busy_engines_mask_ext[HL_BUSY_ENGINES_MASK_EXT_SIZE]; }; struct hl_info_device_status { -- cgit v1.2.3-58-ga151 From 2795c8891577c4f1493dd4e3abc298c60009ec42 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Tue, 8 Dec 2020 13:47:05 +0200 Subject: habanalabs: staged submission support We introduce a new mechanism named Staged Submission. This mechanism allows the user to send a whole CS in pieces. No CS in the group requires a completion except the last one. The timeout timer is triggered upon reception of the first CS in the group. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 214 ++++++++++++++++++--- drivers/misc/habanalabs/common/habanalabs.h | 9 + drivers/misc/habanalabs/common/hw_queue.c | 27 +++ drivers/misc/habanalabs/gaudi/gaudi.c | 1 + 4 files changed, 227 insertions(+), 24 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 57daff0e59ae..7bd4a03b3429 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -334,6 +334,133 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job) cs_job_put(job); } +/* + * hl_staged_cs_find_first - locate the first CS in this staged submission + * + * @hdev: pointer to device structure + * @cs_seq: staged submission sequence number + * + * @note: This function must be called under 'hdev->cs_mirror_lock' + * + * Find and return a CS pointer with the given sequence + */ +struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq) +{ + struct hl_cs *cs; + + list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node) + if (cs->staged_cs && cs->staged_first && + cs->sequence == cs_seq) + return cs; + + return NULL; +} + +/* + * is_staged_cs_last_exists - returns true if the last CS in sequence exists + * + * @hdev: pointer to device structure + * @cs: staged submission member + * + */ +bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs) +{ + struct hl_cs *last_entry; + + last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs, + staged_cs_node); + + if (last_entry->staged_last) + return true; + + return false; +} + +/* + * staged_cs_get - get CS reference if this CS is a part of a staged CS + * + * @hdev: pointer to device structure + * @cs: current CS + * @cs_seq: staged submission sequence number + * + * Increment CS reference for every CS in this staged submission except for + * the CS which gets a completion. + */ +static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs) +{ + /* Only the last CS in this staged submission will get a completion. + * We must increment the reference for all other CS's in this + * staged submission. + * Once we get a completion we will release the whole staged submission. + */ + if (!cs->staged_last) + cs_get(cs); +} + +/* + * staged_cs_put - put a CS in case it is part of staged submission + * + * @hdev: pointer to device structure + * @cs: CS to put + * + * This function decrements a CS reference (for a non completion CS) + */ +static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) +{ + /* We release all CS's in a staged submission except the last + * CS, whose reference we never incremented. 
+ */ + if (!cs_needs_completion(cs)) + cs_put(cs); +} + +static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) +{ + bool next_entry_found = false; + struct hl_cs *next; + + if (!cs_needs_timeout(cs)) + return; + + spin_lock(&hdev->cs_mirror_lock); + + /* We need to handle tdr only once for the complete staged submission. + * Hence, we choose the CS that reaches this function first which is + * the CS marked as 'staged_last'. + */ + if (cs->staged_cs && cs->staged_last) + cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); + + spin_unlock(&hdev->cs_mirror_lock); + + /* Don't cancel TDR in case this CS was timedout because we might be + * running from the TDR context + */ + if (cs && (cs->timedout || + hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)) + return; + + if (cs && cs->tdr_active) + cancel_delayed_work_sync(&cs->work_tdr); + + spin_lock(&hdev->cs_mirror_lock); + + /* queue TDR for next CS */ + list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node) + if (cs_needs_timeout(next)) { + next_entry_found = true; + break; + } + + if (next_entry_found && !next->tdr_active) { + next->tdr_active = true; + schedule_delayed_work(&next->work_tdr, + hdev->timeout_jiffies); + } + + spin_unlock(&hdev->cs_mirror_lock); +} + static void cs_do_release(struct kref *ref) { struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); @@ -391,32 +518,29 @@ static void cs_do_release(struct kref *ref) list_del_init(&cs->mirror_node); spin_unlock(&hdev->cs_mirror_lock); - /* Don't cancel TDR in case this CS was timedout because we might be - * running from the TDR context - */ - if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { - bool next_entry_found = false; - struct hl_cs *next; - - if (cs->tdr_active) - cancel_delayed_work_sync(&cs->work_tdr); + cs_handle_tdr(hdev, cs); - spin_lock(&hdev->cs_mirror_lock); - - /* queue TDR for next CS */ - list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node) - if (cs_needs_timeout(next)) { - next_entry_found = true; - break; - } + if (cs->staged_cs) { + /* the completion CS decrements reference for the entire + * staged submission + */ + if (cs->staged_last) { + struct hl_cs *staged_cs, *tmp; - if (next_entry_found && !next->tdr_active) { - next->tdr_active = true; - schedule_delayed_work(&next->work_tdr, - hdev->timeout_jiffies); + list_for_each_entry_safe(staged_cs, tmp, + &cs->staged_cs_node, staged_cs_node) + staged_cs_put(hdev, staged_cs); } - spin_unlock(&hdev->cs_mirror_lock); + /* A staged CS will be a member in the list only after it + * was submitted. 
We used 'cs_mirror_lock' when inserting + * it to list so we will use it again when removing it + */ + if (cs->submitted) { + spin_lock(&hdev->cs_mirror_lock); + list_del(&cs->staged_cs_node); + spin_unlock(&hdev->cs_mirror_lock); + } } out: @@ -614,6 +738,8 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) { struct hl_cs_job *job, *tmp; + staged_cs_put(hdev, cs); + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) complete_job(hdev, job); } @@ -623,7 +749,9 @@ void hl_cs_rollback_all(struct hl_device *hdev) int i; struct hl_cs *cs, *tmp; - /* flush all completions */ + /* flush all completions before iterating over the CS mirror list in + * order to avoid a race with the release functions + */ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) flush_workqueue(hdev->cq_wq[i]); @@ -632,7 +760,7 @@ void hl_cs_rollback_all(struct hl_device *hdev) cs_get(cs); cs->aborted = true; dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", - cs->ctx->asid, cs->sequence); + cs->ctx->asid, cs->sequence); cs_rollback(hdev, cs); cs_put(cs); } @@ -804,6 +932,12 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) return -EBUSY; } + if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) && + !hdev->supports_staged_submission) { + dev_err(hdev->dev, "staged submission not supported"); + return -EPERM; + } + cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK; if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) { @@ -875,6 +1009,34 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev, return 0; } +static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs, + u64 sequence, u32 flags) +{ + if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION)) + return 0; + + cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST); + cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST); + + if (cs->staged_first) { + /* Staged CS sequence is the first CS sequence */ + INIT_LIST_HEAD(&cs->staged_cs_node); + cs->staged_sequence = cs->sequence; + } else { + /* User sequence will be validated in 'hl_hw_queue_schedule_cs' + * under the cs_mirror_lock + */ + cs->staged_sequence = sequence; + } + + /* Increment CS reference if needed */ + staged_cs_get(hdev, cs); + + cs->staged_cs = true; + + return 0; +} + static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, u32 num_chunks, u64 *cs_seq, u32 flags) { @@ -914,6 +1076,10 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, hl_debugfs_add_cs(cs); + rc = cs_staged_submission(hdev, cs, user_sequence, flags); + if (rc) + goto free_cs_object; + /* Validate ALL the CS chunks before submitting the CS */ for (i = 0 ; i < num_chunks ; i++) { struct hl_cs_chunk *chunk = &cs_chunk_array[i]; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index be7947d69dfa..30f32f2edb8a 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1169,8 +1169,11 @@ struct hl_userptr { * @finish_work: workqueue object to run when CS is completed by H/W. * @work_tdr: delayed work node for TDR. * @mirror_node : node in device mirror list of command submissions. + * @staged_cs_node: node in the staged cs list. * @debugfs_list: node in debugfs list of command submissions. * @sequence: the sequence number of this CS. + * @staged_sequence: the sequence of the staged submission this CS is part of, + * relevant only if staged_cs is set. * @type: CS_TYPE_*. 
* @submitted: true if CS was submitted to H/W. * @completed: true if CS was completed by device. @@ -1195,8 +1198,10 @@ struct hl_cs { struct work_struct finish_work; struct delayed_work work_tdr; struct list_head mirror_node; + struct list_head staged_cs_node; struct list_head debugfs_list; u64 sequence; + u64 staged_sequence; enum hl_cs_type type; u8 submitted; u8 completed; @@ -1905,6 +1910,7 @@ struct hl_mmu_funcs { * user processes * @device_fini_pending: true if device_fini was called and might be * waiting for the reset thread to finish + * @supports_staged_submission: true if staged submissions are supported */ struct hl_device { struct pci_dev *pdev; @@ -2010,6 +2016,7 @@ struct hl_device { u8 needs_reset; u8 process_kill_trial_cnt; u8 device_fini_pending; + u8 supports_staged_submission; /* Parameters for bring-up */ u64 nic_ports_mask; @@ -2207,6 +2214,8 @@ void hl_fence_get(struct hl_fence *fence); void cs_get(struct hl_cs *cs); bool cs_needs_completion(struct hl_cs *cs); bool cs_needs_timeout(struct hl_cs *cs); +bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs); +struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq); void goya_set_asic_funcs(struct hl_device *hdev); void gaudi_set_asic_funcs(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index ad440ae785a3..0f335182267f 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -596,6 +596,31 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) hdev->asic_funcs->collective_wait_init_cs(cs); spin_lock(&hdev->cs_mirror_lock); + + /* Verify staged CS exists and add to the staged list */ + if (cs->staged_cs && !cs->staged_first) { + struct hl_cs *staged_cs; + + staged_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); + if (!staged_cs) { + dev_err(hdev->dev, + "Cannot find staged submission sequence %llu", + cs->staged_sequence); + rc = -EINVAL; + goto unlock_cs_mirror; + } + + if (is_staged_cs_last_exists(hdev, staged_cs)) { + dev_err(hdev->dev, + "Staged submission sequence %llu already submitted", + cs->staged_sequence); + rc = -EINVAL; + goto unlock_cs_mirror; + } + + list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node); + } + list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list); /* Queue TDR if the CS is the first entry and if timeout is wanted */ @@ -637,6 +662,8 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) goto out; +unlock_cs_mirror: + spin_unlock(&hdev->cs_mirror_lock); unroll_cq_resv: q = &hdev->kernel_queues[0]; for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) { diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 9a3d2fb477a8..1348016309e3 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1627,6 +1627,7 @@ static int gaudi_sw_init(struct hl_device *hdev) hdev->supports_sync_stream = true; hdev->supports_coresight = true; + hdev->supports_staged_submission = true; return 0; -- cgit v1.2.3-58-ga151 From 663a301d75b86fbb23e02628c31136e0b8698a1a Mon Sep 17 00:00:00 2001 From: Ohad Sharabi Date: Thu, 21 Jan 2021 22:25:52 +0200 Subject: habanalabs: fix ETR security issue ETR should always be non-secured as it is used by the users to record profiling/trace data. This patch fixes the configuration to match those requirements. 
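Before the diff, a note on the register math: the patch replaces the hard-coded 0x700-style constant with a FIELD_PREP() per field. Below is a minimal standalone C sketch of that composition; field_prep() is re-implemented here purely for illustration, and only the three mask values mirror the PSOC_ETR_AXICTL_* definitions the patch adds.

#include <stdint.h>
#include <stdio.h>

#define AXICTL_PROTCTRLBIT0_MASK 0x1	/* privileged-access bit */
#define AXICTL_PROTCTRLBIT1_MASK 0x2	/* non-secure bit (inverted logic) */
#define AXICTL_WRBURSTLEN_MASK   0xF00	/* write burst length field */

/* Tiny stand-in for the kernel's FIELD_PREP(): shift a value into a mask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask;	/* mask & -mask == lowest set bit */
}

int main(void)
{
	/* not privileged, non-secured, burst length 7 (the non-host case) */
	uint32_t val = field_prep(AXICTL_PROTCTRLBIT0_MASK, 0) |
		       field_prep(AXICTL_PROTCTRLBIT1_MASK, 1) |
		       field_prep(AXICTL_WRBURSTLEN_MASK, 7);

	printf("AXICTL = 0x%x\n", val);	/* prints: AXICTL = 0x702 */
	return 0;
}

Expressing each field through its mask keeps the privileged bit explicitly cleared and the non-secure bit explicitly set, which is the security property the commit message describes.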
Signed-off-by: Ohad Sharabi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 18 +++++++++++++++--- drivers/misc/habanalabs/goya/goya_coresight.c | 11 +++++++++-- drivers/misc/habanalabs/include/gaudi/gaudi_masks.h | 5 ++++- .../misc/habanalabs/include/goya/asic_reg/goya_masks.h | 5 ++++- 4 files changed, 32 insertions(+), 7 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c index 88a09d42e111..6e56fa1c6c69 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c @@ -634,9 +634,21 @@ static int gaudi_config_etr(struct hl_device *hdev, WREG32(mmPSOC_ETR_BUFWM, 0x3FFC); WREG32(mmPSOC_ETR_RSZ, input->buffer_size); WREG32(mmPSOC_ETR_MODE, input->sink_mode); - /* Workaround for H3 #HW-2075 bug: use small data chunks */ - WREG32(mmPSOC_ETR_AXICTL, (is_host ? 0 : 0x700) | - PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT); + if (hdev->asic_prop.fw_security_disabled) { + /* make ETR not privileged */ + val = FIELD_PREP( + PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0); + /* make ETR non-secured (inverted logic) */ + val |= FIELD_PREP( + PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1); + /* + * Workaround for H3 #HW-2075 bug: use small data + * chunks + */ + val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, + is_host ? 0 : 7); + WREG32(mmPSOC_ETR_AXICTL, val); + } WREG32(mmPSOC_ETR_DBALO, lower_32_bits(input->buffer_address)); WREG32(mmPSOC_ETR_DBAHI, diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c index 6fa03933b438..6b7445cca580 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -434,8 +434,15 @@ static int goya_config_etr(struct hl_device *hdev, WREG32(mmPSOC_ETR_BUFWM, 0x3FFC); WREG32(mmPSOC_ETR_RSZ, input->buffer_size); WREG32(mmPSOC_ETR_MODE, input->sink_mode); - WREG32(mmPSOC_ETR_AXICTL, - 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT); + if (hdev->asic_prop.fw_security_disabled) { + /* make ETR not privileged */ + val = FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK, 0); + /* make ETR non-secured (inverted logic) */ + val |= FIELD_PREP(PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK, 1); + /* burst size 8 */ + val |= FIELD_PREP(PSOC_ETR_AXICTL_WRBURSTLEN_MASK, 7); + WREG32(mmPSOC_ETR_AXICTL, val); + } WREG32(mmPSOC_ETR_DBALO, lower_32_bits(input->buffer_address)); WREG32(mmPSOC_ETR_DBAHI, diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h index b9b90d079e23..b53aeda9a982 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h @@ -388,7 +388,10 @@ enum axi_id { #define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6) #define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6) -#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 +#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 +#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1 +#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2 +#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00 /* STLB_CACHE_INV */ #define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0 diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h index 067489bd048e..9ff3cb245580 100644 --- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h +++ 
b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h @@ -259,6 +259,9 @@ #define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT #define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT -#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 +#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 +#define PSOC_ETR_AXICTL_PROTCTRLBIT0_MASK 0x1 +#define PSOC_ETR_AXICTL_PROTCTRLBIT1_MASK 0x2 +#define PSOC_ETR_AXICTL_WRBURSTLEN_MASK 0xF00 #endif /* ASIC_REG_GOYA_MASKS_H_ */ -- cgit v1.2.3-58-ga151 From 7838504171d910a0934c0b4043386a471309649e Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 26 Jan 2021 22:56:56 +0200 Subject: habanalabs: update SyncManager interrupt handling The firmware provides more information about SyncManager events. Adjust the code to the latest firmware interface file. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 14 +++++++------- drivers/misc/habanalabs/include/common/cpucp_if.h | 11 +++++++++-- 2 files changed, 16 insertions(+), 9 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 1348016309e3..a9bd5aef6c02 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -6860,24 +6860,24 @@ static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type, u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0; switch (sei_data->sei_cause) { - case GAUDI_SM_SEI_SO_OVERFLOW: + case SM_SEI_SO_OVERFLOW: dev_err(hdev->dev, "SM %u SEI Error: SO %u overflow/underflow", - index, le16_to_cpu(sei_data->sei_log)); + index, le32_to_cpu(sei_data->sei_log)); break; - case GAUDI_SM_SEI_LBW_4B_UNALIGNED: + case SM_SEI_LBW_4B_UNALIGNED: dev_err(hdev->dev, "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x", - index, le16_to_cpu(sei_data->sei_log)); + index, le32_to_cpu(sei_data->sei_log)); break; - case GAUDI_SM_SEI_AXI_RESPONSE_ERR: + case SM_SEI_AXI_RESPONSE_ERR: dev_err(hdev->dev, "SM %u SEI Error: AXI ID %u response error", - index, le16_to_cpu(sei_data->sei_log)); + index, le32_to_cpu(sei_data->sei_log)); break; default: dev_err(hdev->dev, "Unknown SM SEI cause %u", - le16_to_cpu(sei_data->sei_log)); + le32_to_cpu(sei_data->sei_log)); break; } } diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index d75d1077461b..b77c1c16c32c 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -58,10 +58,17 @@ struct hl_eq_ecc_data { __u8 pad[7]; }; +enum hl_sm_sei_cause { + SM_SEI_SO_OVERFLOW, + SM_SEI_LBW_4B_UNALIGNED, + SM_SEI_AXI_RESPONSE_ERR +}; + struct hl_eq_sm_sei_data { - __le16 sei_log; + __le32 sei_log; + /* enum hl_sm_sei_cause */ __u8 sei_cause; - __u8 pad[5]; + __u8 pad[3]; }; struct hl_eq_entry { -- cgit v1.2.3-58-ga151 From 230cd89480d322ff76ed241973158059a170fc5c Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 26 Jan 2021 22:58:13 +0200 Subject: habanalabs/gaudi: unmask HBM interrupts after handling As the driver does with all interrupts, we need to tell F/W to unmask the HBM interrupts after the driver handled them. 
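A minimal standalone C sketch of the handle-then-unmask pattern this one-line fix restores is shown below; firmware_unmask_irq() and the event names are hypothetical stand-ins for hl_fw_unmask_irq() and the GAUDI_EVENT_* codes, not the driver's actual code.

#include <stdio.h>

enum event { EVENT_HBM_ECC, EVENT_TPC_DEC };

/* Stand-in for hl_fw_unmask_irq(): ask firmware to report this event again. */
static void firmware_unmask_irq(enum event e)
{
	printf("unmask event %d\n", e);
}

static void handle_event(enum event e)
{
	switch (e) {
	case EVENT_HBM_ECC:
		printf("read and log HBM interrupt data\n");	/* the handling */
		firmware_unmask_irq(e);	/* the step the patch adds */
		break;
	case EVENT_TPC_DEC:
		firmware_unmask_irq(e);	/* other cases already did this */
		break;
	}
}

int main(void)
{
	handle_event(EVENT_HBM_ECC);
	return 0;
}

Without the unmask, the firmware would keep the HBM event masked after the first report and the driver would never be notified of a recurrence.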
Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index a9bd5aef6c02..52fcaf25531a 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7377,6 +7377,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, gaudi_hbm_read_interrupts(hdev, gaudi_hbm_event_to_dev(event_type), &eq_entry->hbm_ecc_data); + hl_fw_unmask_irq(hdev, event_type); break; case GAUDI_EVENT_TPC0_DEC: -- cgit v1.2.3-58-ga151 From f1aebf5e3d606a2a910eed54bc8d17655f12b606 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 26 Jan 2021 22:59:35 +0200 Subject: habanalabs: update to latest hl_boot_if.h spec from F/W It adds the definition for indication that the F/W handles HBM ECC events. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/include/common/hl_boot_if.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index e29c77bdea07..57785478a4ef 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -69,8 +69,9 @@ * image has failed to match expected * checksum. Trying to program image again * might solve this. + * * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one - * of the PLLs remained in REF_CLK + * of the PLLs remains in REF_CLK * * CPU_BOOT_ERR0_ENABLED Error registers enabled. * This is a main indication that the @@ -161,6 +162,10 @@ * FW initialized Clock Gating. * Initialized in: preboot * + * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled. + * FW handles HBM ECC indications. + * Initialized in: linux + * * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. * This is a main indication that the * running FW populates the device status @@ -184,6 +189,7 @@ #define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11) #define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << 12) #define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13) +#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14) #define CPU_BOOT_DEV_STS0_ENABLED (1 << 31) enum cpu_boot_status { -- cgit v1.2.3-58-ga151 From d71277dc9bd6789964405e0a55d70c1fb2098f84 Mon Sep 17 00:00:00 2001 From: Desmond Yan Date: Thu, 28 Jan 2021 22:04:03 -0800 Subject: misc: bcm-vk: fix set_q_num API precedence issue Change set_q_num API to use if-else to make it more explicit, and avoid a precedence rule issue. Reported-by: kernel test robot Signed-off-by: Desmond Yan Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210129060403.14801-1-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk_msg.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c index eec90494777d..a363e2e4f7bc 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.c +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -52,8 +52,14 @@ static u32 get_q_num(const struct vk_msg_blk *msg) static void set_q_num(struct vk_msg_blk *msg, u32 q_num) { - msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | - (q_num >= VK_MSGQ_PER_CHAN_MAX) ? 
VK_MSGQ_NUM_DEFAULT : q_num; + u32 trans_q; + + if (q_num >= VK_MSGQ_PER_CHAN_MAX) + trans_q = VK_MSGQ_NUM_DEFAULT; + else + trans_q = q_num; + + msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q; } static u32 get_msg_id(const struct vk_msg_blk *msg) -- cgit v1.2.3-58-ga151 From 1309ecc90f16ee9cc3077761e7f4474369747e6e Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Fri, 29 Jan 2021 14:07:46 +0200 Subject: mei: fix transfer over dma with extended header The size in the header field for a packet transferred over DMA includes the size of the extended header. Include the extended header in the size check. Add size and sanity checks on the extended header. Cc: # v5.10+ Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-1-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/interrupt.c | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 326955b04fda..2161c1234ad7 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -295,12 +295,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr) static inline int hdr_is_valid(u32 msg_hdr) { struct mei_msg_hdr *mei_hdr; + u32 expected_len = 0; mei_hdr = (struct mei_msg_hdr *)&msg_hdr; if (!msg_hdr || mei_hdr->reserved) return -EBADMSG; - if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE) + if (mei_hdr->dma_ring) + expected_len += MEI_SLOT_SIZE; + if (mei_hdr->extended) + expected_len += MEI_SLOT_SIZE; + if (mei_hdr->length < expected_len) return -EBADMSG; return 0; @@ -324,6 +329,8 @@ int mei_irq_read_handler(struct mei_device *dev, struct mei_cl *cl; int ret; u32 ext_meta_hdr_u32; + u32 hdr_size_left; + u32 hdr_size_ext; int i; int ext_hdr_end; @@ -353,6 +360,7 @@ int mei_irq_read_handler(struct mei_device *dev, } ext_hdr_end = 1; + hdr_size_left = mei_hdr->length; if (mei_hdr->extended) { if (!dev->rd_msg_hdr[1]) { @@ -363,8 +371,21 @@ int mei_irq_read_handler(struct mei_device *dev, dev_dbg(dev->dev, "extended header is %08x\n", ext_meta_hdr_u32); } - meta_hdr = ((struct mei_ext_meta_hdr *) - dev->rd_msg_hdr + 1); + meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1); + if (check_add_overflow((u32)sizeof(*meta_hdr), + mei_slots2data(meta_hdr->size), + &hdr_size_ext)) { + dev_err(dev->dev, "extended message size too big %d\n", + meta_hdr->size); + return -EBADMSG; + } + if (hdr_size_left < hdr_size_ext) { + dev_err(dev->dev, "corrupted message header len %d\n", + mei_hdr->length); + return -EBADMSG; + } + hdr_size_left -= hdr_size_ext; + ext_hdr_end = meta_hdr->size + 2; for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) { dev->rd_msg_hdr[i] = mei_read_hdr(dev); @@ -376,6 +397,12 @@ int mei_irq_read_handler(struct mei_device *dev, } if (mei_hdr->dma_ring) { + if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) { + dev_err(dev->dev, "corrupted message header len %d\n", + mei_hdr->length); + return -EBADMSG; + } + dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev); dev->rd_msg_hdr_count++; (*slots)--; -- cgit v1.2.3-58-ga151 From 7615da2be0069254099bbb596583b46e9623e385 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Fri, 29 Jan 2021 14:07:47 +0200 Subject: mei: document that mei_msg_hdr_init returns ERR_PTR Document that mei_msg_hdr_init returns ERR_PTR. 
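The ERR_PTR convention this kerneldoc fix points at is easy to get wrong at call sites, so here is a minimal standalone C sketch of it. err_ptr()/is_err()/ptr_err() and make_header() are illustrative stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() and for mei_msg_hdr_init(); none of this is code from the patch.

#include <errno.h>
#include <stdio.h>

/* Userspace stand-ins: an error code is encoded in the pointer value itself. */
static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;	/* top 4095 values */
}

/* Plays the role of mei_msg_hdr_init(): never returns NULL on failure. */
static void *make_header(int fail)
{
	static int header;

	if (fail)
		return err_ptr(-ENOMEM);
	return &header;
}

int main(void)
{
	void *hdr = make_header(1);

	if (is_err(hdr))	/* callers must check IS_ERR(), not NULL */
		printf("init failed: %ld\n", ptr_err(hdr));
	return 0;
}

Documenting the ERR_PTR return matters because a NULL check would silently accept the error pointer and crash later on dereference.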
Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-2-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/client.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index a56d41321f32..beb6cdff20fb 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1737,7 +1737,7 @@ static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag) * * @cb: message callback structure * - * Return: a pointer to initialized header + * Return: a pointer to initialized header or ERR_PTR on failure */ static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb) { -- cgit v1.2.3-58-ga151 From 3a77df62deb2e62de0dc26c1cb763cc152329287 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Fri, 29 Jan 2021 14:07:48 +0200 Subject: mei: hbm: call mei_set_devstate() on hbm stop response Use the mei_set_devstate() wrapper upon the hbm stop command response, to trigger a sysfs event. Fixes: 43b8a7ed4739 ("mei: expose device state in sysfs") Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-3-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hbm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 686e8b6a4c55..0cba3c6dfb14 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1373,7 +1373,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EPROTO; } - dev->dev_state = MEI_DEV_POWER_DOWN; + mei_set_devstate(dev, MEI_DEV_POWER_DOWN); dev_info(dev->dev, "hbm: stop response: resetting.\n"); /* force the reset */ return -EPROTO; -- cgit v1.2.3-58-ga151 From da3eb47c90d4f951b83573d7203c40c0888521e5 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Fri, 29 Jan 2021 14:07:49 +0200 Subject: mei: hbm: drop hbm responses on shutdown Ignore and drop HBM responses from the init phase while in the shutdown phase. This fixes a stall if the driver starts shutting down in the middle of link initialization. 
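The change repeats one guard across the dispatch handlers: a late init-phase response seen during power-down is dropped silently instead of being treated as a protocol error. A minimal standalone C sketch of that guard, with hypothetical state names in place of the driver's MEI_DEV_* values:

#include <stdio.h>

enum dev_state { DEV_INIT_CLIENTS, DEV_ENABLED, DEV_POWER_DOWN };

/* Returns 0 for a stale response that should be silently dropped on shutdown,
 * and a protocol error only for a genuine state mismatch. */
static int dispatch_start_response(enum dev_state state)
{
	if (state != DEV_INIT_CLIENTS) {
		if (state == DEV_POWER_DOWN) {
			printf("on shutdown, ignoring\n");
			return 0;	/* no reset, no stall */
		}
		printf("state mismatch\n");
		return -71;	/* like -EPROTO, which forces a reset */
	}
	printf("handle start response\n");
	return 0;
}

int main(void)
{
	return dispatch_start_response(DEV_POWER_DOWN);
}

Returning -EPROTO from these paths forces a reset, which is exactly what must not happen while the driver is already tearing the link down.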
Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-4-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hbm.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 0cba3c6dfb14..df0f62de3dca 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1177,6 +1177,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_STARTING) { + if (dev->dev_state == MEI_DEV_POWER_DOWN) { + dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n"); + return 0; + } dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; @@ -1215,7 +1219,12 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->init_clients_timer = 0; - if (dev->hbm_state != MEI_HBM_CAP_SETUP) { + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_CAP_SETUP) { + if (dev->dev_state == MEI_DEV_POWER_DOWN) { + dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n"); + return 0; + } dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; @@ -1247,7 +1256,12 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev->init_clients_timer = 0; - if (dev->hbm_state != MEI_HBM_DR_SETUP) { + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_DR_SETUP) { + if (dev->dev_state == MEI_DEV_POWER_DOWN) { + dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n"); + return 0; + } dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; @@ -1311,6 +1325,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { + if (dev->dev_state == MEI_DEV_POWER_DOWN) { + dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n"); + return 0; + } dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; @@ -1349,6 +1367,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { + if (dev->dev_state == MEI_DEV_POWER_DOWN) { + dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n"); + return 0; + } dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); return -EPROTO; -- cgit v1.2.3-58-ga151 From 372726cb3957dbd69ded9a4e3419d5c6c3bc648e Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Fri, 29 Jan 2021 14:07:50 +0200 Subject: mei: me: emmitsburg workstation DID Add Emmitsburg workstation DID. 
Cc: Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-5-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw-me-regs.h | 2 ++ drivers/misc/mei/pci-me.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 9cf8d8f60cfe..a368849278b2 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -101,6 +101,8 @@ #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ +#define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 1de9ef7a272b..12dc99ee4b58 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -107,6 +107,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)}, + /* required last entry */ {0, } }; -- cgit v1.2.3-58-ga151 From f7545efaf7950b240de6b8a20b9c3ffd7278538e Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Fri, 29 Jan 2021 14:07:51 +0200 Subject: mei: me: add adler lake point S DID Add Adler Lake S device id. Cc: Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-6-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw-me-regs.h | 2 ++ drivers/misc/mei/pci-me.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index a368849278b2..50a82a38e66d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -103,6 +103,8 @@ #define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */ +#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 12dc99ee4b58..94c122007504 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -109,6 +109,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; -- cgit v1.2.3-58-ga151 From 930c922a987a02936000f15ea62988b7a39c27f5 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Fri, 29 Jan 2021 14:07:52 +0200 Subject: mei: me: add adler lake point LP DID Add Adler Lake LP device id. 
Cc: Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210129120752.850325-7-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw-me-regs.h | 1 + drivers/misc/mei/pci-me.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 50a82a38e66d..14be76d4c2e6 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -104,6 +104,7 @@ #define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */ #define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */ +#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */ /* * MEI HW Section diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 94c122007504..a7e179626b63 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -110,6 +110,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } -- cgit v1.2.3-58-ga151 From d7a4bfcac9a5f229aebfe307280e773b5f565443 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 1 Feb 2021 15:22:07 +0300 Subject: misc: bcm-vk: unlock on error in bcm_to_h_msg_dequeue() Unlock before returning on this error path. Fixes: 111d746bb476 ("misc: bcm-vk: add VK messaging support") Acked-by: Desmond Yan Acked-by: Scott Branden Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/YBfyb+jU5lDUe+5g@mwanda Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk_msg.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c index a363e2e4f7bc..f40cf08a6192 100644 --- a/drivers/misc/bcm-vk/bcm_vk_msg.c +++ b/drivers/misc/bcm-vk/bcm_vk_msg.c @@ -855,7 +855,8 @@ s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk) * that is fatal. */ dev_crit(dev, "Kernel mem allocation failure.\n"); - return -ENOMEM; + total = -ENOMEM; + goto idx_err; } /* flush rd pointer after a message is dequeued */ -- cgit v1.2.3-58-ga151 From 94e6a5b9e3bbb0cec6850b3662b7e3d44a008371 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Mon, 1 Feb 2021 15:30:40 +0800 Subject: misc: rtsx: Remove unneeded return variable This patch removes unneeded return variables, using only '0' instead. It fixes the following warning detected by coccinelle: ./drivers/misc/cardreader/rtsx_pcr.c:1808:5-8: Unneeded variable: "ret". Return "0" on line 1833. 
Reported-by: Abaci Robot Signed-off-by: Yang Li Link: https://lore.kernel.org/r/1612164640-84541-1-git-send-email-yang.lee@linux.alibaba.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtsx_pcr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 5e94e87c80b7..273311184669 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1799,7 +1799,6 @@ static int rtsx_pci_runtime_resume(struct device *device) struct pci_dev *pcidev = to_pci_dev(device); struct pcr_handle *handle; struct rtsx_pcr *pcr; - int ret = 0; handle = pci_get_drvdata(pcidev); pcr = handle->pcr; @@ -1824,7 +1823,7 @@ static int rtsx_pci_runtime_resume(struct device *device) schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); mutex_unlock(&pcr->pcr_mutex); - return ret; + return 0; } #else /* CONFIG_PM */ -- cgit v1.2.3-58-ga151 From 8078efff8d4dda4a84deddcc3172b5820593147a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 3 Feb 2021 17:42:53 +0300 Subject: misc: bcm-vk: Fix a couple error codes in probe() These errors should return negative error codes instead of returning success. Fixes: 064ffc7c3939 ("misc: bcm-vk: add autoload support") Fixes: 522f692686a7 ("misc: bcm-vk: add Broadcom VK driver") Acked-by: Scott Branden Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/YBpyEbmz00rjvT9S@mwanda Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/bcm_vk_dev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index c3d2bba68ef1..a82a8927d92b 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -1358,6 +1358,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vk->bar[i] = pci_ioremap_bar(pdev, i * 2); if (!vk->bar[i]) { dev_err(dev, "failed to remap BAR%d\n", i); + err = -ENOMEM; goto err_iounmap; } } @@ -1463,7 +1464,8 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); if (auto_load) { if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) { - if (bcm_vk_trigger_autoload(vk)) + err = bcm_vk_trigger_autoload(vk); + if (err) goto err_bcm_vk_tty_exit; } else { dev_err(dev, -- cgit v1.2.3-58-ga151 From 3a11b0b5d8d2b3f7d4b44945ef9226a3115bb15f Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Wed, 3 Feb 2021 14:38:26 -0800 Subject: misc: bcm-vk: only support ttyVK if CONFIG_TTY is set Correct compile issue if CONFIG_TTY is not set by only adding ttyVK devices if CONFIG_BCM_VK_TTY is set. Reported-by: Randy Dunlap Acked-by: Randy Dunlap # build-tested Signed-off-by: Scott Branden Link: https://lore.kernel.org/r/20210203223826.21674-1-scott.branden@broadcom.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/bcm-vk/Kconfig | 12 ++++++++++++ drivers/misc/bcm-vk/Makefile | 4 ++-- drivers/misc/bcm-vk/bcm_vk.h | 42 +++++++++++++++++++++++++++++++++++++--- drivers/misc/bcm-vk/bcm_vk_dev.c | 5 ++--- drivers/misc/bcm-vk/bcm_vk_tty.c | 6 ++++++ 5 files changed, 61 insertions(+), 8 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/bcm-vk/Kconfig b/drivers/misc/bcm-vk/Kconfig index 052f6f28b540..68a972772b99 100644 --- a/drivers/misc/bcm-vk/Kconfig +++ b/drivers/misc/bcm-vk/Kconfig @@ -15,3 +15,15 @@ config BCM_VK accelerators via /dev/bcm-vk.N devices. 
If unsure, say N. + +config BCM_VK_TTY + bool "Enable tty ports on a Broadcom VK Accelerator device" + depends on TTY + depends on BCM_VK + help + Select this option to enable tty support to allow console + access to Broadcom VK Accelerator cards from host. + + Device node will in the form /dev/bcm-vk.x_ttyVKy where: + x is the instance of the VK card + y is the tty device number on the VK card. diff --git a/drivers/misc/bcm-vk/Makefile b/drivers/misc/bcm-vk/Makefile index e4a1486f7209..1df2ebe851ca 100644 --- a/drivers/misc/bcm-vk/Makefile +++ b/drivers/misc/bcm-vk/Makefile @@ -7,6 +7,6 @@ obj-$(CONFIG_BCM_VK) += bcm_vk.o bcm_vk-objs := \ bcm_vk_dev.o \ bcm_vk_msg.o \ - bcm_vk_sg.o \ - bcm_vk_tty.o + bcm_vk_sg.o +bcm_vk-$(CONFIG_BCM_VK_TTY) += bcm_vk_tty.o diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h index 3f37c640a814..a1338f375589 100644 --- a/drivers/misc/bcm-vk/bcm_vk.h +++ b/drivers/misc/bcm-vk/bcm_vk.h @@ -258,7 +258,11 @@ enum pci_barno { BAR_2 }; +#ifdef CONFIG_BCM_VK_TTY #define BCM_VK_NUM_TTY 2 +#else +#define BCM_VK_NUM_TTY 0 +#endif struct bcm_vk_tty { struct tty_port port; @@ -366,11 +370,13 @@ struct bcm_vk { struct miscdevice miscdev; int devid; /* dev id allocated */ +#ifdef CONFIG_BCM_VK_TTY struct tty_driver *tty_drv; struct timer_list serial_timer; struct bcm_vk_tty tty[BCM_VK_NUM_TTY]; struct workqueue_struct *tty_wq_thread; struct work_struct tty_wq_work; +#endif /* Reference-counting to handle file operations */ struct kref kref; @@ -501,13 +507,43 @@ int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type, const pid_t pid, const u32 q_num); void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val); int bcm_vk_auto_load_all_images(struct bcm_vk *vk); -int bcm_vk_tty_init(struct bcm_vk *vk, char *name); -void bcm_vk_tty_exit(struct bcm_vk *vk); -void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk); void bcm_vk_hb_init(struct bcm_vk *vk); void bcm_vk_hb_deinit(struct bcm_vk *vk); void bcm_vk_handle_notf(struct bcm_vk *vk); bool bcm_vk_drv_access_ok(struct bcm_vk *vk); void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask); +#ifdef CONFIG_BCM_VK_TTY +int bcm_vk_tty_init(struct bcm_vk *vk, char *name); +void bcm_vk_tty_exit(struct bcm_vk *vk); +void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk); +void bcm_vk_tty_wq_exit(struct bcm_vk *vk); + +static inline void bcm_vk_tty_set_irq_enabled(struct bcm_vk *vk, int index) +{ + vk->tty[index].irq_enabled = true; +} +#else +static inline int bcm_vk_tty_init(struct bcm_vk *vk, char *name) +{ + return 0; +} + +static inline void bcm_vk_tty_exit(struct bcm_vk *vk) +{ +} + +static inline void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk) +{ +} + +static inline void bcm_vk_tty_wq_exit(struct bcm_vk *vk) +{ +} + +static inline void bcm_vk_tty_set_irq_enabled(struct bcm_vk *vk, int index) +{ +} +#endif /* CONFIG_BCM_VK_TTY */ + #endif diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c index a82a8927d92b..6bfea3210389 100644 --- a/drivers/misc/bcm-vk/bcm_vk_dev.c +++ b/drivers/misc/bcm-vk/bcm_vk_dev.c @@ -1397,7 +1397,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pdev->irq + vk->num_irqs, vk->num_irqs + 1); goto err_irq; } - vk->tty[i].irq_enabled = true; + bcm_vk_tty_set_irq_enabled(vk, i); } id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL); @@ -1582,8 +1582,7 @@ static void bcm_vk_remove(struct pci_dev *pdev) cancel_work_sync(&vk->wq_work); destroy_workqueue(vk->wq_thread); - cancel_work_sync(&vk->tty_wq_work); 
- destroy_workqueue(vk->tty_wq_thread); + bcm_vk_tty_wq_exit(vk); for (i = 0; i < MAX_BAR; i++) { if (vk->bar[i]) diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c index be3964949b63..4d02692ecfc7 100644 --- a/drivers/misc/bcm-vk/bcm_vk_tty.c +++ b/drivers/misc/bcm-vk/bcm_vk_tty.c @@ -331,3 +331,9 @@ void bcm_vk_tty_terminate_tty_user(struct bcm_vk *vk) kill_pid(find_vpid(vktty->pid), SIGKILL, 1); } } + +void bcm_vk_tty_wq_exit(struct bcm_vk *vk) +{ + cancel_work_sync(&vk->tty_wq_work); + destroy_workqueue(vk->tty_wq_thread); +} -- cgit v1.2.3-58-ga151 From a74ab2ed0def52a993d0cbd9df9d1c4de33c6fbd Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 29 Jan 2021 11:08:01 -0800 Subject: misc: pvpanic: sysfs_emit uses should have a newline Add newline terminations to the sysfs_emit uses added by -next commit 8d6da6575ffe ("misc: pvpanic: introduce events device attribue") Signed-off-by: Joe Perches Signed-off-by: zhenwei pi Link: https://lore.kernel.org/r/13b1c892d52c27d4caeccc89506aadda74f61365.camel@perches.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/pvpanic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c index b1e4922a7fda..9f350e05ef68 100644 --- a/drivers/misc/pvpanic.c +++ b/drivers/misc/pvpanic.c @@ -25,13 +25,13 @@ static unsigned int events; static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%x", capability); + return sysfs_emit(buf, "%x\n", capability); } static DEVICE_ATTR_RO(capability); static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%x", events); + return sysfs_emit(buf, "%x\n", events); } static ssize_t events_store(struct device *dev, struct device_attribute *attr, -- cgit v1.2.3-58-ga151 From 76ec1ec8fc7c4bda24af8c3a73c5f56fa8d6e460 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Fri, 29 Jan 2021 16:25:05 +0800 Subject: cxl: Simplify bool conversion Fix the following coccicheck warning: ./drivers/misc/cxl/sysfs.c:181:48-53: WARNING: conversion to bool not needed here Reported-by: Abaci Robot Reviewed-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Yang Li Link: https://lore.kernel.org/r/1611908705-98507-1-git-send-email-yang.lee@linux.alibaba.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cxl/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index d97a243ad30c..c173a5e88c91 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -178,7 +178,7 @@ static ssize_t perst_reloads_same_image_store(struct device *device, if ((rc != 1) || !(val == 1 || val == 0)) return -EINVAL; - adapter->perst_same_image = (val == 1 ? 
true : false); + adapter->perst_same_image = (val == 1); return count; } -- cgit v1.2.3-58-ga151 From 920fd8a70619074eac7687352c8f1c6f3c2a64a5 Mon Sep 17 00:00:00 2001 From: Ricky Wu Date: Thu, 4 Feb 2021 16:31:15 +0800 Subject: misc: rtsx: init of rts522a add OCP power off when no card is present Power down OCP to reduce power consumption when no SD/MMC card is present Cc: stable@vger.kernel.org Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/20210204083115.9471-1-ricky_wu@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rts5227.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 8859011672cb..8200af22b529 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -398,6 +398,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr) { rts5227_extra_init_hw(pcr); + /* Power down OCP for power consumption */ + if (!pcr->card_exist) + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, + OC_POWER_DOWN); + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG); rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04); -- cgit v1.2.3-58-ga151 From 36edb1407c3c042240b5dbc2530d6e7119535baa Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Sat, 6 Feb 2021 16:43:21 +0200 Subject: mei: allow clients on bus to communicate in remove callback Introduce a new intermediate state to allow the clients on the bus to communicate with the firmware from the remove handler. This enables them to perform a clean shutdown. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210206144325.25682-2-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 6 ++++-- drivers/misc/mei/client.c | 3 ++- drivers/misc/mei/init.c | 5 ++++- drivers/misc/mei/mei_dev.h | 1 + 4 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 2907db260fba..34fb5e541fe5 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -44,7 +44,8 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, bus = cl->dev; mutex_lock(&bus->device_lock); - if (bus->dev_state != MEI_DEV_ENABLED) { + if (bus->dev_state != MEI_DEV_ENABLED && + bus->dev_state != MEI_DEV_POWERING_DOWN) { rets = -ENODEV; goto out; } @@ -128,7 +129,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, bus = cl->dev; mutex_lock(&bus->device_lock); - if (bus->dev_state != MEI_DEV_ENABLED) { + if (bus->dev_state != MEI_DEV_ENABLED && + bus->dev_state != MEI_DEV_POWERING_DOWN) { rets = -ENODEV; goto out; } diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index beb6cdff20fb..d3f060dce4a6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -990,7 +990,8 @@ int mei_cl_disconnect(struct mei_cl *cl) return 0; } - if (dev->dev_state == MEI_DEV_POWER_DOWN) { + if (dev->dev_state == MEI_DEV_POWERING_DOWN || + dev->dev_state == MEI_DEV_POWER_DOWN) { cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n"); mei_cl_set_disconnected(cl); return 0; } diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index bcee77768b91..5c8cb679b997 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -303,9 +303,12 @@ void mei_stop(struct mei_device *dev) dev_dbg(dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock); - mei_set_devstate(dev, MEI_DEV_POWER_DOWN); + mei_set_devstate(dev, MEI_DEV_POWERING_DOWN); mutex_unlock(&dev->device_lock); mei_cl_bus_remove_devices(dev); + mutex_lock(&dev->device_lock); + mei_set_devstate(dev, MEI_DEV_POWER_DOWN); + mutex_unlock(&dev->device_lock); mei_cancel_work(dev); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 8c395bfdf6f3..585a6f615bf8 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -57,6 +57,7 @@ enum mei_dev_state { MEI_DEV_ENABLED, MEI_DEV_RESETTING, MEI_DEV_DISABLED, + MEI_DEV_POWERING_DOWN, MEI_DEV_POWER_DOWN, MEI_DEV_POWER_UP }; -- cgit v1.2.3-58-ga151 From b7a4804129c7adb3ee9caf5505e3f91fb1467ec3 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Sat, 6 Feb 2021 16:43:22 +0200 Subject: mei: add support for client dma capability Client DMA capability indicates whether the firmware supports setting up a direct DMA channel between the host and me client. The DMA capabilities are supported from firmware HBM version 2.2 and newer. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210206144325.25682-3-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/debugfs.c | 1 + drivers/misc/mei/hbm.c | 13 ++++++++++++- drivers/misc/mei/hw.h | 8 ++++++++ drivers/misc/mei/mei_dev.h | 2 ++ 4 files changed, 23 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index 3ab1a431d810..1ce61e9e24fc 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -106,6 +106,7 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported); seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported); seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported); + seq_printf(m, "\tCD: %01d\n", dev->hbm_f_cd_supported); } seq_printf(m, "pg: %s, %s\n", diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index df0f62de3dca..6e748da7e55d 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -339,7 +339,9 @@ static int mei_hbm_capabilities_req(struct mei_device *dev) memset(&req, 0, sizeof(req)); req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD; if (dev->hbm_f_vt_supported) - req.capability_requested[0] = HBM_CAP_VT; + req.capability_requested[0] |= HBM_CAP_VT; + if (dev->hbm_f_cd_supported) + req.capability_requested[0] |= HBM_CAP_CD; ret = mei_hbm_write_message(dev, &mei_hdr, &req); if (ret) { @@ -1085,6 +1087,13 @@ static void mei_hbm_config_features(struct mei_device *dev) (dev->version.major_version == HBM_MAJOR_VERSION_CAP && dev->version.minor_version >= HBM_MINOR_VERSION_CAP)) dev->hbm_f_cap_supported = 1; + + /* Client DMA Support */ + dev->hbm_f_cd_supported = 0; + if (dev->version.major_version > HBM_MAJOR_VERSION_CD || + (dev->version.major_version == HBM_MAJOR_VERSION_CD && + dev->version.minor_version >= HBM_MINOR_VERSION_CD)) + dev->hbm_f_cd_supported = 1; } /** @@ -1233,6 +1242,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) capability_res = (struct hbm_capability_response *)mei_msg; if (!(capability_res->capability_granted[0] & HBM_CAP_VT)) dev->hbm_f_vt_supported = 0; + if (!(capability_res->capability_granted[0] & HBM_CAP_CD)) + dev->hbm_f_cd_supported = 0; if (dev->hbm_f_dr_supported) { if (mei_dmam_ring_alloc(dev)) diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index df2fb9520dd8..9577a2cc0733 100644 --- 
a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -88,6 +88,12 @@ #define HBM_MINOR_VERSION_CAP 2 #define HBM_MAJOR_VERSION_CAP 2 +/* + * MEI version with client DMA support + */ +#define HBM_MINOR_VERSION_CD 2 +#define HBM_MAJOR_VERSION_CD 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ @@ -648,6 +654,8 @@ struct hbm_dma_ring_ctrl { /* virtual tag supported */ #define HBM_CAP_VT BIT(0) +/* client dma supported */ +#define HBM_CAP_CD BIT(2) /** * struct hbm_capability_request - capability request from host to fw diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 585a6f615bf8..8ebd32cb2075 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -451,6 +451,7 @@ struct mei_fw_version { * @hbm_f_dr_supported : hbm feature dma ring supported * @hbm_f_vt_supported : hbm feature vtag supported * @hbm_f_cap_supported : hbm feature capabilities message supported + * @hbm_f_cd_supported : hbm feature client dma supported * * @fw_ver : FW versions * @@ -538,6 +539,7 @@ struct mei_device { unsigned int hbm_f_dr_supported:1; unsigned int hbm_f_vt_supported:1; unsigned int hbm_f_cap_supported:1; + unsigned int hbm_f_cd_supported:1; struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; -- cgit v1.2.3-58-ga151 From dfad8742a328598d52d8472ce34fac312f4a5acb Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Sat, 6 Feb 2021 16:43:23 +0200 Subject: mei: hbm: add client dma hbm messages Define structures for client DMA HBM protocol. The protocol requires passing dma buffer address and the buffer id. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210206144325.25682-4-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hw.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 9577a2cc0733..b10606550613 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -142,6 +142,12 @@ #define MEI_HBM_CAPABILITIES_REQ_CMD 0x13 #define MEI_HBM_CAPABILITIES_RES_CMD 0x93 +#define MEI_HBM_CLIENT_DMA_MAP_REQ_CMD 0x14 +#define MEI_HBM_CLIENT_DMA_MAP_RES_CMD 0x94 + +#define MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD 0x15 +#define MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD 0x95 + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -679,4 +685,51 @@ struct hbm_capability_response { u8 capability_granted[3]; } __packed; +/** + * struct hbm_client_dma_map_request - client dma map request from host to fw + * + * @hbm_cmd: bus message command header + * @client_buffer_id: client buffer id + * @reserved: reserved + * @address_lsb: DMA address LSB + * @address_msb: DMA address MSB + * @size: DMA size + */ +struct hbm_client_dma_map_request { + u8 hbm_cmd; + u8 client_buffer_id; + u8 reserved[2]; + u32 address_lsb; + u32 address_msb; + u32 size; +} __packed; + +/** + * struct hbm_client_dma_unmap_request + * client dma unmap request from the host to the firmware + * + * @hbm_cmd: bus message command header + * @status: unmap status + * @client_buffer_id: client buffer id + * @reserved: reserved + */ +struct hbm_client_dma_unmap_request { + u8 hbm_cmd; + u8 status; + u8 client_buffer_id; + u8 reserved; +} __packed; + +/** + * struct hbm_client_dma_response + * client dma unmap response from the firmware to the host + * + * @hbm_cmd: bus message command header + * @status: command status + */ +struct 
hbm_client_dma_response { + u8 hbm_cmd; + u8 status; +} __packed; + #endif -- cgit v1.2.3-58-ga151 From 369aea84595189200a2e6b028f556a7efa0ec489 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Sat, 6 Feb 2021 16:43:24 +0200 Subject: mei: implement client dma setup. Implement the HBM message protocol to set up and tear down a DMA buffer on behalf of a client. On top of that, add DMA buffer allocation and its lifetime management. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20210206144325.25682-5-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/client.c | 286 +++++++++++++++++++++++++++++++++++++++++++ drivers/misc/mei/client.h | 8 ++ drivers/misc/mei/hbm.c | 124 +++++++++++++++++++ drivers/misc/mei/hbm.h | 4 +- drivers/misc/mei/interrupt.c | 10 ++ drivers/misc/mei/mei_dev.h | 15 +++ 6 files changed, 446 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index d3f060dce4a6..4378a9b25848 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -9,6 +9,7 @@ #include #include #include +#include #include @@ -2114,6 +2115,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) case MEI_FOP_DISCONNECT: case MEI_FOP_NOTIFY_STOP: case MEI_FOP_NOTIFY_START: + case MEI_FOP_DMA_MAP: + case MEI_FOP_DMA_UNMAP: if (waitqueue_active(&cl->wait)) wake_up(&cl->wait); @@ -2140,3 +2143,286 @@ void mei_cl_all_disconnect(struct mei_device *dev) list_for_each_entry(cl, &dev->file_list, link) mei_cl_set_disconnected(cl); } + +static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id) +{ + struct mei_cl *cl; + + list_for_each_entry(cl, &dev->file_list, link) + if (cl->dma.buffer_id == buffer_id) + return cl; + return NULL; +} + +/** + * mei_cl_irq_dma_map - send client dma map request in irq_thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0 on success and error otherwise. + */ +int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + ret = mei_hbm_cl_dma_map_req(dev, cl); + if (ret) { + cl->status = ret; + list_move_tail(&cb->list, cmpl_list); + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list); + return 0; +} + +/** + * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0 on success and error otherwise.
+ */ +int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) + return -EMSGSIZE; + + ret = mei_hbm_cl_dma_unmap_req(dev, cl); + if (ret) { + cl->status = ret; + list_move_tail(&cb->list, cmpl_list); + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list); + return 0; +} + +static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size) +{ + cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size, + &cl->dma.daddr, GFP_KERNEL); + if (!cl->dma.vaddr) + return -ENOMEM; + + cl->dma.buffer_id = buf_id; + cl->dma.size = size; + + return 0; +} + +static void mei_cl_dma_free(struct mei_cl *cl) +{ + cl->dma.buffer_id = 0; + dmam_free_coherent(cl->dev->dev, + cl->dma.size, cl->dma.vaddr, cl->dma.daddr); + cl->dma.size = 0; + cl->dma.vaddr = NULL; + cl->dma.daddr = 0; +} + +/** + * mei_cl_dma_alloc_and_map - send client dma map request + * + * @cl: host client + * @fp: pointer to file structure + * @buffer_id: id of the mapped buffer + * @size: size of the buffer + * + * Locking: called under "dev->device_lock" lock + * + * Return: + * * -ENODEV + * * -EINVAL + * * -EOPNOTSUPP + * * -EPROTO + * * -ENOMEM + */ +int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, + u8 buffer_id, size_t size) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!dev->hbm_f_cd_supported) { + cl_dbg(dev, cl, "client dma is not supported\n"); + return -EOPNOTSUPP; + } + + if (buffer_id == 0) + return -EINVAL; + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + if (cl->dma_mapped) + return -EPROTO; + + if (mei_cl_dma_map_find(dev, buffer_id)) { + cl_dbg(dev, cl, "client dma with id %d is already allocated\n", + cl->dma.buffer_id); + return -EPROTO; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + rets = mei_cl_dma_alloc(cl, buffer_id, size); + if (rets) { + pm_runtime_put_noidle(dev->dev); + return rets; + } + + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + if (mei_hbuf_acquire(dev)) { + if (mei_hbm_cl_dma_map_req(dev, cl)) { + rets = -ENODEV; + goto out; + } + list_move_tail(&cb->list, &dev->ctrl_rd_list); + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + cl->dma_mapped || + cl->status || + !mei_cl_is_connected(cl), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (!cl->dma_mapped && !cl->status) + cl->status = -EFAULT; + + rets = cl->status; + +out: + if (rets) + mei_cl_dma_free(cl); + + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_dma_unmap - send client dma unmap request + * + * @cl: host client + * @fp: pointer to file structure + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success and error otherwise.
+ */ +int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!dev->hbm_f_cd_supported) { + cl_dbg(dev, cl, "client dma is not supported\n"); + return -EOPNOTSUPP; + } + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + if (!cl->dma_mapped) + return -EPROTO; + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + if (mei_hbuf_acquire(dev)) { + if (mei_hbm_cl_dma_unmap_req(dev, cl)) { + rets = -ENODEV; + goto out; + } + list_move_tail(&cb->list, &dev->ctrl_rd_list); + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, + !cl->dma_mapped || + cl->status || + !mei_cl_is_connected(cl), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (cl->dma_mapped && !cl->status) + cl->status = -EFAULT; + + rets = cl->status; + + if (!rets) + mei_cl_dma_free(cl); +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + mei_io_cb_free(cb); + return rets; +} diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 9e08a9843bba..b12cdcde9436 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -265,6 +265,14 @@ void mei_cl_notify(struct mei_cl *cl); void mei_cl_all_disconnect(struct mei_device *dev); +int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list); +int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, + struct list_head *cmpl_list); +int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, + u8 buffer_id, size_t size); +int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp); + #define MEI_CL_FMT "cl:host=%02d me=%02d " #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 6e748da7e55d..d0277c7fed10 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -594,6 +594,117 @@ static void mei_hbm_cl_notify(struct mei_device *dev, mei_cl_notify(cl); } +/** + * mei_hbm_cl_dma_map_req - send client dma map request + * + * @dev: the device structure + * @cl: mei host client + * + * Return: 0 on success and -EIO on write failure + */ +int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_client_dma_map_request req; + int ret; + + mei_hbm_hdr(&mei_hdr, sizeof(req)); + + memset(&req, 0, sizeof(req)); + + req.hbm_cmd = MEI_HBM_CLIENT_DMA_MAP_REQ_CMD; + req.client_buffer_id = cl->dma.buffer_id; + req.address_lsb = lower_32_bits(cl->dma.daddr); + req.address_msb = upper_32_bits(cl->dma.daddr); + req.size = cl->dma.size; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) + dev_err(dev->dev, "dma map request failed: ret = %d\n", ret); + + return ret; +} + +/** + * mei_hbm_cl_dma_unmap_req - send client dma unmap request + * + * @dev: the device structure + * @cl: mei host client + * + * Return: 0 on success and -EIO on write failure + */ +int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr mei_hdr; + struct hbm_client_dma_unmap_request req; + int ret; + + mei_hbm_hdr(&mei_hdr, 
sizeof(req)); + + memset(&req, 0, sizeof(req)); + + req.hbm_cmd = MEI_HBM_CLIENT_DMA_UNMAP_REQ_CMD; + req.client_buffer_id = cl->dma.buffer_id; + + ret = mei_hbm_write_message(dev, &mei_hdr, &req); + if (ret) + dev_err(dev->dev, "dma unmap request failed: ret = %d\n", ret); + + return ret; +} + +static void mei_hbm_cl_dma_map_res(struct mei_device *dev, + struct hbm_client_dma_response *res) +{ + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + + cl = NULL; + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) { + if (cb->fop_type != MEI_FOP_DMA_MAP) + continue; + if (!cb->cl->dma.buffer_id || cb->cl->dma_mapped) + continue; + + cl = cb->cl; + break; + } + if (!cl) + return; + + dev_dbg(dev->dev, "cl dma map result = %d\n", res->status); + cl->status = res->status; + if (!cl->status) + cl->dma_mapped = 1; + wake_up(&cl->wait); +} + +static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev, + struct hbm_client_dma_response *res) +{ + struct mei_cl *cl; + struct mei_cl_cb *cb, *next; + + cl = NULL; + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) { + if (cb->fop_type != MEI_FOP_DMA_UNMAP) + continue; + if (!cb->cl->dma.buffer_id || !cb->cl->dma_mapped) + continue; + + cl = cb->cl; + break; + } + if (!cl) + return; + + dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status); + cl->status = res->status; + if (!cl->status) + cl->dma_mapped = 0; + wake_up(&cl->wait); +} + /** * mei_hbm_prop_req - request property for a single client * @@ -1133,6 +1244,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) struct mei_hbm_cl_cmd *cl_cmd; struct hbm_client_connect_request *disconnect_req; struct hbm_flow_control *fctrl; + struct hbm_client_dma_response *client_dma_res; /* read the message to our buffer */ BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf)); @@ -1459,6 +1571,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) mei_hbm_cl_notify(dev, cl_cmd); break; + case MEI_HBM_CLIENT_DMA_MAP_RES_CMD: + dev_dbg(dev->dev, "hbm: client dma map response: message received.\n"); + client_dma_res = (struct hbm_client_dma_response *)mei_msg; + mei_hbm_cl_dma_map_res(dev, client_dma_res); + break; + + case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD: + dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n"); + client_dma_res = (struct hbm_client_dma_response *)mei_msg; + mei_hbm_cl_dma_unmap_res(dev, client_dma_res); + break; + default: WARN(1, "hbm: wrong command %d\n", mei_msg->hbm_cmd); return -EPROTO; diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 4d95e38e4ddf..cd5b08ca34b6 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -10,6 +10,7 @@ struct mei_device; struct mei_msg_hdr; struct mei_cl; +struct mei_dma_data; /** * enum mei_hbm_state - host bus message protocol state @@ -51,6 +52,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); void mei_hbm_pg_resume(struct mei_device *dev); int mei_hbm_cl_notify_req(struct mei_device *dev, struct mei_cl *cl, u8 request); - +int mei_hbm_cl_dma_map_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_dma_unmap_req(struct mei_device *dev, struct mei_cl *cl); #endif /* _MEI_HBM_H_ */ diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 2161c1234ad7..a98f6b895af7 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -547,6 +547,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) if (ret) return ret; break; + case MEI_FOP_DMA_MAP: + ret = 
mei_cl_irq_dma_map(cl, cb, cmpl_list); + if (ret) + return ret; + break; + case MEI_FOP_DMA_UNMAP: + ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list); + if (ret) + return ret; + break; default: BUG(); } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 8ebd32cb2075..b7b6ef344e80 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -79,6 +79,8 @@ enum mei_file_transaction_states { * @MEI_FOP_DISCONNECT_RSP: disconnect response * @MEI_FOP_NOTIFY_START: start notification * @MEI_FOP_NOTIFY_STOP: stop notification + * @MEI_FOP_DMA_MAP: request client dma map + * @MEI_FOP_DMA_UNMAP: request client dma unmap */ enum mei_cb_file_ops { MEI_FOP_READ = 0, @@ -88,6 +90,8 @@ enum mei_cb_file_ops { MEI_FOP_DISCONNECT_RSP, MEI_FOP_NOTIFY_START, MEI_FOP_NOTIFY_STOP, + MEI_FOP_DMA_MAP, + MEI_FOP_DMA_UNMAP, }; /** @@ -113,6 +117,13 @@ struct mei_msg_data { unsigned char *data; }; +struct mei_dma_data { + u8 buffer_id; + void *vaddr; + dma_addr_t daddr; + size_t size; +}; + /** * struct mei_dma_dscr - dma address descriptor * @@ -236,6 +247,8 @@ struct mei_cl_vtag { * @rd_pending: pending read credits * @rd_completed_lock: protects rd_completed queue * @rd_completed: completed read + * @dma: dma settings + * @dma_mapped: dma buffer is currently mapped. * * @cldev: device on the mei client bus */ struct mei_cl { @@ -263,6 +276,8 @@ struct mei_cl { struct list_head rd_pending; spinlock_t rd_completed_lock; /* protects rd_completed queue */ struct list_head rd_completed; + struct mei_dma_data dma; + u8 dma_mapped; struct mei_cl_device *cldev; }; -- cgit v1.2.3-58-ga151 From 6c1e3f92f9f1dfc7f14b43fd432c8ec95b1a188f Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Thu, 28 Jan 2021 08:50:25 +0200 Subject: habanalabs: fix integer handling issue Need to add the ull suffix to constants when shifting them into 64-bit variables. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c index 27dc0d116db5..71703a32350f 100644 --- a/drivers/misc/habanalabs/common/mmu/mmu.c +++ b/drivers/misc/habanalabs/common/mmu/mmu.c @@ -507,7 +507,7 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr, p = (char *)p + hop0_shift_off; p = (char *)p + ((hops->used_hops - 1) * sizeof(u64)); hop_shift = *(u64 *)p; - offset_mask = (1 << hop_shift) - 1; + offset_mask = (1ull << hop_shift) - 1; addr_mask = ~(offset_mask); *phys_addr = (tmp_phys_addr & addr_mask) | (virt_addr & offset_mask); -- cgit v1.2.3-58-ga151 From 5dbd7b4de6ef84321cc1378eccdd92d4730c2e56 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 28 Jan 2021 16:30:25 +0200 Subject: habanalabs: improve communication protocol with cpucp The current messaging communication protocol with cpucp can get out of sync due to coherency issues. In order to improve the protocol reliability, we modify the protocol to expect a different acknowledgment for every packet sent to cpucp.
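[Editor's note] A hedged sketch of the handshake this change moves to; the helper names marked as placeholders are not the driver's real API, and the authoritative implementation is hl_fw_send_cpu_message() in the diff below:

	/*
	 * Sketch only: the host seeds the packet fence with a sentinel,
	 * submits the packet, then polls until the firmware writes back the
	 * per-packet ack value -- the queue producer index (PI) when the
	 * firmware advertises PI-based acks, or the legacy shared magic
	 * number otherwise.
	 */
	static int send_and_wait_ack(struct cpucp_packet *pkt, u32 queue_pi,
				     bool fw_acks_with_pi)
	{
		/* CPUCP_PACKET_FENCE_VAL is the pre-existing magic value */
		u32 expected = fw_acks_with_pi ? queue_pi : CPUCP_PACKET_FENCE_VAL;
		unsigned long deadline = jiffies + msecs_to_jiffies(5000);

		WRITE_ONCE(pkt->fence, UINT_MAX);	/* sentinel: no ack yet */
		ring_doorbell();	/* placeholder for the real queue submission */

		while (READ_ONCE(pkt->fence) != expected) {
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;
			usleep_range(1000, 2000);
		}
		return 0;
	}

Because each packet now waits for a value tied to its own queue position, a stale ack left in memory by an earlier exchange can no longer satisfy the wait for the current packet.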
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 17 +++++++++++++++-- drivers/misc/habanalabs/common/habanalabs.h | 3 +++ drivers/misc/habanalabs/gaudi/gaudi.c | 6 +++++- drivers/misc/habanalabs/goya/goya.c | 6 +++++- drivers/misc/habanalabs/include/common/hl_boot_if.h | 5 +++++ 5 files changed, 33 insertions(+), 4 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index ba6920f2b4ab..31b52a223f02 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -90,9 +90,10 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, u16 len, u32 timeout, u64 *result) { + struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id]; struct cpucp_packet *pkt; dma_addr_t pkt_dma_addr; - u32 tmp; + u32 tmp, expected_ack_val; int rc = 0; pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len, @@ -115,14 +116,22 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, goto out; } + /* set fence to a non valid value */ + pkt->fence = UINT_MAX; + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr); if (rc) { dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc); goto out; } + if (hdev->asic_prop.fw_cpucp_ack_with_pi) + expected_ack_val = queue->pi; + else + expected_ack_val = CPUCP_PACKET_FENCE_VAL; + rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp, - (tmp == CPUCP_PACKET_FENCE_VAL), 1000, + (tmp == expected_ack_val), 1000, timeout, true); hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); @@ -777,6 +786,10 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) prop->hard_reset_done_by_fw = true; + if (prop->fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN) + prop->fw_cpucp_ack_with_pi = true; + dev_dbg(hdev->dev, "Firmware boot CPU security status %#x\n", prop->fw_boot_cpu_security_map); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 30f32f2edb8a..3c54010f7ab9 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -419,6 +419,8 @@ struct hl_mmu_properties { * from BOOT_DEV_STS0 * @dram_supports_virtual_memory: is there an MMU towards the DRAM * @hard_reset_done_by_fw: true if firmware is handling hard reset flow + * @fw_cpucp_ack_with_pi: true if cpucp is acking messages with the PQ PI + * instead of a magic number * @num_functional_hbms: number of functional HBMs in each DCORE. 
*/ struct asic_fixed_properties { @@ -479,6 +481,7 @@ struct asic_fixed_properties { u8 fw_security_status_valid; u8 dram_supports_virtual_memory; u8 hard_reset_done_by_fw; + u8 fw_cpucp_ack_with_pi; u8 num_functional_hbms; }; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 52fcaf25531a..006c34ae35c2 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -533,6 +533,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->fw_security_disabled = true; prop->fw_security_status_valid = false; prop->hard_reset_done_by_fw = false; + prop->fw_cpucp_ack_with_pi = false; return 0; } @@ -4438,9 +4439,12 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) /* ring the doorbell */ WREG32(db_reg_offset, db_value); - if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) + if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) { + /* make sure device CPU will read latest data from host */ + mb(); WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_PI_UPDATE); + } } static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index a954e7c02375..53db7e966866 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -461,6 +461,7 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->fw_security_disabled = true; prop->fw_security_status_valid = false; prop->hard_reset_done_by_fw = false; + prop->fw_cpucp_ack_with_pi = false; return 0; } @@ -2806,9 +2807,12 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) /* ring the doorbell */ WREG32(db_reg_offset, db_value); - if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) + if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) { + /* make sure device CPU will read latest data from host */ + mb(); WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GOYA_ASYNC_EVENT_ID_PI_UPDATE); + } } void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd) diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index 57785478a4ef..e87f5a98e193 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -166,6 +166,10 @@ * FW handles HBM ECC indications. * Initialized in: linux * + * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd + * is set to the PI counter. + * Initialized in: linux + * * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. * This is a main indication that the * running FW populates the device status @@ -190,6 +194,7 @@ #define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << 12) #define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13) #define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14) +#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15) #define CPU_BOOT_DEV_STS0_ENABLED (1 << 31) enum cpu_boot_status { -- cgit v1.2.3-58-ga151 From e52606d2f5363f4900cfe8419e391644b0229c6f Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Wed, 27 Jan 2021 16:34:37 +0200 Subject: habanalabs: support fetching first available user CQ User must be aware of the available CQs when it needs to use them. 
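[Editor's note] A hedged userspace sketch of how a program might discover the first usable CQ through the INFO ioctl this interface extends; the fd handling is elided, dcore 0 is an arbitrary placeholder, and the op constant used here (HL_INFO_SYNC_MANAGER) should be checked against the installed uapi header:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <misc/habanalabs.h>	/* installed kernel uapi header */

	static void print_first_user_cq(int fd)
	{
		struct hl_info_sync_manager sm;
		struct hl_info_args args;

		memset(&sm, 0, sizeof(sm));
		memset(&args, 0, sizeof(args));
		args.return_pointer = (__u64)(uintptr_t)&sm;
		args.return_size = sizeof(sm);
		args.op = HL_INFO_SYNC_MANAGER;
		args.dcore_id = 0;	/* placeholder: query DCORE 0 */

		if (ioctl(fd, HL_IOCTL_INFO, &args) == 0)
			printf("first available user CQ: %u\n",
			       sm.first_available_cq);
	}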
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 2 ++ drivers/misc/habanalabs/common/habanalabs_ioctl.c | 3 ++- drivers/misc/habanalabs/gaudi/gaudi.c | 3 +++ drivers/misc/habanalabs/goya/goya.c | 3 +++ include/uapi/misc/habanalabs.h | 3 +++ 5 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 3c54010f7ab9..98163317ec43 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -411,6 +411,7 @@ struct hl_mmu_properties { * @first_available_user_mon: first monitor available for the user * @first_available_user_msix_interrupt: first available msix interrupt * reserved for the user + * @first_available_cq: first available CQ for the user. * @tpc_enabled_mask: which TPCs are enabled. * @completion_queues_count: number of completion queues. * @fw_security_disabled: true if security measures are disabled in firmware, @@ -475,6 +476,7 @@ struct asic_fixed_properties { u16 first_available_user_sob[HL_MAX_DCORES]; u16 first_available_user_mon[HL_MAX_DCORES]; u16 first_available_user_msix_interrupt; + u16 first_available_cq[HL_MAX_DCORES]; u8 tpc_enabled_mask; u8 completion_queues_count; u8 fw_security_disabled; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index e86f46d4b613..03af61cecd37 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -397,7 +397,8 @@ static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args) prop->first_available_user_sob[args->dcore_id]; sm_info.first_available_monitor = prop->first_available_user_mon[args->dcore_id]; - + sm_info.first_available_cq = + prop->first_available_cq[args->dcore_id]; return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size, sizeof(sm_info))) ? 
-EFAULT : 0; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 006c34ae35c2..8fc0de3cf3a9 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -529,6 +529,9 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->first_available_user_msix_interrupt = USHRT_MAX; + for (i = 0 ; i < HL_MAX_DCORES ; i++) + prop->first_available_cq[i] = USHRT_MAX; + /* disable fw security for now, set it in a later stage */ prop->fw_security_disabled = true; prop->fw_security_status_valid = false; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 53db7e966866..d26b405f0c17 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -457,6 +457,9 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->first_available_user_msix_interrupt = USHRT_MAX; + for (i = 0 ; i < HL_MAX_DCORES ; i++) + prop->first_available_cq[i] = USHRT_MAX; + /* disable fw security for now, set it in a later stage */ prop->fw_security_disabled = true; prop->fw_security_status_valid = false; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index ebde42b37b43..64ae83b5f8e5 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -414,10 +414,13 @@ struct hl_pll_frequency_info { * struct hl_info_sync_manager - sync manager information * @first_available_sync_object: first available sob * @first_available_monitor: first available monitor + * @first_available_cq: first available cq */ struct hl_info_sync_manager { __u32 first_available_sync_object; __u32 first_available_monitor; + __u32 first_available_cq; + __u32 reserved; }; /** -- cgit v1.2.3-58-ga151 From b520ca5d82f69ac28ca3d57f001430c203487cb3 Mon Sep 17 00:00:00 2001 From: Ohad Sharabi Date: Wed, 27 Jan 2021 15:42:53 +0200 Subject: habanalabs/gaudi: use HBM_ECC_EN bit for ECC ERR The driver should use ECC info from the FW only if the HBM ECC capability bit is set. Otherwise, try to fetch the data from the MC regs, and only if security is disabled.
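[Editor's note] Condensed, the selection policy described above looks roughly like the sketch below; the two helper calls are placeholders, and the authoritative code is the gaudi_hbm_read_interrupts() diff that follows:

	if (prop->fw_security_status_valid &&
	    (prop->fw_app_security_map & CPU_BOOT_DEV_STS0_HBM_ECC_EN)) {
		report_fw_ecc_data(hbm_ecc_data);	/* trust FW-provided ECC info */
	} else if (prop->fw_security_disabled) {
		read_mc_ecc_regs();	/* fall back to direct MC register access */
	} else {
		/* secured FW without the ECC capability: no safe data source */
		dev_info(hdev->dev,
			 "Cannot access MC regs for ECC data while security is enabled\n");
	}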
Signed-off-by: Ohad Sharabi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 8fc0de3cf3a9..b929e602fa3d 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7105,7 +7105,9 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device, u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch; int err = 0; - if (!hdev->asic_prop.fw_security_disabled) { + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_HBM_ECC_EN)) { if (!hbm_ecc_data) { dev_err(hdev->dev, "No FW ECC data"); return 0; @@ -7127,14 +7129,24 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device, le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); dev_err(hdev->dev, - "HBM%d pc%d ECC: TYPE=%d, WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", - device, ch, type, wr_par, rd_par, ca_par, serr, derr); + "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", + device, ch, wr_par, rd_par, ca_par, serr, derr); + dev_err(hdev->dev, + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%u, SEC_CNT=%d, DEC_CNT=%d\n", + device, ch, hbm_ecc_data->first_addr, type, + hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt, + hbm_ecc_data->dec_cnt); err = 1; return 0; } + if (!hdev->asic_prop.fw_security_disabled) { + dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n"); + return 0; + } + base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET; for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) { val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF); -- cgit v1.2.3-58-ga151 From 28bcf1fdc408cd2f7393ae5dcd71c756e1163cdb Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 1 Feb 2021 21:23:43 +0200 Subject: habanalabs: enable F/W events after init done Only after the initialization of the device is done, the driver is ready to receive events from the F/W. The driver can't handle events before that because of races so it will ignore events. In case of a fatal event, the driver won't know about it and the device will be operational although it shouldn't be. Same logic should be applied after hard-reset. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/device.c | 23 +++++++++++++++++++---- drivers/misc/habanalabs/common/habanalabs.h | 9 +++++++-- drivers/misc/habanalabs/gaudi/gaudi.c | 10 +++++++--- drivers/misc/habanalabs/goya/goya.c | 12 ++++++++---- 4 files changed, 41 insertions(+), 13 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 59219c862ca0..15fcb5c31c4b 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -1159,12 +1159,20 @@ kill_processes: atomic_set(&hdev->in_reset, 0); hdev->needs_reset = false; - if (hard_reset) + dev_notice(hdev->dev, "Successfully finished resetting the device\n"); + + if (hard_reset) { hdev->hard_reset_cnt++; - else - hdev->soft_reset_cnt++; - dev_warn(hdev->dev, "Successfully finished resetting the device\n"); + /* After reset is done, we are ready to receive events from + * the F/W. 
We can't do it before because we will ignore events + * and if those events are fatal, we won't know about it and + * the device will be operational although it shouldn't be + */ + hdev->asic_funcs->enable_events_from_fw(hdev); + } else { + hdev->soft_reset_cnt++; + } return 0; @@ -1415,6 +1423,13 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hdev->init_done = true; + /* After initialization is done, we are ready to receive events from + * the F/W. We can't do it before because we will ignore events and if + * those events are fatal, we won't know about it and the device will + * be operational although it shouldn't be + */ + hdev->asic_funcs->enable_events_from_fw(hdev); + return 0; release_ctx: diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 98163317ec43..18ed3a6000b0 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -860,12 +860,16 @@ enum div_select_defs { * and place them in the relevant cs jobs * @collective_wait_create_jobs: allocate collective wait cs jobs * @scramble_addr: Routine to scramble the address prior of mapping it - * in the MMU. + * in the MMU. * @descramble_addr: Routine to de-scramble the address prior of - * showing it to users. + * showing it to users. * @ack_protection_bits_errors: ack and dump all security violations * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it. * @hw_block_mmap: mmap a HW block with a given id. + * @enable_events_from_fw: send interrupt to firmware to notify them the + * driver is ready to receive asynchronous events. This + * function should be called during the first init and + * after every hard-reset of the device */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -982,6 +986,7 @@ struct hl_asic_funcs { u32 *block_id); int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, u32 block_id, u32 block_size); + void (*enable_events_from_fw)(struct hl_device *hdev); }; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index b929e602fa3d..6905857b363b 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1383,8 +1383,6 @@ static int gaudi_late_init(struct hl_device *hdev) return rc; } - WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER); - rc = gaudi_fetch_psoc_frequency(hdev); if (rc) { dev_err(hdev->dev, "Failed to fetch psoc frequency\n"); @@ -8500,6 +8498,11 @@ static int gaudi_block_mmap(struct hl_device *hdev, return -EPERM; } +static void gaudi_enable_events_from_fw(struct hl_device *hdev) +{ + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER); +} + static const struct hl_asic_funcs gaudi_funcs = { .early_init = gaudi_early_init, .early_fini = gaudi_early_fini, @@ -8581,7 +8584,8 @@ static const struct hl_asic_funcs gaudi_funcs = { .descramble_addr = hl_mmu_descramble_addr, .ack_protection_bits_errors = gaudi_ack_protection_bits_errors, .get_hw_block_id = gaudi_get_hw_block_id, - .hw_block_mmap = gaudi_block_mmap + .hw_block_mmap = gaudi_block_mmap, + .enable_events_from_fw = gaudi_enable_events_from_fw }; /** diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index d26b405f0c17..af6a5760924c 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -798,9 +798,6 @@ int goya_late_init(struct hl_device *hdev) return rc; } - 
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, - GOYA_ASYNC_EVENT_ID_INTS_REGISTER); - return 0; } @@ -5400,6 +5397,12 @@ static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma, return -EPERM; } +static void goya_enable_events_from_fw(struct hl_device *hdev) +{ + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_INTS_REGISTER); +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5481,7 +5484,8 @@ static const struct hl_asic_funcs goya_funcs = { .descramble_addr = hl_mmu_descramble_addr, .ack_protection_bits_errors = goya_ack_protection_bits_errors, .get_hw_block_id = goya_get_hw_block_id, - .hw_block_mmap = goya_block_mmap + .hw_block_mmap = goya_block_mmap, + .enable_events_from_fw = goya_enable_events_from_fw }; /* -- cgit v1.2.3-58-ga151 From 5b6b780660ad9e3ce60a1f04cfa1f4d5013e267a Mon Sep 17 00:00:00 2001 From: Ohad Sharabi Date: Tue, 2 Feb 2021 13:33:34 +0200 Subject: habanalabs: update security map after init CPU Qs When reading the CPU_BOOT_DEV_STS0 reg after FW reports SRAM AVAILABLE, the value in the register might not yet be updated by FW. To overcome this issue, another "up-to-date" read of this register is done at the end of CPU queues init. Signed-off-by: Ohad Sharabi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 7 ++----- drivers/misc/habanalabs/common/habanalabs.h | 3 --- drivers/misc/habanalabs/gaudi/gaudi.c | 6 +++++- drivers/misc/habanalabs/goya/goya.c | 6 +++++- 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers/misc') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 31b52a223f02..09706c571e95 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -125,7 +125,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, goto out; } - if (hdev->asic_prop.fw_cpucp_ack_with_pi) + if (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN) expected_ack_val = queue->pi; else expected_ack_val = CPUCP_PACKET_FENCE_VAL; @@ -786,10 +787,6 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) prop->hard_reset_done_by_fw = true; - if (prop->fw_boot_cpu_security_map & - CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN) - prop->fw_cpucp_ack_with_pi = true; - dev_dbg(hdev->dev, "Firmware boot CPU security status %#x\n", prop->fw_boot_cpu_security_map); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 18ed3a6000b0..46c37b0c704a 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -420,8 +420,6 @@ struct hl_mmu_properties { * from BOOT_DEV_STS0 * @dram_supports_virtual_memory: is there an MMU towards the DRAM * @hard_reset_done_by_fw: true if firmware is handling hard reset flow - * @fw_cpucp_ack_with_pi: true if cpucp is acking messages with the PQ PI - * instead of a magic number * @num_functional_hbms: number of functional HBMs in each DCORE.
 */
 struct asic_fixed_properties {
@@ -483,7 +481,6 @@ struct asic_fixed_properties {
 	u8			fw_security_status_valid;
 	u8			dram_supports_virtual_memory;
 	u8			hard_reset_done_by_fw;
-	u8			fw_cpucp_ack_with_pi;
 	u8			num_functional_hbms;
 };
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 6905857b363b..f937d90786b2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -536,7 +536,6 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
 	prop->fw_security_disabled = true;
 	prop->fw_security_status_valid = false;
 	prop->hard_reset_done_by_fw = false;
-	prop->fw_cpucp_ack_with_pi = false;

 	return 0;
 }
@@ -3727,6 +3726,7 @@ static int gaudi_init_cpu(struct hl_device *hdev)
 static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct hl_eq *eq;
 	u32 status;
 	struct hl_hw_queue *cpu_pq =
@@ -3783,6 +3783,10 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
 		return -EIO;
 	}

+	/* update FW application security bits */
+	if (prop->fw_security_status_valid)
+		prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+
 	gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
 	return 0;
 }
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index af6a5760924c..c30524660761 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -464,7 +464,6 @@ int goya_get_fixed_properties(struct hl_device *hdev)
 	prop->fw_security_disabled = true;
 	prop->fw_security_status_valid = false;
 	prop->hard_reset_done_by_fw = false;
-	prop->fw_cpucp_ack_with_pi = false;

 	return 0;
 }
@@ -1189,6 +1188,7 @@ static int goya_stop_external_queues(struct hl_device *hdev)
 int goya_init_cpu_queues(struct hl_device *hdev)
 {
 	struct goya_device *goya = hdev->asic_specific;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct hl_eq *eq;
 	u32 status;
 	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
@@ -1241,6 +1241,10 @@ int goya_init_cpu_queues(struct hl_device *hdev)
 		return -EIO;
 	}

+	/* update FW application security bits */
+	if (prop->fw_security_status_valid)
+		prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);
+
 	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
 	return 0;
 }
-- cgit v1.2.3-58-ga151

From 6df50d274363aa189a31435024339b781a6e32a9 Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Fri, 5 Feb 2021 16:04:34 +0200
Subject: habanalabs: return block size + block ID

When the user gives us a block address to get its ID for mmap, the user
also needs to get from us the block size to pass to the driver in the
mmap call.

Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/common/habanalabs.h |  4 +++-
 drivers/misc/habanalabs/common/memory.c     | 19 +++++++++++--------
 drivers/misc/habanalabs/gaudi/gaudi.c       |  2 +-
 drivers/misc/habanalabs/goya/goya.c         |  2 +-
 include/uapi/misc/habanalabs.h              | 27 +++++++++++++++++++++------
 5 files changed, 37 insertions(+), 17 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 46c37b0c704a..dc9f5a83dfc9 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -862,6 +862,8 @@ enum div_select_defs {
  *                    showing it to users.
  * @ack_protection_bits_errors: ack and dump all security violations
  * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
+ *                   also returns the size of the block if caller supplies
+ *                   a valid pointer for it
  * @hw_block_mmap: mmap a HW block with a given id.
  * @enable_events_from_fw: send interrupt to firmware to notify them the
  *                         driver is ready to receive asynchronous events. This
@@ -980,7 +982,7 @@ struct hl_asic_funcs {
 	u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
 	void (*ack_protection_bits_errors)(struct hl_device *hdev);
 	int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
-				u32 *block_id);
+				u32 *block_size, u32 *block_id);
 	int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
 			u32 block_id, u32 block_size);
 	void (*enable_events_from_fw)(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 7171e8820a2d..7cadf75ebb81 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1289,12 +1289,13 @@ vm_type_err:
 	return rc;
 }

-static int map_block(struct hl_device *hdev, u64 address, u64 *handle)
+static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
+			u32 *size)
 {
 	u32 block_id = 0;
 	int rc;

-	rc = hdev->asic_funcs->get_hw_block_id(hdev, address, &block_id);
+	rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);

 	*handle = block_id | HL_MMAP_TYPE_BLOCK;
 	*handle <<= PAGE_SHIFT;
@@ -1371,7 +1372,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
 	struct hl_device *hdev = hpriv->hdev;
 	struct hl_ctx *ctx = hpriv->ctx;
 	u64 block_handle, device_addr = 0;
-	u32 handle = 0;
+	u32 handle = 0, block_size;
 	int rc;

 	switch (args->in.op) {
@@ -1416,8 +1417,9 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)

 	case HL_MEM_OP_MAP_BLOCK:
 		rc = map_block(hdev, args->in.map_block.block_addr,
-				&block_handle);
-		args->out.handle = block_handle;
+				&block_handle, &block_size);
+		args->out.block_handle = block_handle;
+		args->out.block_size = block_size;
 		break;

 	default:
@@ -1437,7 +1439,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 	struct hl_device *hdev = hpriv->hdev;
 	struct hl_ctx *ctx = hpriv->ctx;
 	u64 block_handle, device_addr = 0;
-	u32 handle = 0;
+	u32 handle = 0, block_size;
 	int rc;

 	if (!hl_device_operational(hdev, &status)) {
@@ -1524,8 +1526,9 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)

 	case HL_MEM_OP_MAP_BLOCK:
 		rc = map_block(hdev, args->in.map_block.block_addr,
-				&block_handle);
-		args->out.handle = block_handle;
+				&block_handle, &block_size);
+		args->out.block_handle = block_handle;
+		args->out.block_size = block_size;
 		break;

 	default:
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index f937d90786b2..35342edd4a02 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -8490,7 +8490,7 @@ static u64 gaudi_get_device_time(struct hl_device *hdev)
 }

 static int gaudi_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
-			u32 *block_id)
+			u32 *block_size, u32 *block_id)
 {
 	return -EPERM;
 }
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index c30524660761..ed566c52ccaa 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -5390,7 +5390,7 @@ static void goya_ctx_fini(struct hl_ctx *ctx)
 }

 static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
-			u32 *block_id)
+			u32 *block_size, u32 *block_id)
 {
 	return -EPERM;
 }
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 64ae83b5f8e5..5a86b521a450 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -782,10 +782,10 @@ struct hl_mem_in {
 		/* HL_MEM_OP_MAP_BLOCK - map a hw block */
 		struct {
 			/*
-			 * HW block address to map, a handle will be returned
-			 * to the user and will be used to mmap the relevant
-			 * block. Only addresses from configuration space are
-			 * allowed.
+			 * HW block address to map, a handle and size will be
+			 * returned to the user and will be used to mmap the
+			 * relevant block. Only addresses from configuration
+			 * space are allowed.
 			 */
 			__u64 block_addr;
 		} map_block;
@@ -816,11 +816,26 @@ struct hl_mem_out {
 		__u64 device_virt_addr;

 		/*
-		 * Used for HL_MEM_OP_ALLOC and HL_MEM_OP_MAP_BLOCK.
+		 * Used in HL_MEM_OP_ALLOC
 		 * This is the assigned handle for the allocated memory
-		 * or mapped block
 		 */
 		__u64 handle;
+
+		struct {
+			/*
+			 * Used in HL_MEM_OP_MAP_BLOCK.
+			 * This is the assigned handle for the mapped block
+			 */
+			__u64 block_handle;
+
+			/*
+			 * Used in HL_MEM_OP_MAP_BLOCK
+			 * This is the size of the mapped block
+			 */
+			__u32 block_size;
+
+			__u32 pad;
+		};
 	};
 };
-- cgit v1.2.3-58-ga151

From da5dfbb97a82ff698e1dc7b229d4d4f5759dad2b Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Sat, 6 Feb 2021 19:34:59 +0200
Subject: habanalabs/gaudi: don't enable clock gating on DMA5

Graph Compiler uses DMA5 in a non-standard way, which requires the
driver to disable clock gating on that DMA.

Signed-off-by: Oded Gabbay
---
 drivers/misc/habanalabs/gaudi/gaudi.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 35342edd4a02..9152242778f5 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -3460,6 +3460,12 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
 		enable = !!(hdev->clock_gating_mask &
 				(BIT_ULL(gaudi_dma_assignment[i])));

+		/* GC sends work to DMA engine through Upper CP in DMA5 so
+		 * we need to not enable clock gating in that DMA
+		 */
+		if (i == GAUDI_HBM_DMA_4)
+			enable = 0;
+
 		qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
 		WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset,
 				enable ? QMAN_CGM1_PWR_GATE_EN : 0);
-- cgit v1.2.3-58-ga151

From f320ff0387a8a2d3123c4f7d6d61eecc740d0466 Mon Sep 17 00:00:00 2001
From: Uwe Kleine-König
Date: Mon, 8 Feb 2021 08:37:04 +0100
Subject: mei: bus: simplify mei_cl_device_remove()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The driver core only calls a bus' remove function when there is
actually a driver and a device. So drop the needless check and assign
cldrv earlier.

(Side note: The check for cldev being non-NULL is broken anyhow, because
to_mei_cl_device() is a wrapper around container_of() for a member that
is not the first one. So cldev can only become NULL if dev is
(void *)0xc (on archs with 32-bit pointers) or (void *)0x18 (on archs
with 64-bit pointers).)
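To make the side note concrete, here is a small userspace sketch of the
container_of() arithmetic (the struct layout and offsets below are made
up for illustration and are not taken from the mei headers):

  #include <stddef.h>
  #include <stdio.h>

  struct device { int dummy; };

  struct mei_cl_device_like {
          void *placeholder;      /* members that precede 'dev' */
          struct device dev;      /* NOT the first member */
  };

  /* same shape as to_mei_cl_device(): subtract the member offset */
  #define to_outer(d) \
          ((struct mei_cl_device_like *)((char *)(d) - \
                  offsetof(struct mei_cl_device_like, dev)))

  int main(void)
  {
          struct device *dev = NULL;

          /* For dev == NULL this yields a small negative offset cast
           * to a pointer, not NULL, so a '!cldev' check can never fire
           * for any real device pointer. */
          printf("%p\n", (void *)to_outer(dev));
          return 0;
  }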
Signed-off-by: Uwe Kleine-König
Link: https://lore.kernel.org/r/20210208073705.428185-2-uwe@kleine-koenig.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/mei/bus.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 34fb5e541fe5..393a41330a6b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -880,13 +880,9 @@ static int mei_cl_device_probe(struct device *dev)
 static int mei_cl_device_remove(struct device *dev)
 {
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
-	struct mei_cl_driver *cldrv;
+	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
 	int ret = 0;

-	if (!cldev || !dev->driver)
-		return 0;
-
-	cldrv = to_mei_cl_driver(dev->driver);
 	if (cldrv->remove)
 		ret = cldrv->remove(cldev);
-- cgit v1.2.3-58-ga151

From bf5c9cc8ad7fffd1f72df3baa5870449e4c16d1b Mon Sep 17 00:00:00 2001
From: Uwe Kleine-König
Date: Mon, 8 Feb 2021 08:37:05 +0100
Subject: mei: bus: change remove callback to return void
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The driver core ignores the return value of mei_cl_device_remove(), so
passing an error value doesn't solve any problem. As most mei drivers'
remove callbacks return 0 unconditionally and returning a different
value doesn't have any effect, change this prototype to return void and
return 0 unconditionally in mei_cl_device_remove(). The only driver that
could return an error value is modified to emit an explicit warning in
the error case.

Acked-by: Guenter Roeck
Signed-off-by: Uwe Kleine-König
Link: https://lore.kernel.org/r/20210208073705.428185-3-uwe@kleine-koenig.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/mei/bus.c           | 5 ++---
 drivers/misc/mei/hdcp/mei_hdcp.c | 7 +++++--
 drivers/nfc/microread/mei.c      | 4 +---
 drivers/nfc/pn544/mei.c          | 4 +---
 drivers/watchdog/mei_wdt.c       | 4 +---
 include/linux/mei_cl_bus.h       | 2 +-
 6 files changed, 11 insertions(+), 15 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 393a41330a6b..580074e32599 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -881,17 +881,16 @@ static int mei_cl_device_remove(struct device *dev)
 {
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
-	int ret = 0;

 	if (cldrv->remove)
-		ret = cldrv->remove(cldev);
+		cldrv->remove(cldev);

 	mei_cldev_unregister_callbacks(cldev);

 	mei_cl_bus_module_put(cldev);
 	module_put(THIS_MODULE);

-	return ret;
+	return 0;
 }

 static ssize_t name_show(struct device *dev, struct device_attribute *a,
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 9ae9669e46ea..8447ad4b7d47 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -845,16 +845,19 @@ enable_err_exit:
 	return ret;
 }

-static int mei_hdcp_remove(struct mei_cl_device *cldev)
+static void mei_hdcp_remove(struct mei_cl_device *cldev)
 {
 	struct i915_hdcp_comp_master *comp_master =
 						mei_cldev_get_drvdata(cldev);
+	int ret;

 	component_master_del(&cldev->dev, &mei_component_master_ops);
 	kfree(comp_master);
 	mei_cldev_set_drvdata(cldev, NULL);

-	return mei_cldev_disable(cldev);
+	ret = mei_cldev_disable(cldev);
+	if (ret)
+		dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
 }

 #define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 5dad8847a9b3..8fa7771085eb 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -44,15 +44,13 @@ static int microread_mei_probe(struct mei_cl_device *cldev,
 	return 0;
 }

-static int microread_mei_remove(struct mei_cl_device *cldev)
+static void microread_mei_remove(struct mei_cl_device *cldev)
 {
 	struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);

 	microread_remove(phy->hdev);

 	nfc_mei_phy_free(phy);
-
-	return 0;
 }

 static struct mei_cl_device_id microread_mei_tbl[] = {
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 579bc599f545..5c10aac085a4 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -42,7 +42,7 @@ static int pn544_mei_probe(struct mei_cl_device *cldev,
 	return 0;
 }

-static int pn544_mei_remove(struct mei_cl_device *cldev)
+static void pn544_mei_remove(struct mei_cl_device *cldev)
 {
 	struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);

@@ -51,8 +51,6 @@ static int pn544_mei_remove(struct mei_cl_device *cldev)
 	pn544_hci_remove(phy->hdev);

 	nfc_mei_phy_free(phy);
-
-	return 0;
 }

 static struct mei_cl_device_id pn544_mei_tbl[] = {
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 5391bf3e6b11..53165e49c298 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -619,7 +619,7 @@ err_out:
 	return ret;
 }

-static int mei_wdt_remove(struct mei_cl_device *cldev)
+static void mei_wdt_remove(struct mei_cl_device *cldev)
 {
 	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);

@@ -636,8 +636,6 @@ static int mei_wdt_remove(struct mei_cl_device *cldev)
 	dbgfs_unregister(wdt);

 	kfree(wdt);
-
-	return 0;
 }

 #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 959ad7d850b4..07f5ef8fc456 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -68,7 +68,7 @@ struct mei_cl_driver {
 	int (*probe)(struct mei_cl_device *cldev,
 		     const struct mei_cl_device_id *id);

-	int (*remove)(struct mei_cl_device *cldev);
+	void (*remove)(struct mei_cl_device *cldev);
 };

 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
-- cgit v1.2.3-58-ga151

From b398d53cd421454d64850f8b1f6d609ede9042d9 Mon Sep 17 00:00:00 2001
From: Alexander Usyskin
Date: Mon, 8 Feb 2021 17:06:48 +0200
Subject: mei: bus: block send with vtag on non-conformant FW

Block sending data with a vtag if either the transport layer or the FW
client does not support vtags.

Cc: # v5.10+
Signed-off-by: Alexander Usyskin
Signed-off-by: Tomas Winkler
Link: https://lore.kernel.org/r/20210208150649.141358-1-tomas.winkler@intel.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/mei/bus.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 580074e32599..935acc6bbf3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -61,6 +61,13 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
 		goto out;
 	}

+	if (vtag) {
+		/* Check if vtag is supported by client */
+		rets = mei_cl_vt_support_check(cl);
+		if (rets)
+			goto out;
+	}
+
 	if (length > mei_cl_mtu(cl)) {
 		rets = -EFBIG;
 		goto out;
-- cgit v1.2.3-58-ga151

From e666b79e22958564fc23e32bb67ef57b21729067 Mon Sep 17 00:00:00 2001
From: Tomas Winkler
Date: Mon, 8 Feb 2021 17:06:49 +0200
Subject: mei: use sysfs_emit() in tx_queue_limit_show sysfs

Using snprintf() in sysfs is discouraged; use the new sysfs_emit() API
instead.
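For reference, the same conversion applies to any sysfs show() callback;
a sketch with a hypothetical attribute (the names are illustrative and
not taken from the mei driver):

  static unsigned int example_limit;

  static ssize_t example_limit_show(struct device *device,
                                    struct device_attribute *attr,
                                    char *buf)
  {
          /* sysfs_emit() knows the buffer is PAGE_SIZE-sized and checks
           * it is page-aligned, so the size argument of snprintf() can
           * no longer be gotten wrong by the caller. */
          return sysfs_emit(buf, "%u\n", example_limit);
  }
  static DEVICE_ATTR_RO(example_limit);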
Signed-off-by: Tomas Winkler
Link: https://lore.kernel.org/r/20210208150649.141358-2-tomas.winkler@intel.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/mei/main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 9f6682033ed7..28937b6e7e0c 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1026,7 +1026,7 @@ static ssize_t tx_queue_limit_show(struct device *device,
 	size = dev->tx_queue_limit;
 	mutex_unlock(&dev->device_lock);

-	return snprintf(buf, PAGE_SIZE, "%u\n", size);
+	return sysfs_emit(buf, "%u\n", size);
 }

 static ssize_t tx_queue_limit_store(struct device *device,
-- cgit v1.2.3-58-ga151

From b212658aebda82f92967bcbd4c7380d607c3d803 Mon Sep 17 00:00:00 2001
From: Jonathan Marek
Date: Mon, 8 Feb 2021 15:04:01 -0500
Subject: misc: fastrpc: fix incorrect usage of dma_map_sgtable

dma_map_sgtable() returns 0 on success, which is the opposite of what
this code was doing.

Fixes: 7cd7edb89437 ("misc: fastrpc: fix common struct sg_table related issues")
Acked-by: Marek Szyprowski
Signed-off-by: Jonathan Marek
Link: https://lore.kernel.org/r/20210208200401.31100-1-jonathan@marek.ca
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/fastrpc.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'drivers/misc')

diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 70eb5ed942d0..f12e909034ac 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -520,12 +520,13 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
 {
 	struct fastrpc_dma_buf_attachment *a = attachment->priv;
 	struct sg_table *table;
+	int ret;

 	table = &a->sgt;

-	if (!dma_map_sgtable(attachment->dev, table, dir, 0))
-		return ERR_PTR(-ENOMEM);
-
+	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
+	if (ret)
+		table = ERR_PTR(ret);
 	return table;
 }
-- cgit v1.2.3-58-ga151

From 2fd10bcf0310b9525b2af9e1f7aa9ddd87c3772e Mon Sep 17 00:00:00 2001
From: Sabyrzhan Tasbolatov
Date: Tue, 9 Feb 2021 16:26:12 +0600
Subject: drivers/misc/vmw_vmci: restrict too big queue size in qp_host_alloc_queue

syzbot found a WARNING in qp_broker_alloc[1], triggered from
qp_host_alloc_queue() when num_pages is 0x100001, making queue_size +
queue_page_size bigger than KMALLOC_MAX_SIZE for kzalloc() and resulting
in an order >= MAX_ORDER condition. Here queue_size + queue_page_size =
0x8000d8, while KMALLOC_MAX_SIZE = 0x400000.

[1] Call Trace:
 alloc_pages include/linux/gfp.h:547 [inline]
 kmalloc_order+0x40/0x130 mm/slab_common.c:837
 kmalloc_order_trace+0x15/0x70 mm/slab_common.c:853
 kmalloc_large include/linux/slab.h:481 [inline]
 __kmalloc+0x257/0x330 mm/slub.c:3959
 kmalloc include/linux/slab.h:557 [inline]
 kzalloc include/linux/slab.h:682 [inline]
 qp_host_alloc_queue drivers/misc/vmw_vmci/vmci_queue_pair.c:540 [inline]
 qp_broker_create drivers/misc/vmw_vmci/vmci_queue_pair.c:1351 [inline]
 qp_broker_alloc+0x936/0x2740 drivers/misc/vmw_vmci/vmci_queue_pair.c:1739

Reported-by: syzbot+15ec7391f3d6a1a7cc7d@syzkaller.appspotmail.com
Signed-off-by: Sabyrzhan Tasbolatov
Link: https://lore.kernel.org/r/20210209102612.2112247-1-snovitoll@gmail.com
Cc: stable
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/vmw_vmci/vmci_queue_pair.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/misc')

diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index d787ddecee77..880c33ab9f47 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -539,6 +539,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)

 	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

+	if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
+		return NULL;
+
 	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
 	if (queue) {
 		queue->q_header = NULL;
-- cgit v1.2.3-58-ga151
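As a closing note, the guard added in the last patch generalizes to any
allocator that sums caller-controlled sizes. A sketch of the pattern
with hypothetical names -- simplified, not the driver's exact code:

  #include <linux/overflow.h>
  #include <linux/slab.h>

  static void *alloc_header_plus_pages(size_t header_size, size_t num_pages,
                                       size_t per_page_size)
  {
          size_t total;

          /* array_size() saturates to SIZE_MAX on multiply overflow and
           * check_add_overflow() catches the final addition. */
          if (check_add_overflow(header_size,
                                 array_size(num_pages, per_page_size),
                                 &total))
                  return NULL;

          /* Refuse anything kmalloc() cannot serve instead of tripping
           * the order >= MAX_ORDER warning. */
          if (total > KMALLOC_MAX_SIZE)
                  return NULL;

          return kzalloc(total, GFP_KERNEL);
  }

Checking against KMALLOC_MAX_SIZE up front keeps the failure an orderly
NULL return for the caller to handle, rather than a WARN splat deep in
the page allocator.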