author     Paolo Bonzini <pbonzini@redhat.com>   2023-09-23 05:35:55 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2023-09-23 05:35:55 -0400
commit     5804c19b80bf625c6a9925317f845e497434d6d3 (patch)
tree       ba4a0bf9cac721e2bf898e3be5ab64773da5a9d2
parent     916e3e5f26abc165437950daff370c0693572ef4 (diff)
parent     071ef070ca77e6dfe33fd78afa293e83422f0411 (diff)
Merge tag 'kvm-riscv-fixes-6.6-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv fixes for 6.6, take #1
- Fix KVM_GET_REG_LIST API for ISA_EXT registers
- Fix reading ISA_EXT register of a missing extension
- Fix ISA_EXT register handling in get-reg-list test
- Fix filtering of AIA registers in get-reg-list test
350 files changed, 2321 insertions(+), 1241 deletions(-)
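
The ISA_EXT fixes listed above come down to one rule: a register index whose extension is not implemented by the host must be reported as absent (-ENOENT) rather than silently reading back as 0, and the get-reg-list enumeration must apply the same host-side filter. The arch/riscv/kvm/vcpu_onereg.c hunk further down makes exactly this change in the kernel; the self-contained sketch below only illustrates the pattern, with hypothetical stand-in tables (host_has/guest_has) in place of the real kvm_isa_ext_arr and __riscv_isa_extension_available() helpers.

```c
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/*
 * Hypothetical stand-ins for the kernel's extension table and availability
 * helpers -- not the real kvm_isa_ext_arr / __riscv_isa_extension_available().
 */
enum { EXT_SSAIA, EXT_ZICBOM, EXT_ZIHINTPAUSE, EXT_MAX };

static const bool host_has[EXT_MAX]  = { true, false, true  };
static const bool guest_has[EXT_MAX] = { true, false, false };

/*
 * Mirrors the fixed lookup: an out-of-range index or an extension the host
 * does not implement is reported as missing (-ENOENT) instead of returning
 * a readable register with value 0.
 */
static int get_isa_ext_single(unsigned long reg_num, unsigned long *reg_val)
{
	if (reg_num >= EXT_MAX)
		return -ENOENT;
	if (!host_has[reg_num])
		return -ENOENT;		/* the core of the fix */

	*reg_val = guest_has[reg_num] ? 1 : 0;
	return 0;
}

int main(void)
{
	unsigned long val = 0;

	for (unsigned long i = 0; i < EXT_MAX; i++) {
		int ret = get_isa_ext_single(i, &val);

		printf("reg %lu: %s\n", i,
		       ret ? "-ENOENT (extension absent on host)"
			   : (val ? "enabled for vCPU" : "available, disabled"));
	}
	return 0;
}
```

Running the sketch prints one line per register index, showing that a host-absent extension now surfaces as -ENOENT rather than as a register that reads 0.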
diff --git a/Documentation/filesystems/btrfs.rst b/Documentation/filesystems/btrfs.rst index 992eddb0e11b..a81db8f54d68 100644 --- a/Documentation/filesystems/btrfs.rst +++ b/Documentation/filesystems/btrfs.rst @@ -37,7 +37,6 @@ For more information please refer to the documentation site or wiki https://btrfs.readthedocs.io - https://btrfs.wiki.kernel.org that maintains information about administration tasks, frequently asked questions, use cases, mount options, comprehensible changelogs, features, diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst index cb686238f21d..ac7c52f130c9 100644 --- a/Documentation/process/embargoed-hardware-issues.rst +++ b/Documentation/process/embargoed-hardware-issues.rst @@ -251,6 +251,7 @@ an involved disclosed party. The current ambassadors list: IBM Z Christian Borntraeger <borntraeger@de.ibm.com> Intel Tony Luck <tony.luck@intel.com> Qualcomm Trilok Soni <tsoni@codeaurora.org> + RISC-V Palmer Dabbelt <palmer@dabbelt.com> Samsung Javier González <javier.gonz@samsung.com> Microsoft James Morris <jamorris@linux.microsoft.com> diff --git a/MAINTAINERS b/MAINTAINERS index 90f13281d297..bf0f54c24f81 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1855,7 +1855,7 @@ F: Documentation/devicetree/bindings/phy/amlogic* F: arch/arm/boot/dts/amlogic/ F: arch/arm/mach-meson/ F: arch/arm64/boot/dts/amlogic/ -F: drivers/genpd/amlogic/ +F: drivers/pmdomain/amlogic/ F: drivers/mmc/host/meson* F: drivers/phy/amlogic/ F: drivers/pinctrl/meson/ @@ -1918,7 +1918,7 @@ F: drivers/bluetooth/hci_bcm4377.c F: drivers/clk/clk-apple-nco.c F: drivers/cpufreq/apple-soc-cpufreq.c F: drivers/dma/apple-admac.c -F: drivers/genpd/apple/ +F: drivers/pmdomain/apple/ F: drivers/i2c/busses/i2c-pasemi-core.c F: drivers/i2c/busses/i2c-pasemi-platform.c F: drivers/iommu/apple-dart.c @@ -2435,7 +2435,7 @@ F: arch/arm/mach-ux500/ F: drivers/clk/clk-nomadik.c F: drivers/clocksource/clksrc-dbx500-prcmu.c F: drivers/dma/ste_dma40* -F: drivers/genpd/st/ste-ux500-pm-domain.c +F: drivers/pmdomain/st/ste-ux500-pm-domain.c F: drivers/hwspinlock/u8500_hsem.c F: drivers/i2c/busses/i2c-nomadik.c F: drivers/iio/adc/ab8500-gpadc.c @@ -2598,7 +2598,7 @@ F: arch/arm/include/debug/renesas-scif.S F: arch/arm/mach-shmobile/ F: arch/arm64/boot/dts/renesas/ F: arch/riscv/boot/dts/renesas/ -F: drivers/genpd/renesas/ +F: drivers/pmdomain/renesas/ F: drivers/soc/renesas/ F: include/linux/soc/renesas/ K: \brenesas, @@ -4026,7 +4026,7 @@ F: arch/mips/kernel/*bmips* F: drivers/irqchip/irq-bcm63* F: drivers/irqchip/irq-bcm7* F: drivers/irqchip/irq-brcmstb* -F: drivers/genpd/bcm/bcm63xx-power.c +F: drivers/pmdomain/bcm/bcm63xx-power.c F: include/linux/bcm963xx_nvram.h F: include/linux/bcm963xx_tag.h @@ -4248,7 +4248,7 @@ R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-pm@vger.kernel.org S: Maintained T: git https://github.com/broadcom/stblinux.git -F: drivers/genpd/bcm/bcm-pmb.c +F: drivers/pmdomain/bcm/bcm-pmb.c F: include/dt-bindings/soc/bcm-pmb.h BROADCOM SPECIFIC AMBA DRIVER (BCMA) @@ -4378,7 +4378,6 @@ M: David Sterba <dsterba@suse.com> L: linux-btrfs@vger.kernel.org S: Maintained W: https://btrfs.readthedocs.io -W: https://btrfs.wiki.kernel.org/ Q: https://patchwork.kernel.org/project/linux-btrfs/list/ C: irc://irc.libera.chat/btrfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git @@ -8729,7 +8728,7 @@ M: Ulf Hansson <ulf.hansson@linaro.org> L: linux-pm@vger.kernel.org S: Supported T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git -F: drivers/genpd/ +F: drivers/pmdomain/ GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER M: Eugen Hristev <eugen.hristev@microchip.com> @@ -17680,7 +17679,7 @@ L: linux-pm@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml -F: drivers/genpd/qcom/cpr.c +F: drivers/pmdomain/qcom/cpr.c QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096 M: Ilia Lin <ilia.lin@kernel.org> @@ -20514,7 +20513,7 @@ STARFIVE JH71XX PMU CONTROLLER DRIVER M: Walker Chen <walker.chen@starfivetech.com> S: Supported F: Documentation/devicetree/bindings/power/starfive* -F: drivers/genpd/starfive/jh71xx-pmu.c +F: drivers/pmdomain/starfive/jh71xx-pmu.c F: include/dt-bindings/power/starfive,jh7110-pmu.h STARFIVE SOC DRIVERS @@ -21339,7 +21338,7 @@ F: drivers/irqchip/irq-ti-sci-inta.c F: drivers/irqchip/irq-ti-sci-intr.c F: drivers/reset/reset-ti-sci.c F: drivers/soc/ti/ti_sci_inta_msi.c -F: drivers/genpd/ti/ti_sci_pm_domains.c +F: drivers/pmdomain/ti/ti_sci_pm_domains.c F: include/dt-bindings/soc/ti,sci_pm_domain.h F: include/linux/soc/ti/ti_sci_inta_msi.h F: include/linux/soc/ti/ti_sci_protocol.h @@ -21581,7 +21580,7 @@ L: linux-kernel@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git -F: drivers/genpd/ti/omap_prm.c +F: drivers/pmdomain/ti/omap_prm.c F: drivers/soc/ti/* TI LM49xxx FAMILY ASoC CODEC DRIVERS @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index e23d06b51a20..2a60d7a72f1f 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -37,6 +37,7 @@ extern int split_tlb; extern int dcache_stride; extern int icache_stride; extern struct pdc_cache_info cache_info; +extern struct pdc_btlb_info btlb_info; void parisc_setup_cache_timing(void); #define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \ diff --git a/arch/parisc/include/asm/mckinley.h b/arch/parisc/include/asm/mckinley.h deleted file mode 100644 index 1314390b9034..000000000000 --- a/arch/parisc/include/asm/mckinley.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_PARISC_MCKINLEY_H -#define ASM_PARISC_MCKINLEY_H - -/* declared in arch/parisc/kernel/setup.c */ -extern struct proc_dir_entry * proc_mckinley_root; - -#endif /*ASM_PARISC_MCKINLEY_H*/ diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 269b9a159f01..5d2d9737e579 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -44,10 +44,11 @@ int pdc_model_capabilities(unsigned long *capabilities); int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no); int pdc_cache_info(struct pdc_cache_info *cache); int pdc_spaceid_bits(unsigned long *space_bits); -#ifndef CONFIG_PA20 int pdc_btlb_info(struct pdc_btlb_info *btlb); +int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len, + unsigned long entry_info, unsigned long slot); +int pdc_btlb_purge_all(void); int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path); -#endif /* !CONFIG_PA20 */ int pdc_pim_toc11(struct pdc_toc_pim_11 *ret); int pdc_pim_toc20(struct pdc_toc_pim_20 *ret); int pdc_lan_station_id(char *lan_addr, unsigned long 
net_hpa); diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index d77c43d32974..ff6cbdb6903b 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -310,6 +310,7 @@ extern void do_syscall_trace_exit(struct pt_regs *); struct seq_file; extern void early_trap_init(void); extern void collect_boot_cpu_data(void); +extern void btlb_init_per_cpu(void); extern int show_cpuinfo (struct seq_file *m, void *v); /* driver code in driver/parisc */ diff --git a/arch/parisc/include/asm/ropes.h b/arch/parisc/include/asm/ropes.h index fd96706c7234..e2d2d7e9bfde 100644 --- a/arch/parisc/include/asm/ropes.h +++ b/arch/parisc/include/asm/ropes.h @@ -29,7 +29,7 @@ struct ioc { void __iomem *ioc_hpa; /* I/O MMU base address */ char *res_map; /* resource map, bit == pdir entry */ - u64 *pdir_base; /* physical base address */ + __le64 *pdir_base; /* physical base address */ unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */ unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */ #ifdef ZX1_SUPPORT @@ -86,6 +86,9 @@ struct sba_device { struct ioc ioc[MAX_IOC]; }; +/* list of SBA's in system, see drivers/parisc/sba_iommu.c */ +extern struct sba_device *sba_list; + #define ASTRO_RUNWAY_PORT 0x582 #define IKE_MERCED_PORT 0x803 #define REO_MERCED_PORT 0x804 @@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) { #define SBA_PDIR_VALID_BIT 0x8000000000000000ULL -#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL +#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL #define SBA_FUNC_ID 0x0000 /* function id */ #define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h index 74f74e4d35b7..5a95b0f62b87 100644 --- a/arch/parisc/include/asm/shmparam.h +++ b/arch/parisc/include/asm/shmparam.h @@ -2,6 +2,21 @@ #ifndef _ASMPARISC_SHMPARAM_H #define _ASMPARISC_SHMPARAM_H +/* + * PA-RISC uses virtually indexed & physically tagged (VIPT) caches + * which has strict requirements when two pages to the same physical + * address are accessed through different mappings. Read the section + * "Address Aliasing" in the arch docs for more detail: + * PA-RISC 1.1 (page 3-6): + * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf + * PA-RISC 2.0 (page F-5): + * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf + * + * For Linux we allow kernel and userspace to map pages on page size + * granularity (SHMLBA) but have to ensure that, if two pages are + * mapped to the same physical address, the virtual and physical + * addresses modulo SHM_COLOUR are identical. 
+ */ #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #define SHM_COLOUR 0x00400000 /* shared mappings colouring */ diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 94652e13c260..757816a7bd4b 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -275,6 +275,8 @@ int main(void) * and kernel data on physical huge pages */ #ifdef CONFIG_HUGETLB_PAGE DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT); +#elif !defined(CONFIG_64BIT) + DEFINE(HUGEPAGE_SIZE, 4*1024*1024); #else DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); #endif diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 442109a48940..268d90a9325b 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init; struct pdc_cache_info cache_info __ro_after_init; #ifndef CONFIG_PA20 -static struct pdc_btlb_info btlb_info __ro_after_init; +struct pdc_btlb_info btlb_info __ro_after_init; #endif DEFINE_STATIC_KEY_TRUE(parisc_has_cache); @@ -264,12 +264,6 @@ parisc_cache_init(void) icache_stride = CAFL_STRIDE(cache_info.ic_conf); #undef CAFL_STRIDE -#ifndef CONFIG_PA20 - if (pdc_btlb_info(&btlb_info) < 0) { - memset(&btlb_info, 0, sizeof btlb_info); - } -#endif - if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) == PDC_MODEL_NVA_UNSUPPORTED) { printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n"); diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 8f4b77648491..ed8b75948061 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -925,9 +925,9 @@ static __init void qemu_header(void) pr_info("#define PARISC_MODEL \"%s\"\n\n", boot_cpu_data.pdc.sys_model_name); + #define p ((unsigned long *)&boot_cpu_data.pdc.model) pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, " "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n", - #define p ((unsigned long *)&boot_cpu_data.pdc.model) p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); #undef p diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 8f37e75f2fb9..81078abec521 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -687,7 +687,6 @@ int pdc_spaceid_bits(unsigned long *space_bits) return retval; } -#ifndef CONFIG_PA20 /** * pdc_btlb_info - Return block TLB information. * @btlb: The return buffer. 
@@ -696,18 +695,51 @@ int pdc_spaceid_bits(unsigned long *space_bits) */ int pdc_btlb_info(struct pdc_btlb_info *btlb) { - int retval; + int retval; unsigned long flags; - spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0); - memcpy(btlb, pdc_result, sizeof(*btlb)); - spin_unlock_irqrestore(&pdc_lock, flags); + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; - if(retval < 0) { - btlb->max_size = 0; - } - return retval; + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0); + memcpy(btlb, pdc_result, sizeof(*btlb)); + spin_unlock_irqrestore(&pdc_lock, flags); + + if(retval < 0) { + btlb->max_size = 0; + } + return retval; +} + +int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len, + unsigned long entry_info, unsigned long slot) +{ + int retval; + unsigned long flags; + + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32), + (unsigned long) vpage, physpage, len, entry_info, slot); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; +} + +int pdc_btlb_purge_all(void) +{ + int retval; + unsigned long flags; + + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; } /** @@ -728,6 +760,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address, int retval; unsigned long flags; + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + spin_lock_irqsave(&pdc_lock, flags); memcpy(pdc_result2, mod_path, sizeof(*mod_path)); retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result), @@ -737,7 +772,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address, return retval; } -#endif /* !CONFIG_PA20 */ /** * pdc_lan_station_id - Get the LAN address. diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index fd15fd4bbb61..a171bf3c6b31 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -180,10 +180,10 @@ $pgt_fill_loop: std %dp,0x18(%r10) #endif -#ifdef CONFIG_64BIT - /* Get PDCE_PROC for monarch CPU. */ #define MEM_PDC_LO 0x388 #define MEM_PDC_HI 0x35C +#ifdef CONFIG_64BIT + /* Get PDCE_PROC for monarch CPU. 
*/ ldw MEM_PDC_LO(%r0),%r3 ldw MEM_PDC_HI(%r0),%r10 depd %r10, 31, 32, %r3 /* move to upper word */ @@ -269,7 +269,17 @@ stext_pdc_ret: tovirt_r1 %r6 mtctl %r6,%cr30 /* restore task thread info */ #endif - + +#ifndef CONFIG_64BIT + /* clear all BTLBs */ + ldi PDC_BLOCK_TLB,%arg0 + load32 PA(stext_pdc_btlb_ret), %rp + ldw MEM_PDC_LO(%r0),%r3 + bv (%r3) + ldi PDC_BTLB_PURGE_ALL,%arg1 +stext_pdc_btlb_ret: +#endif + /* PARANOID: clear user scratch/user space SR's */ mtsp %r0,%sr0 mtsp %r0,%sr1 diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 12c4d4104ade..2f81bfd4f15e 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -365,7 +365,7 @@ union irq_stack_union { volatile unsigned int lock[1]; }; -DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { +static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { .slock = { 1,1,1,1 }, }; #endif diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index a0e2d37c5b3b..1fc89fa2c2d2 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -368,6 +368,8 @@ int init_per_cpu(int cpunum) /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */ init_percpu_prof(cpunum); + btlb_init_per_cpu(); + return ret; } diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 1aaa2ca09800..58694d1989c2 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -154,6 +154,7 @@ SECTIONS } /* End of data section */ + . = ALIGN(PAGE_SIZE); _edata = .; /* BSS */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index a088c243edea..a2a3e89f2d9a 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -32,6 +32,7 @@ #include <asm/sections.h> #include <asm/msgbuf.h> #include <asm/sparsemem.h> +#include <asm/asm-offsets.h> extern int data_start; extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ @@ -720,6 +721,77 @@ void __init paging_init(void) parisc_bootmem_free(); } +static void alloc_btlb(unsigned long start, unsigned long end, int *slot, + unsigned long entry_info) +{ + const int slot_max = btlb_info.fixed_range_info.num_comb; + int min_num_pages = btlb_info.min_size; + unsigned long size; + + /* map at minimum 4 pages */ + if (min_num_pages < 4) + min_num_pages = 4; + + size = HUGEPAGE_SIZE; + while (start < end && *slot < slot_max && size >= PAGE_SIZE) { + /* starting address must have same alignment as size! 
*/ + /* if correctly aligned and fits in double size, increase */ + if (((start & (2 * size - 1)) == 0) && + (end - start) >= (2 * size)) { + size <<= 1; + continue; + } + /* if current size alignment is too big, try smaller size */ + if ((start & (size - 1)) != 0) { + size >>= 1; + continue; + } + if ((end - start) >= size) { + if ((size >> PAGE_SHIFT) >= min_num_pages) + pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT, + size >> PAGE_SHIFT, entry_info, *slot); + (*slot)++; + start += size; + continue; + } + size /= 2; + continue; + } +} + +void btlb_init_per_cpu(void) +{ + unsigned long s, t, e; + int slot; + + /* BTLBs are not available on 64-bit CPUs */ + if (IS_ENABLED(CONFIG_PA20)) + return; + else if (pdc_btlb_info(&btlb_info) < 0) { + memset(&btlb_info, 0, sizeof btlb_info); + } + + /* insert BLTLBs for code and data segments */ + s = (uintptr_t) dereference_function_descriptor(&_stext); + e = (uintptr_t) dereference_function_descriptor(&_etext); + t = (uintptr_t) dereference_function_descriptor(&_sdata); + BUG_ON(t != e); + + /* code segments */ + slot = 0; + alloc_btlb(s, e, &slot, 0x13800000); + + /* sanity check */ + t = (uintptr_t) dereference_function_descriptor(&_edata); + e = (uintptr_t) dereference_function_descriptor(&__bss_start); + BUG_ON(t != e); + + /* data segments */ + s = (uintptr_t) dereference_function_descriptor(&_sdata); + e = (uintptr_t) dereference_function_descriptor(&__bss_stop); + alloc_btlb(s, e, &slot, 0x11800000); +} + #ifdef CONFIG_PA20 /* diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index e2ecd01bfac7..b55b434f0059 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -105,7 +105,7 @@ asm volatile(ALTERNATIVE( \ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | * 0000001 01001 rs1 000 00000 0001011 * dcache.cva rs1 (clean, virtual address) - * 0000001 00100 rs1 000 00000 0001011 + * 0000001 00101 rs1 000 00000 0001011 * * dcache.cipa rs1 (clean then invalidate, physical address) * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | @@ -118,7 +118,7 @@ asm volatile(ALTERNATIVE( \ * 0000000 11001 00000 000 00000 0001011 */ #define THEAD_inval_A0 ".long 0x0265000b" -#define THEAD_clean_A0 ".long 0x0245000b" +#define THEAD_clean_A0 ".long 0x0255000b" #define THEAD_flush_A0 ".long 0x0275000b" #define THEAD_SYNC_S ".long 0x0190000b" diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index f4099059ed8f..e60fbd8660c4 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -98,7 +98,13 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len, kbuf.image = image; kbuf.buf_min = lowest_paddr; kbuf.buf_max = ULONG_MAX; - kbuf.buf_align = PAGE_SIZE; + + /* + * Current riscv boot protocol requires 2MB alignment for + * RV64 and 4MB alignment for RV32 + * + */ + kbuf.buf_align = PMD_SIZE; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE); kbuf.top_down = false; diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 1b7e9fa265cb..b7e0e03c69b1 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -460,8 +460,11 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu, reg_num >= ARRAY_SIZE(kvm_isa_ext_arr)) return -ENOENT; - *reg_val = 0; host_isa_ext = kvm_isa_ext_arr[reg_num]; + if (!__riscv_isa_extension_available(NULL, host_isa_ext)) + return -ENOENT; + + *reg_val = 0; if 
(__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext)) *reg_val = 1; /* Mark the given extension as available */ @@ -842,7 +845,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu, u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i; isa_ext = kvm_isa_ext_arr[i]; - if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext)) + if (!__riscv_isa_extension_available(NULL, isa_ext)) continue; if (uindices) { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 982b777eadc7..66bfabae8814 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1945,6 +1945,7 @@ config EFI select UCS2_STRING select EFI_RUNTIME_WRAPPERS select ARCH_USE_MEMREMAP_PROT + select EFI_RUNTIME_MAP if KEXEC_CORE help This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). @@ -2020,7 +2021,6 @@ config EFI_MAX_FAKE_MEM config EFI_RUNTIME_MAP bool "Export EFI runtime maps to sysfs" if EXPERT depends on EFI - default KEXEC_CORE help Export EFI runtime memory regions to /sys/firmware/efi/runtime-map. That memory map is required by the 2nd kernel to set up EFI virtual diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index bcc956c17872..08f93b0401bb 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -59,6 +59,14 @@ static void *alloc_pgt_page(void *context) return NULL; } + /* Consumed more tables than expected? */ + if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) { + debug_putstr("pgt_buf running low in " __FILE__ "\n"); + debug_putstr("Need to raise BOOT_PGT_SIZE?\n"); + debug_putaddr(pages->pgt_buf_offset); + debug_putaddr(pages->pgt_buf_size); + } + entry = pages->pgt_buf + pages->pgt_buf_offset; pages->pgt_buf_offset += PAGE_SIZE; diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 4ae14339cb8c..b3a7cfb0d99e 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -40,23 +40,40 @@ #ifdef CONFIG_X86_64 # define BOOT_STACK_SIZE 0x4000 +/* + * Used by decompressor's startup_32() to allocate page tables for identity + * mapping of the 4G of RAM in 4-level paging mode: + * - 1 level4 table; + * - 1 level3 table; + * - 4 level2 table that maps everything with 2M pages; + * + * The additional level5 table needed for 5-level paging is allocated from + * trampoline_32bit memory. + */ # define BOOT_INIT_PGT_SIZE (6*4096) -# ifdef CONFIG_RANDOMIZE_BASE + /* - * Assuming all cross the 512GB boundary: - * 1 page for level4 - * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel - * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP). - * Total is 19 pages. + * Total number of page tables kernel_add_identity_map() can allocate, + * including page tables consumed by startup_32(). + * + * Worst-case scenario: + * - 5-level paging needs 1 level5 table; + * - KASLR needs to map kernel, boot_params, cmdline and randomized kernel, + * assuming all of them cross 256T boundary: + * + 4*2 level4 table; + * + 4*2 level3 table; + * + 4*2 level2 table; + * - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM): + * + 1 level4 table; + * + 1 level3 table; + * + 1 level2 table; + * Total: 28 tables + * + * Add 4 spare table in case decompressor touches anything beyond what is + * accounted above. Warn if it happens. 
*/ -# ifdef CONFIG_X86_VERBOSE_BOOTUP -# define BOOT_PGT_SIZE (19*4096) -# else /* !CONFIG_X86_VERBOSE_BOOTUP */ -# define BOOT_PGT_SIZE (17*4096) -# endif -# else /* !CONFIG_RANDOMIZE_BASE */ -# define BOOT_PGT_SIZE BOOT_INIT_PGT_SIZE -# endif +# define BOOT_PGT_SIZE_WARN (28*4096) +# define BOOT_PGT_SIZE (32*4096) #else /* !CONFIG_X86_64 */ # define BOOT_STACK_SIZE 0x1000 diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index b0994ae3bc23..c4555b269a1b 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -91,19 +91,6 @@ static inline void efi_fpu_end(void) #ifdef CONFIG_X86_32 #define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1) - -#define arch_efi_call_virt_setup() \ -({ \ - efi_fpu_begin(); \ - firmware_restrict_branch_speculation_start(); \ -}) - -#define arch_efi_call_virt_teardown() \ -({ \ - firmware_restrict_branch_speculation_end(); \ - efi_fpu_end(); \ -}) - #else /* !CONFIG_X86_32 */ #define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT @@ -116,14 +103,6 @@ extern bool efi_disable_ibt_for_runtime; __efi_call(__VA_ARGS__); \ }) -#define arch_efi_call_virt_setup() \ -({ \ - efi_sync_low_kernel_mappings(); \ - efi_fpu_begin(); \ - firmware_restrict_branch_speculation_start(); \ - efi_enter_mm(); \ -}) - #undef arch_efi_call_virt #define arch_efi_call_virt(p, f, args...) ({ \ u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime); \ @@ -132,13 +111,6 @@ extern bool efi_disable_ibt_for_runtime; ret; \ }) -#define arch_efi_call_virt_teardown() \ -({ \ - efi_leave_mm(); \ - firmware_restrict_branch_speculation_end(); \ - efi_fpu_end(); \ -}) - #ifdef CONFIG_KASAN /* * CONFIG_KASAN may redefine memset to __memset. __memset function is present @@ -168,8 +140,8 @@ extern void efi_delete_dummy_variable(void); extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr); extern void efi_free_boot_services(void); -void efi_enter_mm(void); -void efi_leave_mm(void); +void arch_efi_call_virt_setup(void); +void arch_efi_call_virt_teardown(void); /* kexec external ABI */ struct efi_setup_data { diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 97a3de7892d3..5ff49fd67732 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -8,6 +8,14 @@ #undef notrace #define notrace __attribute__((no_instrument_function)) +#ifdef CONFIG_64BIT +/* + * The generic version tends to create spurious ENDBR instructions under + * certain conditions. 
+ */ +#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; }) +#endif + #ifdef CONFIG_X86_32 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index d9f5d7492f83..205cee567629 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1533,7 +1533,7 @@ static void __init build_socket_tables(void) { struct uv_gam_range_entry *gre = uv_gre_table; int nums, numn, nump; - int cpu, i, lnid; + int i, lnid, apicid; int minsock = _min_socket; int maxsock = _max_socket; int minpnode = _min_pnode; @@ -1584,15 +1584,14 @@ static void __init build_socket_tables(void) /* Set socket -> node values: */ lnid = NUMA_NO_NODE; - for_each_possible_cpu(cpu) { - int nid = cpu_to_node(cpu); - int apicid, sockid; + for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) { + int nid = __apicid_to_node[apicid]; + int sockid; - if (lnid == nid) + if ((nid == NUMA_NO_NODE) || (lnid == nid)) continue; lnid = nid; - apicid = per_cpu(x86_cpu_to_apicid, cpu); sockid = apicid >> uv_cpuid.socketid_shift; if (_socket_to_node[sockid - minsock] == SOCK_EMPTY) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 4e45ff44aa07..48e040618731 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -579,7 +579,6 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) } -#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC) static inline int x86_sched_itmt_flags(void) { return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0; @@ -603,7 +602,14 @@ static int x86_cluster_flags(void) return cpu_cluster_flags() | x86_sched_itmt_flags(); } #endif -#endif + +static int x86_die_flags(void) +{ + if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) + return x86_sched_itmt_flags(); + + return 0; +} /* * Set if a package/die has multiple NUMA nodes inside. 
@@ -640,7 +646,7 @@ static void __init build_sched_topology(void) */ if (!x86_has_numa_in_package) { x86_topology[i++] = (struct sched_domain_topology_level){ - cpu_cpu_mask, SD_INIT_NAME(DIE) + cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE) }; } diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 1451e0c4ae22..235bbda6fc82 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -56,7 +56,6 @@ SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) SYM_FUNC_START(__put_user_nocheck_1) - ENDBR ASM_STAC 2: movb %al,(%_ASM_CX) xor %ecx,%ecx @@ -76,7 +75,6 @@ SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) SYM_FUNC_START(__put_user_nocheck_2) - ENDBR ASM_STAC 4: movw %ax,(%_ASM_CX) xor %ecx,%ecx @@ -96,7 +94,6 @@ SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) SYM_FUNC_START(__put_user_nocheck_4) - ENDBR ASM_STAC 6: movl %eax,(%_ASM_CX) xor %ecx,%ecx @@ -119,7 +116,6 @@ SYM_FUNC_END(__put_user_8) EXPORT_SYMBOL(__put_user_8) SYM_FUNC_START(__put_user_nocheck_8) - ENDBR ASM_STAC 9: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index e06a199423c0..b2cc7b4552a1 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -140,3 +140,15 @@ void __init efi_runtime_update_mappings(void) } } } + +void arch_efi_call_virt_setup(void) +{ + efi_fpu_begin(); + firmware_restrict_branch_speculation_start(); +} + +void arch_efi_call_virt_teardown(void) +{ + firmware_restrict_branch_speculation_end(); + efi_fpu_end(); +} diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 77f7ac3668cb..91d31ac422d6 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -474,19 +474,34 @@ void __init efi_dump_pagetable(void) * can not change under us. * It should be ensured that there are no concurrent calls to this function. */ -void efi_enter_mm(void) +static void efi_enter_mm(void) { efi_prev_mm = current->active_mm; current->active_mm = &efi_mm; switch_mm(efi_prev_mm, &efi_mm, NULL); } -void efi_leave_mm(void) +static void efi_leave_mm(void) { current->active_mm = efi_prev_mm; switch_mm(&efi_mm, efi_prev_mm, NULL); } +void arch_efi_call_virt_setup(void) +{ + efi_sync_low_kernel_mappings(); + efi_fpu_begin(); + firmware_restrict_branch_speculation_start(); + efi_enter_mm(); +} + +void arch_efi_call_virt_teardown(void) +{ + efi_leave_mm(); + firmware_restrict_branch_speculation_end(); + efi_fpu_end(); +} + static DEFINE_SPINLOCK(efi_runtime_lock); /* diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index c2a29be35c01..08aa0f25f12a 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY # optimization flags. KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS)) +# When LTO is enabled, llvm emits many text sections, which is not supported +# by kexec. Remove -flto=* flags. +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS)) + # When linking purgatory.ro with -r unresolved symbols are not checked, # also link a purgatory.chk binary without -r to check for unresolved symbols. 
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib diff --git a/block/blk-mq.c b/block/blk-mq.c index ec922c6bccbe..1fafd54dce3c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, struct blk_mq_tags **new_tags; int i; - if (set->nr_hw_queues >= new_nr_hw_queues) { - for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++) - __blk_mq_free_map_and_rqs(set, i); + if (set->nr_hw_queues >= new_nr_hw_queues) goto done; - } new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); @@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, { struct request_queue *q; LIST_HEAD(head); - int prev_nr_hw_queues; + int prev_nr_hw_queues = set->nr_hw_queues; + int i; lockdep_assert_held(&set->tag_list_lock); @@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, blk_mq_sysfs_unregister_hctxs(q); } - prev_nr_hw_queues = set->nr_hw_queues; if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) goto reregister; @@ -4781,6 +4778,10 @@ switch_back: list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_unfreeze_queue(q); + + /* Free the excess tags when nr_hw_queues shrink. */ + for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++) + __blk_mq_free_map_and_rqs(set, i); } void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) diff --git a/drivers/Makefile b/drivers/Makefile index cb0afca2e4a0..1bec7819a837 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -46,7 +46,7 @@ obj-$(CONFIG_DMADEVICES) += dma/ # SOC specific infrastructure drivers. obj-y += soc/ -obj-$(CONFIG_PM_GENERIC_DOMAINS) += genpd/ +obj-$(CONFIG_PM_GENERIC_DOMAINS) += pmdomain/ obj-y += virtio/ obj-$(CONFIG_VDPA) += vdpa/ diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index f14e68266ccd..312730f8272e 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -492,7 +492,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) } static int thermal_get_trend(struct thermal_zone_device *thermal, - struct thermal_trip *trip, + const struct thermal_trip *trip, enum thermal_trend *trend) { struct acpi_thermal *tz = thermal_zone_device_priv(thermal); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index abb5911c9d09..08745e7db820 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1883,6 +1883,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n"); + if (!(hpriv->cap & HOST_CAP_PART)) + host->flags |= ATA_HOST_NO_PART; + + if (!(hpriv->cap & HOST_CAP_SSC)) + host->flags |= ATA_HOST_NO_SSC; + + if (!(hpriv->cap2 & HOST_CAP2_SDS)) + host->flags |= ATA_HOST_NO_DEVSLP; + if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index e2bacedf28ef..f1263364fa97 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1256,6 +1256,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) return sprintf(buf, "%d\n", emp->blink_policy); } +static void ahci_port_clear_pending_irq(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* clear SError */ + tmp = readl(port_mmio + PORT_SCR_ERR); + dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp); + writel(tmp, port_mmio + PORT_SCR_ERR); + + /* clear port 
IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp); + if (tmp) + writel(tmp, port_mmio + PORT_IRQ_STAT); + + writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT); +} + static void ahci_port_init(struct device *dev, struct ata_port *ap, int port_no, void __iomem *mmio, void __iomem *port_mmio) @@ -1270,18 +1290,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap, if (rc) dev_warn(dev, "%s (%d)\n", emsg, rc); - /* clear SError */ - tmp = readl(port_mmio + PORT_SCR_ERR); - dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp); - writel(tmp, port_mmio + PORT_SCR_ERR); - - /* clear port IRQ */ - tmp = readl(port_mmio + PORT_IRQ_STAT); - dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp); - if (tmp) - writel(tmp, port_mmio + PORT_IRQ_STAT); - - writel(1 << port_no, mmio + HOST_IRQ_STAT); + ahci_port_clear_pending_irq(ap); /* mark esata ports */ tmp = readl(port_mmio + PORT_CMD); @@ -1603,6 +1612,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, tf.status = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); + ahci_port_clear_pending_irq(ap); + rc = sata_link_hardreset(link, timing, deadline, online, ahci_check_ready); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 74314311295f..0072e0f9ad39 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4783,11 +4783,8 @@ void ata_qc_complete(struct ata_queued_cmd *qc) * been aborted by the device due to a limit timeout using the policy * 0xD. For these commands, invoke EH to get the command sense data. */ - if (qc->result_tf.status & ATA_SENSE && - ((ata_is_ncq(qc->tf.protocol) && - dev->flags & ATA_DFLAG_CDL_ENABLED) || - (!ata_is_ncq(qc->tf.protocol) && - ata_id_sense_reporting_enabled(dev->id)))) { + if (qc->flags & ATA_QCFLAG_HAS_CDL && + qc->result_tf.status & ATA_SENSE) { /* * Tell SCSI EH to not overwrite scmd->result even if this * command is finished with result SAM_STAT_GOOD. diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 159ba6ba19eb..4cf4f57e57b8 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2796,23 +2796,13 @@ int ata_eh_reset(struct ata_link *link, int classify, } } - /* - * Some controllers can't be frozen very well and may set spurious - * error conditions during reset. Clear accumulated error - * information and re-thaw the port if frozen. As reset is the - * final recovery action and we cross check link onlineness against - * device classification later, no hotplug event is lost by this. - */ + /* clear cached SError */ spin_lock_irqsave(link->ap->lock, flags); - memset(&link->eh_info, 0, sizeof(link->eh_info)); + link->eh_info.serror = 0; if (slave) - memset(&slave->eh_info, 0, sizeof(link->eh_info)); - ap->pflags &= ~ATA_PFLAG_EH_PENDING; + slave->eh_info.serror = 0; spin_unlock_irqrestore(link->ap->lock, flags); - if (ata_port_is_frozen(ap)) - ata_eh_thaw_port(ap); - /* * Make sure onlineness and classification result correspond. 
* Hotplug could have happened during reset and some diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 5d31c08be013..a701e1538482 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -396,10 +396,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, case ATA_LPM_MED_POWER_WITH_DIPM: case ATA_LPM_MIN_POWER_WITH_PARTIAL: case ATA_LPM_MIN_POWER: - if (ata_link_nr_enabled(link) > 0) - /* no restrictions on LPM transitions */ + if (ata_link_nr_enabled(link) > 0) { + /* assume no restrictions on LPM transitions */ scontrol &= ~(0x7 << 8); - else { + + /* + * If the controller does not support partial, slumber, + * or devsleep, then disallow these transitions. + */ + if (link->ap->host->flags & ATA_HOST_NO_PART) + scontrol |= (0x1 << 8); + + if (link->ap->host->flags & ATA_HOST_NO_SSC) + scontrol |= (0x2 << 8); + + if (link->ap->host->flags & ATA_HOST_NO_DEVSLP) + scontrol |= (0x4 << 8); + } else { /* empty port, power off */ scontrol &= ~0xf; scontrol |= (0x1 << 2); diff --git a/drivers/ata/pata_parport/comm.c b/drivers/ata/pata_parport/comm.c index 4839becbbd56..94b8d352102e 100644 --- a/drivers/ata/pata_parport/comm.c +++ b/drivers/ata/pata_parport/comm.c @@ -37,7 +37,7 @@ static int comm_read_regr(struct pi_adapter *pi, int cont, int regr) { int l, h, r; - r = regr + cont_map[cont]; + r = regr + cont_map[cont]; switch (pi->mode) { case 0: @@ -90,7 +90,6 @@ static void comm_connect(struct pi_adapter *pi) } static void comm_disconnect(struct pi_adapter *pi) - { w2(0); w2(0); w2(0); w2(4); w0(pi->saved_r0); @@ -172,12 +171,12 @@ static void comm_write_block(struct pi_adapter *pi, char *buf, int count) w4l(swab16(((u16 *)buf)[2 * k]) | swab16(((u16 *)buf)[2 * k + 1]) << 16); break; - } + } } static void comm_log_adapter(struct pi_adapter *pi) - -{ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" }; +{ + char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" }; dev_info(&pi->dev, "DataStor Commuter at 0x%x, mode %d (%s), delay %d\n", diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index d105db5c7d81..45e48d653c60 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1255,8 +1255,8 @@ static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes) for (b = 0; b < bytes; ) { for (w = 0, o = 0; b < bytes && w < 4; w++) { - o += snprintf(linebuf + o, sizeof(linebuf) - o, - "%08x ", readl(start + b)); + o += scnprintf(linebuf + o, sizeof(linebuf) - o, + "%08x ", readl(start + b)); b += sizeof(u32); } dev_dbg(dev, "%s: %p: %s\n", diff --git a/drivers/base/core.c b/drivers/base/core.c index b7d7f410c256..4d8b315c48a1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -3537,6 +3537,8 @@ int device_add(struct device *dev) /* subsystems can specify simple device enumeration */ else if (dev->bus && dev->bus->dev_name) error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); + else + error = -EINVAL; if (error) goto name_error; diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 514f9f287a78..c6f181702b9a 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data) static int __init parisc_agp_init(void) { - extern struct sba_device *sba_list; - int err = -1; struct parisc_device *sba = NULL, *lba = NULL; struct lba_device *lbadev = NULL; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 
23f6f2eda84c..42b1062e33cd 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -33,7 +33,7 @@ const struct class tpm_class = { .shutdown_pre = tpm_class_shutdown, }; const struct class tpmrm_class = { - .name = "tmprm", + .name = "tpmrm", }; dev_t tpm_devt; diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig index 7a8d402f05be..9af280735cba 100644 --- a/drivers/comedi/Kconfig +++ b/drivers/comedi/Kconfig @@ -67,7 +67,6 @@ config COMEDI_TEST config COMEDI_PARPORT tristate "Parallel port support" - depends on HAS_IOPORT help Enable support for the standard parallel port. A cheap and easy way to get a few more digital I/O lines. Steal @@ -80,7 +79,6 @@ config COMEDI_PARPORT config COMEDI_SSV_DNP tristate "SSV Embedded Systems DIL/Net-PC support" depends on X86_32 || COMPILE_TEST - depends on HAS_IOPORT help Enable support for SSV Embedded Systems DIL/Net-PC @@ -91,7 +89,6 @@ endif # COMEDI_MISC_DRIVERS menuconfig COMEDI_ISA_DRIVERS bool "Comedi ISA and PC/104 drivers" - depends on ISA help Enable comedi ISA and PC/104 drivers to be built @@ -103,8 +100,7 @@ if COMEDI_ISA_DRIVERS config COMEDI_PCL711 tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112 @@ -165,9 +161,8 @@ config COMEDI_PCL730 config COMEDI_PCL812 tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216" - depends on HAS_IOPORT select COMEDI_ISADMA if ISA_DMA_API - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA, @@ -178,9 +173,8 @@ config COMEDI_PCL812 config COMEDI_PCL816 tristate "Advantech PCL-814 and PCL-816 ISA card support" - depends on HAS_IOPORT select COMEDI_ISADMA if ISA_DMA_API - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Advantech PCL-814 and PCL-816 ISA cards @@ -189,9 +183,8 @@ config COMEDI_PCL816 config COMEDI_PCL818 tristate "Advantech PCL-718 and PCL-818 ISA card support" - depends on HAS_IOPORT select COMEDI_ISADMA if ISA_DMA_API - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Advantech PCL-818 ISA cards PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718 @@ -210,7 +203,7 @@ config COMEDI_PCM3724 config COMEDI_AMPLC_DIO200_ISA tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E" - depends on COMEDI_AMPLC_DIO200 + select COMEDI_AMPLC_DIO200 help Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and PC272E ISA DIO boards @@ -262,8 +255,7 @@ config COMEDI_DAC02 config COMEDI_DAS16M1 tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for Measurement Computing CIO-DAS16/M1 ISA cards. 
@@ -273,7 +265,7 @@ config COMEDI_DAS16M1 config COMEDI_DAS08_ISA tristate "DAS-08 compatible ISA and PC/104 card support" - depends on COMEDI_DAS08 + select COMEDI_DAS08 help Enable support for Keithley Metrabyte/ComputerBoards DAS08 and compatible ISA and PC/104 cards: @@ -286,9 +278,8 @@ config COMEDI_DAS08_ISA config COMEDI_DAS16 tristate "DAS-16 compatible ISA and PC/104 card support" - depends on HAS_IOPORT select COMEDI_ISADMA if ISA_DMA_API - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for Keithley Metrabyte/ComputerBoards DAS16 @@ -305,8 +296,7 @@ config COMEDI_DAS16 config COMEDI_DAS800 tristate "DAS800 and compatible ISA card support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Keithley Metrabyte DAS800 and compatible ISA cards Keithley Metrabyte DAS-800, DAS-801, DAS-802 @@ -318,9 +308,8 @@ config COMEDI_DAS800 config COMEDI_DAS1800 tristate "DAS1800 and compatible ISA card support" - depends on HAS_IOPORT select COMEDI_ISADMA if ISA_DMA_API - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for DAS1800 and compatible ISA cards Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO, @@ -334,8 +323,7 @@ config COMEDI_DAS1800 config COMEDI_DAS6402 tristate "DAS6402 and compatible ISA card support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for DAS6402 and compatible ISA cards Computerboards, Keithley Metrabyte DAS6402 and compatibles @@ -414,8 +402,7 @@ config COMEDI_FL512 config COMEDI_AIO_AIO12_8 tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board @@ -469,9 +456,8 @@ config COMEDI_ADQ12B config COMEDI_NI_AT_A2150 tristate "NI AT-A2150 ISA card support" - depends on HAS_IOPORT select COMEDI_ISADMA if ISA_DMA_API - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for National Instruments AT-A2150 cards @@ -480,8 +466,7 @@ config COMEDI_NI_AT_A2150 config COMEDI_NI_AT_AO tristate "NI AT-AO-6/10 EISA card support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for National Instruments AT-AO-6/10 cards @@ -512,7 +497,7 @@ config COMEDI_NI_ATMIO16D config COMEDI_NI_LABPC_ISA tristate "NI Lab-PC and compatibles ISA support" - depends on COMEDI_NI_LABPC + select COMEDI_NI_LABPC help Enable support for National Instruments Lab-PC and compatibles Lab-PC-1200, Lab-PC-1200AI, Lab-PC+. @@ -576,7 +561,7 @@ endif # COMEDI_ISA_DRIVERS menuconfig COMEDI_PCI_DRIVERS tristate "Comedi PCI drivers" - depends on PCI && HAS_IOPORT + depends on PCI help Enable support for comedi PCI drivers. 
@@ -725,8 +710,7 @@ config COMEDI_ADL_PCI8164 config COMEDI_ADL_PCI9111 tristate "ADLink PCI-9111HR support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for ADlink PCI9111 cards @@ -736,7 +720,7 @@ config COMEDI_ADL_PCI9111 config COMEDI_ADL_PCI9118 tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support" depends on HAS_DMA - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards @@ -745,8 +729,7 @@ config COMEDI_ADL_PCI9118 config COMEDI_ADV_PCI1710 tristate "Advantech PCI-171x and PCI-1731 support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711, PCI-1713 and PCI-1731 @@ -790,8 +773,7 @@ config COMEDI_ADV_PCI1760 config COMEDI_ADV_PCI_DIO tristate "Advantech PCI DIO card support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for Advantech PCI DIO cards @@ -804,7 +786,7 @@ config COMEDI_ADV_PCI_DIO config COMEDI_AMPLC_DIO200_PCI tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support" - depends on COMEDI_AMPLC_DIO200 + select COMEDI_AMPLC_DIO200 help Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236 and PCIe296 DIO boards. @@ -832,8 +814,7 @@ config COMEDI_AMPLC_PC263_PCI config COMEDI_AMPLC_PCI224 tristate "Amplicon PCI224 and PCI234 support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Amplicon PCI224 and PCI234 AO boards @@ -842,8 +823,7 @@ config COMEDI_AMPLC_PCI224 config COMEDI_AMPLC_PCI230 tristate "Amplicon PCI230 and PCI260 support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for Amplicon PCI230 and PCI260 Multifunction I/O @@ -862,7 +842,7 @@ config COMEDI_CONTEC_PCI_DIO config COMEDI_DAS08_PCI tristate "DAS-08 PCI support" - depends on COMEDI_DAS08 + select COMEDI_DAS08 help Enable support for PCI DAS-08 cards. @@ -949,8 +929,7 @@ config COMEDI_CB_PCIDAS64 config COMEDI_CB_PCIDAS tristate "MeasurementComputing PCI-DAS support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for ComputerBoards/MeasurementComputing PCI-DAS with @@ -974,8 +953,7 @@ config COMEDI_CB_PCIDDA config COMEDI_CB_PCIMDAS tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 help Enable support for ComputerBoards/MeasurementComputing PCI Migration @@ -995,8 +973,7 @@ config COMEDI_CB_PCIMDDA config COMEDI_ME4000 tristate "Meilhaus ME-4000 support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Meilhaus PCI data acquisition cards ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is @@ -1054,7 +1031,7 @@ config COMEDI_NI_670X config COMEDI_NI_LABPC_PCI tristate "NI Lab-PC PCI-1200 support" - depends on COMEDI_NI_LABPC + select COMEDI_NI_LABPC help Enable support for National Instruments Lab-PC PCI-1200. 
@@ -1076,7 +1053,6 @@ config COMEDI_NI_PCIDIO config COMEDI_NI_PCIMIO tristate "NI PCI-MIO-E series and M series support" depends on HAS_DMA - depends on HAS_IOPORT select COMEDI_NI_TIOCMD select COMEDI_8255 help @@ -1098,8 +1074,7 @@ config COMEDI_NI_PCIMIO config COMEDI_RTD520 tristate "Real Time Devices PCI4520/DM7520 support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for Real Time Devices PCI4520/DM7520 @@ -1139,8 +1114,7 @@ if COMEDI_PCMCIA_DRIVERS config COMEDI_CB_DAS16_CS tristate "CB DAS16 series PCMCIA support" - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 help Enable support for the ComputerBoards/MeasurementComputing PCMCIA cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16 @@ -1150,7 +1124,7 @@ config COMEDI_CB_DAS16_CS config COMEDI_DAS08_CS tristate "CB DAS08 PCMCIA support" - depends on COMEDI_DAS08 + select COMEDI_DAS08 help Enable support for the ComputerBoards/MeasurementComputing DAS-08 PCMCIA card @@ -1160,7 +1134,6 @@ config COMEDI_DAS08_CS config COMEDI_NI_DAQ_700_CS tristate "NI DAQCard-700 PCMCIA support" - depends on HAS_IOPORT help Enable support for the National Instruments PCMCIA DAQCard-700 DIO @@ -1169,7 +1142,6 @@ config COMEDI_NI_DAQ_700_CS config COMEDI_NI_DAQ_DIO24_CS tristate "NI DAQ-Card DIO-24 PCMCIA support" - depends on HAS_IOPORT select COMEDI_8255 help Enable support for the National Instruments PCMCIA DAQ-Card DIO-24 @@ -1179,7 +1151,7 @@ config COMEDI_NI_DAQ_DIO24_CS config COMEDI_NI_LABPC_CS tristate "NI DAQCard-1200 PCMCIA support" - depends on COMEDI_NI_LABPC + select COMEDI_NI_LABPC help Enable support for the National Instruments PCMCIA DAQCard-1200 @@ -1188,7 +1160,6 @@ config COMEDI_NI_LABPC_CS config COMEDI_NI_MIO_CS tristate "NI DAQCard E series PCMCIA support" - depends on HAS_IOPORT select COMEDI_NI_TIO select COMEDI_8255 help @@ -1201,7 +1172,6 @@ config COMEDI_NI_MIO_CS config COMEDI_QUATECH_DAQP_CS tristate "Quatech DAQP PCMCIA data capture card support" - depends on HAS_IOPORT help Enable support for the Quatech DAQP PCMCIA data capture cards DAQP-208 and DAQP-308 @@ -1278,14 +1248,12 @@ endif # COMEDI_USB_DRIVERS config COMEDI_8254 tristate - depends on HAS_IOPORT config COMEDI_8255 tristate config COMEDI_8255_SA tristate "Standalone 8255 support" - depends on HAS_IOPORT select COMEDI_8255 help Enable support for 8255 digital I/O as a standalone driver. @@ -1317,7 +1285,7 @@ config COMEDI_KCOMEDILIB called kcomedilib. config COMEDI_AMPLC_DIO200 - depends on COMEDI_8254 + select COMEDI_8254 tristate config COMEDI_AMPLC_PC236 @@ -1326,7 +1294,7 @@ config COMEDI_AMPLC_PC236 config COMEDI_DAS08 tristate - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 config COMEDI_ISADMA @@ -1334,8 +1302,7 @@ config COMEDI_ISADMA config COMEDI_NI_LABPC tristate - depends on HAS_IOPORT - depends on COMEDI_8254 + select COMEDI_8254 select COMEDI_8255 config COMEDI_NI_LABPC_ISADMA diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index a3104e35412c..aa597cda0d88 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) * without actually having a link. 
*/ create: - device = kzalloc(sizeof(*device), GFP_KERNEL); + device = kzalloc(sizeof(*device), GFP_ATOMIC); if (device == NULL) break; diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 88466b663482..f40c81534381 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) { struct fw_node *node; - node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL); + node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC); if (node == NULL) return NULL; diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c index ca61f4733ea5..9a655f30ba47 100644 --- a/drivers/firmware/efi/libstub/unaccepted_memory.c +++ b/drivers/firmware/efi/libstub/unaccepted_memory.c @@ -62,7 +62,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc, bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start, EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE); - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*unaccepted_table) + bitmap_size, (void **)&unaccepted_table); if (status != EFI_SUCCESS) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dc2d53081e80..a79d53bdbe13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1293,7 +1293,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); int amdgpu_device_pci_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); -bool amdgpu_sg_display_supported(struct amdgpu_device *adev); bool amdgpu_device_pcie_dynamic_switching_supported(void); bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); bool amdgpu_device_aspm_support_quirk(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index cdf6087706aa..25d5fda5b243 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -478,7 +478,7 @@ void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *c cu_info->cu_active_number = acu_info.number; cu_info->cu_ao_mask = acu_info.ao_cu_mask; memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0], - sizeof(acu_info.bitmap)); + sizeof(cu_info->cu_bitmap)); cu_info->num_shader_engines = adev->gfx.config.max_shader_engines; cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index f1f2c24de081..69810b3f1c63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -980,8 +980,7 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data, - uint32_t inst) + uint32_t *reg_data) { *reg_data = wait_times; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index ecaead24e8c9..67bcaa3d4226 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -55,5 +55,4 @@ void 
kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data, - uint32_t inst); + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index fa5ee96f8845..3c45a188b701 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -1103,8 +1103,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data, - uint32_t inst) + uint32_t *reg_data) { *reg_data = wait_times; @@ -1120,8 +1119,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, SCH_WAVE, grace_period); - *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), - mmCP_IQ_WAIT_TIME2); + *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); } void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 936e501908ce..ce424615f59b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -100,5 +100,4 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data, - uint32_t inst); + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3f001a50b34a..30c4f5cca02c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1245,32 +1245,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) } /* - * On APUs with >= 64GB white flickering has been observed w/ SG enabled. - * Disable S/G on such systems until we have a proper fix. - * https://gitlab.freedesktop.org/drm/amd/-/issues/2354 - * https://gitlab.freedesktop.org/drm/amd/-/issues/2735 - */ -bool amdgpu_sg_display_supported(struct amdgpu_device *adev) -{ - switch (amdgpu_sg_display) { - case -1: - break; - case 0: - return false; - case 1: - return true; - default: - return false; - } - if ((totalram_pages() << (PAGE_SHIFT - 10)) + - (adev->gmc.real_vram_size / 1024) >= 64000000) { - DRM_WARN("Disabling S/G due to >=64GB RAM\n"); - return false; - } - return true; -} - -/* * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic * speed switching. Until we have confirmation from Intel that a specific host * supports it, it's safer that we keep it disabled for all. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 395c1768b9fc..0ca95c4d4bfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -43,6 +43,7 @@ #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L #define AMDGPU_MAX_GC_INSTANCES 8 +#define KGD_MAX_QUEUES 128 #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES @@ -257,7 +258,7 @@ struct amdgpu_cu_info { uint32_t number; uint32_t ao_cu_mask; uint32_t ao_cu_bitmap[4][4]; - uint32_t bitmap[4][4]; + uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4]; }; struct amdgpu_gfx_ras { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 99f4df133ed3..d30dc0b718c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -839,7 +839,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0], sizeof(adev->gfx.cu_info.ao_cu_bitmap)); memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], - sizeof(adev->gfx.cu_info.bitmap)); + sizeof(dev_info->cu_bitmap)); dev_info->vram_type = adev->gmc.vram_type; dev_info->vram_bit_width = adev->gmc.vram_width; dev_info->vce_harvest_config = adev->vce.harvest_config; @@ -940,12 +940,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct atom_context *atom_context; atom_context = adev->mode_info.atom_context; - memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name)); - memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn)); - vbios_info.version = atom_context->version; - memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str, - sizeof(atom_context->vbios_ver_str)); - memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date)); + if (atom_context) { + memcpy(vbios_info.name, atom_context->name, + sizeof(atom_context->name)); + memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, + sizeof(atom_context->vbios_pn)); + vbios_info.version = atom_context->version; + memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str, + sizeof(atom_context->vbios_ver_str)); + memcpy(vbios_info.date, atom_context->date, + sizeof(atom_context->date)); + } return copy_to_user(out, &vbios_info, min((size_t)size, sizeof(vbios_info))) ? 
-EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3c4600e15b86..937c54fc7174 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1052,7 +1052,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, info->ce_count = obj->err_data.ce_count; if (err_data.ce_count) { - if (adev->smuio.funcs && + if (!adev->aid_mask && + adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { dev_info(adev->dev, "socket: %d, die: %d " @@ -1072,7 +1073,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } } if (err_data.ue_count) { - if (adev->smuio.funcs && + if (!adev->aid_mask && + adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { dev_info(adev->dev, "socket: %d, die: %d " diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index c6b4337eb20c..10df731998b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -81,7 +81,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, unsigned int size) { struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size, - GFP_KERNEL, true, 0); + GFP_KERNEL, false, 0); if (IS_ERR(sa)) { *sa_bo = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 0aee9c8288a2..9032d7a24d7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -9449,7 +9449,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev); - cu_info->bitmap[i][j] = bitmap; + cu_info->bitmap[0][i][j] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { if (bitmap & mask) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 5c3db694afa8..762d7a19f1be 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -6368,7 +6368,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} */ - cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap; + cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { if (bitmap & mask) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index da6caff78c22..34f9211b2679 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3577,7 +3577,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) gfx_v6_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); bitmap = gfx_v6_0_get_cu_enabled(adev); - cu_info->bitmap[i][j] = bitmap; + cu_info->bitmap[0][i][j] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { if (bitmap & mask) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 90b034b173c1..c2faf6b4c2fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -5119,7 +5119,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) gfx_v7_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); bitmap = gfx_v7_0_get_cu_active_bitmap(adev); - cu_info->bitmap[i][j] = bitmap; + 
cu_info->bitmap[0][i][j] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { if (bitmap & mask) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 51c1745c8369..885ebd703260 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -7121,7 +7121,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) gfx_v8_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); bitmap = gfx_v8_0_get_cu_active_bitmap(adev); - cu_info->bitmap[i][j] = bitmap; + cu_info->bitmap[0][i][j] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { if (bitmap & mask) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 458faf657042..fd61574a737c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1499,7 +1499,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { - if (cu_info->bitmap[i][j] & mask) { + if (cu_info->bitmap[0][i][j] & mask) { if (counter == pg_always_on_cu_num) WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap); if (counter < always_on_cu_num) @@ -7233,7 +7233,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, * SE6,SH0 --> bitmap[2][1] * SE7,SH0 --> bitmap[3][1] */ - cu_info->bitmap[i % 4][j + i / 4] = bitmap; + cu_info->bitmap[0][i % 4][j + i / 4] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { if (bitmap & mask) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 0a26a00074a6..18ce5fe45f6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -4259,7 +4259,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) } static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, - u32 bitmap) + u32 bitmap, int xcc_id) { u32 data; @@ -4269,15 +4269,15 @@ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data); + WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data); } -static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev) +static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id) { u32 data, mask; - data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG); + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG); data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; @@ -4290,7 +4290,7 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev) static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info) { - int i, j, k, counter, active_cu_number = 0; + int i, j, k, counter, xcc_id, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; unsigned disable_masks[4 * 4]; @@ -4309,46 +4309,38 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, adev->gfx.config.max_sh_per_se); 
mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - mask = 1; - ao_bitmap = 0; - counter = 0; - gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0); - gfx_v9_4_3_set_user_cu_inactive_bitmap( - adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); - bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev); - - /* - * The bitmap(and ao_cu_bitmap) in cu_info structure is - * 4x4 size array, and it's usually suitable for Vega - * ASICs which has 4*2 SE/SH layout. - * But for Arcturus, SE/SH layout is changed to 8*1. - * To mostly reduce the impact, we make it compatible - * with current bitmap array as below: - * SE4,SH0 --> bitmap[0][1] - * SE5,SH0 --> bitmap[1][1] - * SE6,SH0 --> bitmap[2][1] - * SE7,SH0 --> bitmap[3][1] - */ - cu_info->bitmap[i % 4][j + i / 4] = bitmap; - - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { - if (bitmap & mask) { - if (counter < adev->gfx.config.max_cu_per_sh) - ao_bitmap |= mask; - counter++; + for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + mask = 1; + ao_bitmap = 0; + counter = 0; + gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id); + gfx_v9_4_3_set_user_cu_inactive_bitmap( + adev, + disable_masks[i * adev->gfx.config.max_sh_per_se + j], + xcc_id); + bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id); + + cu_info->bitmap[xcc_id][i][j] = bitmap; + + for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { + if (bitmap & mask) { + if (counter < adev->gfx.config.max_cu_per_sh) + ao_bitmap |= mask; + counter++; + } + mask <<= 1; } - mask <<= 1; + active_cu_number += counter; + if (i < 2 && j < 2) + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } - active_cu_number += counter; - if (i < 2 && j < 2) - ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); - cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } + gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, + xcc_id); } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c index d5ed9e0e1a5f..e5b5b0f4940f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c @@ -345,6 +345,9 @@ static void nbio_v4_3_init_registers(struct amdgpu_device *adev) data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK; WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data); } + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 40d23738ee4e..8b2ff2b281b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -766,7 +766,7 @@ static int soc21_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ adev->nbio.funcs->enable_doorbell_aperture(adev, true); diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 86fb7ac7982a..f76b7aee5c0a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2087,7 +2087,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info); cu->num_simd_per_cu = cu_info.simd_per_cu; - cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; + cu->num_simd_cores = cu_info.simd_per_cu * + (cu_info.cu_active_number / kdev->kfd->num_nodes); cu->max_waves_simd = cu_info.max_waves_per_simd; cu->wave_front_size = cu_info.wave_front_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 387a8ef49385..74c2d7a0d628 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -79,6 +79,10 @@ struct crat_header { #define CRAT_SUBTYPE_IOLINK_AFFINITY 5 #define CRAT_SUBTYPE_MAX 6 +/* + * Do not change the value of CRAT_SIBLINGMAP_SIZE from 32 + * as it breaks the ABI. + */ #define CRAT_SIBLINGMAP_SIZE 32 /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b166f30f083e..8a6cb41444a4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1677,8 +1677,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm->dev->kfd2kgd->build_grace_period_packet_info( dqm->dev->adev, dqm->wait_times, grace_period, ®_offset, - &dqm->wait_times, - ffs(dqm->dev->xcc_mask) - 1); + &dqm->wait_times); } dqm_unlock(dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index c2e0b79dcc6d..7b38537c7c99 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -162,6 +162,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, return NULL; *doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx); + inx *= 2; pr_debug("Get kernel queue doorbell\n" " doorbell offset == 0x%08X\n" @@ -176,6 +177,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) unsigned int inx; inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr); + inx /= 2; mutex_lock(&kfd->doorbell_mutex); __clear_bit(inx, kfd->doorbell_bitmap); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index d01bb57733b3..447829c22295 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -97,18 +97,22 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd, void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, const uint32_t *cu_mask, uint32_t cu_mask_count, - uint32_t *se_mask) + uint32_t *se_mask, uint32_t inst) { struct kfd_cu_info cu_info; uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0}; bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0); uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1; - int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1; + int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 
2 : 1; + uint32_t cu_active_per_node; + int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask); + int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1; amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info); - if (cu_mask_count > cu_info.cu_active_number) - cu_mask_count = cu_info.cu_active_number; + cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes; + if (cu_mask_count > cu_active_per_node) + cu_mask_count = cu_active_per_node; /* Exceeding these bounds corrupts the stack and indicates a coding error. * Returning with no CU's enabled will hang the queue, which should be @@ -141,7 +145,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, for (se = 0; se < cu_info.num_shader_engines; se++) for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) cu_per_sh[se][sh] = hweight32( - cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]); + cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) * + cu_bitmap_sh_mul]); /* Symmetrically map cu_mask to all SEs & SHs: * se_mask programs up to 2 SH in the upper and lower 16 bits. @@ -164,20 +169,33 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1) * ... * + * For GFX 9.4.3, the following code only looks at a + * subset of the cu_mask corresponding to the inst parameter. + * If we have n XCCs under one GPU node + * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0) + * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0) + * .. + * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0) + * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0) + * + * For example, if there are 6 XCCs under 1 KFD node, this code + * running for each inst, will look at the bits as: + * inst, inst + 6, inst + 12... + * * First ensure all CUs are disabled, then enable user specified CUs. 
*/ for (i = 0; i < cu_info.num_shader_engines; i++) se_mask[i] = 0; - i = 0; - for (cu = 0; cu < 16; cu += inc) { + i = inst; + for (cu = 0; cu < 16; cu += cu_inc) { for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) { for (se = 0; se < cu_info.num_shader_engines; se++) { if (cu_per_sh[se][sh] > cu) { if (cu_mask[i / 32] & (en_mask << (i % 32))) se_mask[se] |= en_mask << (cu + sh * 16); i += inc; - if (i == cu_mask_count) + if (i >= cu_mask_count) return; } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 23158db7da03..57bf5e513f4d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -138,7 +138,7 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd, void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, const uint32_t *cu_mask, uint32_t cu_mask_count, - uint32_t *se_mask); + uint32_t *se_mask, uint32_t inst); int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index ee1d32d957f2..1a4a69943c71 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, return; mqd_symmetrically_map_cu_mask(mm, - minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 83699392c808..8b7fed913526 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, return; mqd_symmetrically_map_cu_mask(mm, - minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 0bbf0edbabd4..15277f1d5cf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -71,7 +71,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, } mqd_symmetrically_map_cu_mask(mm, - minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0); m->compute_static_thread_mgmt_se0 = se_mask[0]; m->compute_static_thread_mgmt_se1 = se_mask[1]; @@ -321,6 +321,43 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, return 0; } +static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst) +{ + struct v11_compute_mqd *m; + + m = get_mqd(mqd); + + memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd)); +} + +static void restore_mqd(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *qp, + const void *mqd_src, + const void *ctl_stack_src, const u32 ctl_stack_size) +{ + uint64_t addr; + struct v11_compute_mqd *m; + + m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr; + addr = mqd_mem_obj->gpu_addr; + + memcpy(m, mqd_src, sizeof(*m)); + + *mqd = m; + if (gart_addr) + *gart_addr = addr; + + 
m->cp_hqd_pq_doorbell_control = + qp->doorbell_off << + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + pr_debug("cp_hqd_pq_doorbell_control 0x%x\n", + m->cp_hqd_pq_doorbell_control); + + qp->is_active = 0; +} + + static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -458,6 +495,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, mqd->mqd_size = sizeof(struct v11_compute_mqd); mqd->get_wave_state = get_wave_state; mqd->mqd_stride = kfd_mqd_stride; + mqd->checkpoint_mqd = checkpoint_mqd; + mqd->restore_mqd = restore_mqd; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -502,6 +541,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, mqd->update_mqd = update_mqd_sdma; mqd->destroy_mqd = kfd_destroy_mqd_sdma; mqd->is_occupied = kfd_is_occupied_sdma; + mqd->checkpoint_mqd = checkpoint_mqd; + mqd->restore_mqd = restore_mqd; mqd->mqd_size = sizeof(struct v11_sdma_mqd); mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index e23d32f35607..42d881809dc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -60,7 +60,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct mqd_update_info *minfo) + struct mqd_update_info *minfo, uint32_t inst) { struct v9_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; @@ -69,27 +69,36 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, return; mqd_symmetrically_map_cu_mask(mm, - minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst); m = get_mqd(mqd); + m->compute_static_thread_mgmt_se0 = se_mask[0]; m->compute_static_thread_mgmt_se1 = se_mask[1]; m->compute_static_thread_mgmt_se2 = se_mask[2]; m->compute_static_thread_mgmt_se3 = se_mask[3]; - m->compute_static_thread_mgmt_se4 = se_mask[4]; - m->compute_static_thread_mgmt_se5 = se_mask[5]; - m->compute_static_thread_mgmt_se6 = se_mask[6]; - m->compute_static_thread_mgmt_se7 = se_mask[7]; - - pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n", - m->compute_static_thread_mgmt_se0, - m->compute_static_thread_mgmt_se1, - m->compute_static_thread_mgmt_se2, - m->compute_static_thread_mgmt_se3, - m->compute_static_thread_mgmt_se4, - m->compute_static_thread_mgmt_se5, - m->compute_static_thread_mgmt_se6, - m->compute_static_thread_mgmt_se7); + if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) { + m->compute_static_thread_mgmt_se4 = se_mask[4]; + m->compute_static_thread_mgmt_se5 = se_mask[5]; + m->compute_static_thread_mgmt_se6 = se_mask[6]; + m->compute_static_thread_mgmt_se7 = se_mask[7]; + + pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n", + m->compute_static_thread_mgmt_se0, + m->compute_static_thread_mgmt_se1, + m->compute_static_thread_mgmt_se2, + m->compute_static_thread_mgmt_se3, + m->compute_static_thread_mgmt_se4, + m->compute_static_thread_mgmt_se5, + m->compute_static_thread_mgmt_se6, + m->compute_static_thread_mgmt_se7); + } else { + pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n", + inst, m->compute_static_thread_mgmt_se0, + m->compute_static_thread_mgmt_se1, + m->compute_static_thread_mgmt_se2, + m->compute_static_thread_mgmt_se3); + } } static void set_priority(struct v9_mqd *m, 
struct queue_properties *q) @@ -290,7 +299,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = 0; - update_cu_mask(mm, mqd, minfo); + if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) + update_cu_mask(mm, mqd, minfo, 0); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -676,6 +686,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd + size * xcc); update_mqd(mm, m, q, minfo); + update_cu_mask(mm, mqd, minfo, xcc); + if (q->format == KFD_QUEUE_FORMAT_AQL) { switch (xcc) { case 0: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 657c37822980..3e1a574d4ea6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -55,7 +55,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, return; mqd_symmetrically_map_cu_mask(mm, - minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 8ce6f5200905..1a03173e2313 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -299,8 +299,7 @@ static int pm_set_grace_period_v9(struct packet_manager *pm, pm->dqm->wait_times, grace_period, ®_offset, - ®_data, - 0); + ®_data); if (grace_period == USE_DEFAULT_GRACE_PERIOD) reg_data = pm->dqm->wait_times; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 3d9ce44d88da..fa24e1852493 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1466,8 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { - return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || - KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || + return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) || (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) || KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index ff98fded9534..c8c75ff7cea8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -450,8 +450,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count", dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, offs, "simd_count", - dev->gpu ? (dev->node_props.simd_count * - NUM_XCC(dev->gpu->xcc_mask)) : 0); + dev->gpu ? 
dev->node_props.simd_count : 0); sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", dev->node_props.mem_banks_count); sysfs_show_32bit_prop(buffer, offs, "caches_count", @@ -1597,14 +1596,17 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext, static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, struct kfd_gpu_cache_info *pcache_info, struct kfd_cu_info *cu_info, - int cache_type, unsigned int cu_processor_id) + int cache_type, unsigned int cu_processor_id, + struct kfd_node *knode) { unsigned int cu_sibling_map_mask; int first_active_cu; - int i, j, k; + int i, j, k, xcc, start, end; struct kfd_cache_properties *pcache = NULL; - cu_sibling_map_mask = cu_info->cu_bitmap[0][0]; + start = ffs(knode->xcc_mask) - 1; + end = start + NUM_XCC(knode->xcc_mask); + cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0]; cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1); first_active_cu = ffs(cu_sibling_map_mask); @@ -1639,16 +1641,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1); k = 0; - for (i = 0; i < cu_info->num_shader_engines; i++) { - for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) { - pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF); - pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF); - pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF); - pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF); - k += 4; - - cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4]; - cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1); + for (xcc = start; xcc < end; xcc++) { + for (i = 0; i < cu_info->num_shader_engines; i++) { + for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) { + pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF); + pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF); + pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF); + pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF); + k += 4; + + cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4]; + cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1); + } } } pcache->sibling_map_size = k; @@ -1666,7 +1670,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev) { struct kfd_gpu_cache_info *pcache_info = NULL; - int i, j, k; + int i, j, k, xcc, start, end; int ct = 0; unsigned int cu_processor_id; int ret; @@ -1700,37 +1704,42 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct * then it will consider only one CU from * the shared unit */ + start = ffs(kdev->xcc_mask) - 1; + end = start + NUM_XCC(kdev->xcc_mask); + for (ct = 0; ct < num_of_cache_types; ct++) { cu_processor_id = gpu_processor_id; if (pcache_info[ct].cache_level == 1) { - for (i = 0; i < pcu_info->num_shader_engines; i++) { - for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) { - for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) { + for (xcc = start; xcc < end; xcc++) { + for (i = 0; i < pcu_info->num_shader_engines; i++) { + for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) { + for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) { - ret = fill_in_l1_pcache(&props_ext, 
pcache_info, pcu_info, - pcu_info->cu_bitmap[i % 4][j + i / 4], ct, + ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info, + pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct, cu_processor_id, k); - if (ret < 0) - break; + if (ret < 0) + break; - if (!ret) { - num_of_entries++; - list_add_tail(&props_ext->list, &dev->cache_props); - } + if (!ret) { + num_of_entries++; + list_add_tail(&props_ext->list, &dev->cache_props); + } - /* Move to next CU block */ - num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <= - pcu_info->num_cu_per_sh) ? - pcache_info[ct].num_cu_shared : - (pcu_info->num_cu_per_sh - k); - cu_processor_id += num_cu_shared; + /* Move to next CU block */ + num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <= + pcu_info->num_cu_per_sh) ? + pcache_info[ct].num_cu_shared : + (pcu_info->num_cu_per_sh - k); + cu_processor_id += num_cu_shared; + } } } } } else { ret = fill_in_l2_l3_pcache(&props_ext, pcache_info, - pcu_info, ct, cu_processor_id); + pcu_info, ct, cu_processor_id, kdev); if (ret < 0) break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index dea32a9e5506..27386ce9a021 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -89,7 +89,7 @@ struct kfd_mem_properties { struct attribute attr; }; -#define CACHE_SIBLINGMAP_SIZE 64 +#define CACHE_SIBLINGMAP_SIZE 128 struct kfd_cache_properties { struct list_head list; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 88ba8b66de1f..c6fd34bab358 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1274,11 +1274,15 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF; - page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); - page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF; - page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12); - page_table_base.high_part = upper_32_bits(pt_base) & 0xF; + page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_base.high_part = upper_32_bits(pt_base); page_table_base.low_part = lower_32_bits(pt_base); pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; @@ -1640,8 +1644,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } break; } - if (init_data.flags.gpu_vm_support) - init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev); + if (init_data.flags.gpu_vm_support && + (amdgpu_sg_display == 0)) + init_data.flags.gpu_vm_support = false; if (init_data.flags.gpu_vm_support) adev->mode_info.gpu_vm_support = true; @@ -2335,14 +2340,62 @@ static int dm_late_init(void *handle) return detect_mst_link_for_all_connectors(adev_to_drm(adev)); } +static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret; + u8 guid[16]; + u64 tmp64; + + mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; + + if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { + drm_dbg_kms(mgr->dev, "dpcd read failed - 
undocked during suspend?\n"); + goto out_fail; + } + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); + goto out_fail; + } + + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (ret != 16) { + drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + + if (memchr_inv(guid, 0, 16) == NULL) { + tmp64 = get_jiffies_64(); + memcpy(&guid[0], &tmp64, sizeof(u64)); + memcpy(&guid[8], &tmp64, sizeof(u64)); + + ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); + + if (ret != 16) { + drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); + goto out_fail; + } + } + + memcpy(mgr->mst_primary->guid, guid, 16); + +out_fail: + mutex_unlock(&mgr->lock); +} + static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; - int ret; - bool need_hotplug = false; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -2364,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (!dp_is_lttpr_present(aconnector->dc_link)) try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); - ret = drm_dp_mst_topology_mgr_resume(mgr, true); - if (ret < 0) { - dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, - aconnector->dc_link); - need_hotplug = true; - } + /* TODO: move resume_mst_branch_status() into drm mst resume again + * once topology probing work is pulled out from mst resume into mst + * resume 2nd step. mst resume 2nd step should be called after old + * state getting restored (i.e. drm_atomic_helper_resume()). 
+ */ + resume_mst_branch_status(mgr); } } drm_connector_list_iter_end(&iter); - - if (need_hotplug) - drm_kms_helper_hotplug_event(dev); } static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) @@ -2769,7 +2819,8 @@ static int dm_resume(void *handle) struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; - int i, r, j; + int i, r, j, ret; + bool need_hotplug = false; if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; @@ -2867,7 +2918,7 @@ static int dm_resume(void *handle) continue; /* - * this is the case when traversing through already created + * this is the case when traversing through already created end sink * MST connectors, should be skipped */ if (aconnector && aconnector->mst_root) @@ -2927,6 +2978,27 @@ static int dm_resume(void *handle) dm->cached_state = NULL; + /* Do mst topology probing after resuming cached state*/ + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->dc_link->type != dc_connection_mst_branch || + aconnector->mst_root) + continue; + + ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); + + if (ret < 0) { + dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, + aconnector->dc_link); + need_hotplug = true; + } + } + drm_connector_list_iter_end(&iter); + + if (need_hotplug) + drm_kms_helper_hotplug_event(ddev); + amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); @@ -8073,7 +8145,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; - if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) { + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || + acrtc_state->stream->link->replay_settings.replay_feature_enabled) { fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, &bundle->flip_addrs[planes_count], diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index a2d34be82613..9e4cc5eeda76 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -620,7 +620,7 @@ struct amdgpu_hdmi_vsdb_info { unsigned int max_refresh_rate_hz; /** - * @replay mode: Replay supported + * @replay_mode: Replay supported */ bool replay_mode; }; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 30c0644d4418..be5a6d008b29 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -169,11 +169,23 @@ static void add_link_enc_assignment( /* Return first available DIG link encoder. 
*/ static enum engine_id find_first_avail_link_enc( const struct dc_context *ctx, - const struct dc_state *state) + const struct dc_state *state, + enum engine_id eng_id_requested) { enum engine_id eng_id = ENGINE_ID_UNKNOWN; int i; + if (eng_id_requested != ENGINE_ID_UNKNOWN) { + + for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { + eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; + if (eng_id == eng_id_requested) + return eng_id; + } + } + + eng_id = ENGINE_ID_UNKNOWN; + for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) { eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i]; if (eng_id != ENGINE_ID_UNKNOWN) @@ -287,7 +299,7 @@ void link_enc_cfg_link_encs_assign( struct dc_stream_state *streams[], uint8_t stream_count) { - enum engine_id eng_id = ENGINE_ID_UNKNOWN; + enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN; int i; int j; @@ -377,8 +389,14 @@ void link_enc_cfg_link_encs_assign( * assigned to that endpoint. */ link_enc = get_link_enc_used_by_link(state, stream->link); - if (link_enc == NULL) - eng_id = find_first_avail_link_enc(stream->ctx, state); + if (link_enc == NULL) { + + if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN) + eng_id_req = stream->link->dpia_preferred_eng_id; + + eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req); + } else eng_id = link_enc->preferred_engine; @@ -402,7 +420,9 @@ void link_enc_cfg_link_encs_assign( DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n", __func__, assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA", - assignment.ep_id.link_id.enum_id - 1, + assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? + assignment.ep_id.link_id.enum_id : + assignment.ep_id.link_id.enum_id - 1, assignment.eng_id); } for (i = 0; i < MAX_PIPES; i++) { @@ -413,7 +433,9 @@ void link_enc_cfg_link_encs_assign( DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n", __func__, assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA", - assignment.ep_id.link_id.enum_id - 1, + assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? + assignment.ep_id.link_id.enum_id : + assignment.ep_id.link_id.enum_id - 1, assignment.eng_id); } @@ -478,7 +500,6 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc( if (stream) link = stream->link; - // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id); return link; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0d0bef8eb331..31e3183497a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1496,6 +1496,7 @@ struct dc_link { * object creation. 
*/ enum engine_id eng_id; + enum engine_id dpia_preferred_eng_id; bool test_pattern_enabled; enum dp_test_pattern current_test_pattern; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index ad967b58d7be..478281f2a5ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -964,7 +964,9 @@ void dce110_edp_backlight_control( return; } - if (link->panel_cntl) { + if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled || + link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) { bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl); if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 1c1fb2fa0822..004beed9bd44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -1032,6 +1032,28 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) }; +/* ========================================================== */ + +/* + * DPIA index | Preferred Encoder | Host Router + * 0 | C | 0 + * 1 | First Available | 0 + * 2 | D | 1 + * 3 | First Available | 1 + */ +/* ========================================================== */ +static const enum engine_id dpia_to_preferred_enc_id_table[] = { + ENGINE_ID_DIGC, + ENGINE_ID_DIGC, + ENGINE_ID_DIGD, + ENGINE_ID_DIGD +}; + +static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index) +{ + return dpia_to_preferred_enc_id_table[dpia_index]; +} + static struct dce_i2c_hw *dcn31_i2c_hw_create( struct dc_context *ctx, uint32_t inst) @@ -1785,6 +1807,7 @@ static struct resource_funcs dcn314_res_pool_funcs = { .update_bw_bounding_box = dcn314_update_bw_bounding_box, .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn314_get_panel_config_defaults, + .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, }; static struct clock_source *dcn30_clock_source_create( diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 027aec70c070..eaad1260bfd1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -65,6 +65,7 @@ struct resource_context; struct clk_bw_params; struct resource_funcs { + enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index); void (*destroy)(struct resource_pool **pool); void (*link_init)(struct dc_link *link); struct panel_cntl*(*panel_cntl_create)( diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 195ca9e52eda..0895742a3102 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -791,6 +791,10 @@ static bool construct_dpia(struct dc_link *link, /* Set dpia port index : 0 to number of dpia ports */ link->ddc_hw_inst = init_params->connector_index; + // Assign Dpia preferred eng_id + if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia) + link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst); + /* TODO: Create link encoder */ 
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 8433f99f6667..3b5a56585c4b 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -31,12 +31,12 @@ #include <linux/types.h> #include <linux/bitmap.h> #include <linux/dma-fence.h> +#include "amdgpu_irq.h" +#include "amdgpu_gfx.h" struct pci_dev; struct amdgpu_device; -#define KGD_MAX_QUEUES 128 - struct kfd_dev; struct kgd_mem; @@ -68,7 +68,7 @@ struct kfd_cu_info { uint32_t wave_front_size; uint32_t max_scratch_slots_per_cu; uint32_t lds_size; - uint32_t cu_bitmap[4][4]; + uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4]; }; /* For getting GPU local memory information from KGD */ @@ -326,8 +326,7 @@ struct kfd2kgd_calls { uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data, - uint32_t inst); + uint32_t *reg_data); void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, int *wave_cnt, int *max_waves_per_cu, uint32_t inst); void (*program_trap_handler_settings)(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 199a673b8120..de80e191a92c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -336,7 +336,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) /* Store one-time values in driver PPTable */ if (!pptable->Init) { - while (retry--) { + while (--retry) { ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index bf8371dc2a61..c44d5bcf1284 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2203,6 +2203,7 @@ static int drm_mode_create_colorspace_property(struct drm_connector *connector, /** * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property * @connector: connector to create the Colorspace property on. + * @supported_colorspaces: bitmap of supported color spaces * * Called by a driver the first time it's needed, must be attached to desired * HDMI connectors. @@ -2227,6 +2228,7 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); /** * drm_mode_create_dp_colorspace_property - create dp colorspace property * @connector: connector to create the Colorspace property on. + * @supported_colorspaces: bitmap of supported color spaces * * Called by a driver the first time it's needed, must be attached to desired * DP connectors. 
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c index ff69cf0fb42a..5d2809de4517 100644 --- a/drivers/gpu/drm/drm_exec.c +++ b/drivers/gpu/drm/drm_exec.c @@ -56,7 +56,7 @@ static void drm_exec_unlock_all(struct drm_exec *exec) struct drm_gem_object *obj; unsigned long index; - drm_exec_for_each_locked_object(exec, index, obj) { + drm_exec_for_each_locked_object_reverse(exec, index, obj) { dma_resv_unlock(obj->resv); drm_gem_object_put(obj); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 858c959f7bab..f735b035436c 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -3540,6 +3540,27 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) return map_aux_ch(devdata->i915, devdata->child.aux_channel); } +bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata) +{ + struct drm_i915_private *i915; + u8 aux_channel; + int count = 0; + + if (!devdata || !devdata->child.aux_channel) + return false; + + i915 = devdata->i915; + aux_channel = devdata->child.aux_channel; + + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + if (intel_bios_encoder_supports_dp(devdata) && + aux_channel == devdata->child.aux_channel) + count++; + } + + return count > 1; +} + int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 9680e3e92bb5..49e24b7cf675 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -273,6 +273,7 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata); +bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata); int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 12bd2f322e62..e0e4cb529284 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5512,8 +5512,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, /* * VBT and straps are liars. Also check HPD as that seems * to be the most reliable piece of information available. + * + * ... expect on devices that forgot to hook HPD up for eDP + * (eg. Acer Chromebook C710), so we'll check it only if multiple + * ports are attempting to use the same AUX CH, according to VBT. */ - if (!intel_digital_port_connected(encoder)) { + if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) && + !intel_digital_port_connected(encoder)) { /* * If this fails, presume the DPCD answer came * from some other port using the same AUX CH. 
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index c87a57c9c592..22dd8b445685 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -123,7 +123,7 @@ int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager, unsigned int size, unsigned int align) { struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size, - GFP_KERNEL, true, align); + GFP_KERNEL, false, align); if (IS_ERR(sa)) { *sa_bo = NULL; diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index c5bb683e440c..0187539ff5ea 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -70,10 +70,10 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); #define READ_STATUS_SIZE 13 #define MISC_VALUE_SIZE 4 -#define CMD_TIMEOUT msecs_to_jiffies(200) -#define DATA_TIMEOUT msecs_to_jiffies(1000) -#define IDLE_TIMEOUT msecs_to_jiffies(2000) -#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000) +#define CMD_TIMEOUT 200 +#define DATA_TIMEOUT 1000 +#define IDLE_TIMEOUT 2000 +#define FIRST_FRAME_TIMEOUT 2000 #define MISC_REQ_GET_SET_ECO_A 0xff #define MISC_REQ_GET_SET_ECO_B 0x35 @@ -389,7 +389,7 @@ static void gm12u320_fb_update_work(struct work_struct *work) * switches back to showing its logo. */ queue_delayed_work(system_long_wq, &gm12u320->fb_update.work, - IDLE_TIMEOUT); + msecs_to_jiffies(IDLE_TIMEOUT)); return; err: diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index d5d4f642d367..3c99fb8b54e2 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -408,15 +408,10 @@ void vkms_set_composer(struct vkms_output *out, bool enabled) if (enabled) drm_crtc_vblank_get(&out->crtc); - mutex_lock(&out->enabled_lock); + spin_lock_irq(&out->lock); old_enabled = out->composer_enabled; out->composer_enabled = enabled; - - /* the composition wasn't enabled, so unlock the lock to make sure the lock - * will be balanced even if we have a failed commit - */ - if (!out->composer_enabled) - mutex_unlock(&out->enabled_lock); + spin_unlock_irq(&out->lock); if (old_enabled) drm_crtc_vblank_put(&out->crtc); diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 3c5ebf106b66..61e500b8c9da 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -16,7 +16,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) struct drm_crtc *crtc = &output->crtc; struct vkms_crtc_state *state; u64 ret_overrun; - bool ret, fence_cookie, composer_enabled; + bool ret, fence_cookie; fence_cookie = dma_fence_begin_signalling(); @@ -25,15 +25,15 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) if (ret_overrun != 1) pr_warn("%s: vblank timer overrun\n", __func__); + spin_lock(&output->lock); ret = drm_crtc_handle_vblank(crtc); if (!ret) DRM_ERROR("vkms failure on handling vblank"); state = output->composer_state; - composer_enabled = output->composer_enabled; - mutex_unlock(&output->enabled_lock); + spin_unlock(&output->lock); - if (state && composer_enabled) { + if (state && output->composer_enabled) { u64 frame = drm_crtc_accurate_vblank_count(crtc); /* update frame_start only if a queued vkms_composer_worker() @@ -295,7 +295,6 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, spin_lock_init(&vkms_out->lock); spin_lock_init(&vkms_out->composer_lock); - mutex_init(&vkms_out->enabled_lock); vkms_out->composer_workq = 
alloc_ordered_workqueue("vkms_composer", 0); if (!vkms_out->composer_workq) diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index c7ae6c2ba1df..8f5710debb1e 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -108,10 +108,8 @@ struct vkms_output { struct workqueue_struct *composer_workq; /* protects concurrent access to composer */ spinlock_t lock; - /* guarantees that if the composer is enabled, a job will be queued */ - struct mutex enabled_lock; - /* protected by @enabled_lock */ + /* protected by @lock */ bool composer_enabled; struct vkms_crtc_state *composer_state; diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index c6d1a345ea6d..9388823bb0bb 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -72,7 +72,7 @@ config I2C_MUX source "drivers/i2c/muxes/Kconfig" config I2C_ATR - tristate "I2C Address Translator (ATR) support" + tristate "I2C Address Translator (ATR) support" if COMPILE_TEST help Enable support for I2C Address Translator (ATR) chips. diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 169607e80331..6644eebedaf3 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -1384,10 +1384,10 @@ config I2C_ICY config I2C_MLXCPLD tristate "Mellanox I2C driver" - depends on X86_64 || ARM64 || COMPILE_TEST + depends on X86_64 || (ARM64 && ACPI) || COMPILE_TEST help This exposes the Mellanox platform I2C busses to the linux I2C layer - for X86 based systems. + for X86 and ARM64/ACPI based systems. Controller is implemented as CPLD logic. This driver can also be built as a module. If so, the module will be diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 2e5acfeb76c8..5a416b39b818 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -698,13 +698,16 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap, if (time_left == 0) { /* - * If timed out and bus is still busy in a multi master - * environment, attempt recovery at here. + * In a multi-master setup, if a timeout occurs, attempt + * recovery. But if the bus is idle, we still need to reset the + * i2c controller to clear the remaining interrupts. */ if (bus->multi_master && (readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_BUS_BUSY_STS)) aspeed_i2c_recover_bus(bus); + else + aspeed_i2c_reset(bus); /* * If timed out and the state is still pending, drop the pending diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 9849f4502570..de3f58b60dce 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -182,6 +182,7 @@ enum cdns_i2c_slave_state { * @reset: Reset control for the device * @quirks: flag for broken hold bit usage in r1p10 * @ctrl_reg: Cached value of the control register. + * @rinfo: I2C GPIO recovery information * @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register * @slave: Registered slave instance. * @dev_mode: I2C operating role(master/slave). 
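Editor's note on the gm12u320 hunk earlier in this stream: the timeout defines are now kept in milliseconds and converted with msecs_to_jiffies() only at the one place that needs a jiffies value (queueing the delayed work), since the same constants are also used as USB transfer timeouts, which are expressed in milliseconds. A standalone sketch of that "store once in ms, convert at the jiffies call site" idea; HZ and the conversion helper below are illustrative stand-ins, not the kernel's:

/* Illustrative sketch: keep durations in ms, convert to ticks where needed. */
#include <stdio.h>

#define HZ 250				/* example tick rate */
#define IDLE_TIMEOUT_MS 2000		/* stored in ms, usable as-is for USB timeouts */

static unsigned long ms_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;	/* round up */
}

int main(void)
{
	/* USB-style call sites take milliseconds directly ... */
	printf("usb timeout: %d ms\n", IDLE_TIMEOUT_MS);
	/* ... while the delayed-work call site converts exactly once. */
	printf("queue delay: %lu jiffies\n", ms_to_jiffies(IDLE_TIMEOUT_MS));
	return 0;
}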
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 0d93661f88d3..095b9b49aa82 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -214,6 +214,7 @@ struct dm_table { /* a list of devices used by this table */ struct list_head devices; + struct rw_semaphore devices_lock; /* events get handed up using this callback */ void (*event_fn)(void *data); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f5ed729a8e0c..21ebb6c39394 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1630,6 +1630,8 @@ static void retrieve_deps(struct dm_table *table, struct dm_dev_internal *dd; struct dm_target_deps *deps; + down_read(&table->devices_lock); + deps = get_result_buffer(param, param_size, &len); /* @@ -1644,7 +1646,7 @@ static void retrieve_deps(struct dm_table *table, needed = struct_size(deps, dev, count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; - return; + goto out; } /* @@ -1656,6 +1658,9 @@ static void retrieve_deps(struct dm_table *table, deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev); param->data_size = param->data_start + needed; + +out: + up_read(&table->devices_lock); } static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 7d208b2b1a19..37b48f63ae6a 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -135,6 +135,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode, return -ENOMEM; INIT_LIST_HEAD(&t->devices); + init_rwsem(&t->devices_lock); if (!num_targets) num_targets = KEYS_PER_NODE; @@ -359,16 +360,20 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, if (dev == disk_devt(t->md->disk)) return -EINVAL; + down_write(&t->devices_lock); + dd = find_device(&t->devices, dev); if (!dd) { dd = kmalloc(sizeof(*dd), GFP_KERNEL); - if (!dd) - return -ENOMEM; + if (!dd) { + r = -ENOMEM; + goto unlock_ret_r; + } r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev); if (r) { kfree(dd); - return r; + goto unlock_ret_r; } refcount_set(&dd->count, 1); @@ -378,12 +383,17 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { r = upgrade_mode(dd, mode, t->md); if (r) - return r; + goto unlock_ret_r; } refcount_inc(&dd->count); out: + up_write(&t->devices_lock); *result = dd->dm_dev; return 0; + +unlock_ret_r: + up_write(&t->devices_lock); + return r; } EXPORT_SYMBOL(dm_get_device); @@ -419,9 +429,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, void dm_put_device(struct dm_target *ti, struct dm_dev *d) { int found = 0; - struct list_head *devices = &ti->table->devices; + struct dm_table *t = ti->table; + struct list_head *devices = &t->devices; struct dm_dev_internal *dd; + down_write(&t->devices_lock); + list_for_each_entry(dd, devices, list) { if (dd->dm_dev == d) { found = 1; @@ -430,14 +443,17 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) } if (!found) { DMERR("%s: device %s not in table devices list", - dm_device_name(ti->table->md), d->name); - return; + dm_device_name(t->md), d->name); + goto unlock_ret; } if (refcount_dec_and_test(&dd->count)) { - dm_put_table_device(ti->table->md, d); + dm_put_table_device(t->md, d); list_del(&dd->list); kfree(dd); } + +unlock_ret: + up_write(&t->devices_lock); } EXPORT_SYMBOL(dm_put_device); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f0f118ab20fa..64a1f306c96c 100644 --- 
a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -715,24 +715,6 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) rcu_read_unlock(); } -static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, - int *srcu_idx, blk_opf_t bio_opf) -{ - if (bio_opf & REQ_NOWAIT) - return dm_get_live_table_fast(md); - else - return dm_get_live_table(md, srcu_idx); -} - -static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx, - blk_opf_t bio_opf) -{ - if (bio_opf & REQ_NOWAIT) - dm_put_live_table_fast(md); - else - dm_put_live_table(md, srcu_idx); -} - static char *_dm_claim_ptr = "I belong to device-mapper"; /* @@ -1833,9 +1815,8 @@ static void dm_submit_bio(struct bio *bio) struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; - blk_opf_t bio_opf = bio->bi_opf; - map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); + map = dm_get_live_table(md, &srcu_idx); /* If suspended, or map not yet available, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || @@ -1851,7 +1832,7 @@ static void dm_submit_bio(struct bio *bio) dm_split_and_process_bio(md, map, bio); out: - dm_put_live_table_bio(md, srcu_idx, bio_opf); + dm_put_live_table(md, srcu_idx); } static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, diff --git a/drivers/md/md.c b/drivers/md/md.c index 0fe7ab6e8ab9..a104a025084d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -798,14 +798,14 @@ void mddev_unlock(struct mddev *mddev) } else mutex_unlock(&mddev->reconfig_mutex); + md_wakeup_thread(mddev->thread); + wake_up(&mddev->sb_wait); + list_for_each_entry_safe(rdev, tmp, &delete, same_set) { list_del_init(&rdev->same_set); kobject_del(&rdev->kobj); export_rdev(rdev, mddev); } - - md_wakeup_thread(mddev->thread); - wake_up(&mddev->sb_wait); } EXPORT_SYMBOL_GPL(mddev_unlock); @@ -2452,7 +2452,8 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); #endif - blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev); + blkdev_put(rdev->bdev, + test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev); rdev->bdev = NULL; kobject_put(&rdev->kobj); } @@ -3632,6 +3633,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init); static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) { struct md_rdev *rdev; + struct md_rdev *holder; sector_t size; int err; @@ -3646,8 +3648,15 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe if (err) goto out_clear_rdev; + if (super_format == -2) { + holder = &claim_rdev; + } else { + holder = rdev; + set_bit(Holder, &rdev->flags); + } + rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE, - super_format == -2 ? &claim_rdev : rdev, NULL); + holder, NULL); if (IS_ERR(rdev->bdev)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", MAJOR(newdev), MINOR(newdev)); @@ -3684,7 +3693,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe return rdev; out_blkdev_put: - blkdev_put(rdev->bdev, super_format == -2 ? 
&claim_rdev : rdev); + blkdev_put(rdev->bdev, holder); out_clear_rdev: md_rdev_clear(rdev); out_free_rdev: @@ -8256,7 +8265,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) spin_unlock(&all_mddevs_lock); if (to_put) - mddev_put(mddev); + mddev_put(to_put); return next_mddev; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 9bcb77bca963..7c9c13abd7ca 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -211,6 +211,9 @@ enum flag_bits { * check if there is collision between raid1 * serial bios. */ + Holder, /* rdev is used as holder while opening + * underlying disk exclusively. + */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4b30a1742162..2aabac773fe7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1837,12 +1837,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; + struct raid1_info *p = conf->mirrors + number; if (unlikely(number >= conf->raid_disks)) goto abort; - struct raid1_info *p = conf->mirrors + number; - if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 0617d5ccd3ff..8c66d3bf61f0 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -266,6 +266,8 @@ struct sja1105_private { * the switch doesn't confuse them with one another. */ struct mutex mgmt_lock; + /* Serializes accesses to the FDB */ + struct mutex fdb_lock; /* PTP two-step TX timestamp ID, and its serialization lock */ spinlock_t ts_id_lock; u8 ts_id; diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 7729d3f8b7f5..984c0e604e8d 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = { static int sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, - struct sja1105_dyn_cmd *cmd, - const struct sja1105_dynamic_table_ops *ops) + const struct sja1105_dynamic_table_ops *ops, + void *entry, bool check_valident, + bool check_errors) { u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {}; + struct sja1105_dyn_cmd cmd = {}; int rc; - /* We don't _need_ to read the full entry, just the command area which - * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a - * buffer that contains the full entry too. Additionally, our API - * doesn't really know how many bytes into the buffer does the command - * area really begin. So just read back the whole entry. - */ + /* Read back the whole entry + command structure. */ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, ops->packed_size); if (rc) @@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, /* Unpack the command structure, and return it to the caller in case it * needs to perform further checks on it (VALIDENT). */ - memset(cmd, 0, sizeof(*cmd)); - ops->cmd_packing(packed_buf, cmd, UNPACK); + ops->cmd_packing(packed_buf, &cmd, UNPACK); /* Hardware hasn't cleared VALID => still working on it */ - return cmd->valid ? 
-EAGAIN : 0; + if (cmd.valid) + return -EAGAIN; + + if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY)) + return -ENOENT; + + if (check_errors && cmd.errors) + return -EINVAL; + + /* Don't dereference possibly NULL pointer - maybe caller + * only wanted to see whether the entry existed or not. + */ + if (entry) + ops->entry_packing(packed_buf, entry, UNPACK); + + return 0; } /* Poll the dynamic config entry's control area until the hardware has @@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, */ static int sja1105_dynamic_config_wait_complete(struct sja1105_private *priv, - struct sja1105_dyn_cmd *cmd, - const struct sja1105_dynamic_table_ops *ops) + const struct sja1105_dynamic_table_ops *ops, + void *entry, bool check_valident, + bool check_errors) { - int rc; - - return read_poll_timeout(sja1105_dynamic_config_poll_valid, - rc, rc != -EAGAIN, - SJA1105_DYNAMIC_CONFIG_SLEEP_US, - SJA1105_DYNAMIC_CONFIG_TIMEOUT_US, - false, priv, cmd, ops); + int err, rc; + + err = read_poll_timeout(sja1105_dynamic_config_poll_valid, + rc, rc != -EAGAIN, + SJA1105_DYNAMIC_CONFIG_SLEEP_US, + SJA1105_DYNAMIC_CONFIG_TIMEOUT_US, + false, priv, ops, entry, check_valident, + check_errors); + return err < 0 ? err : rc; } /* Provides read access to the settings through the dynamic interface @@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, mutex_lock(&priv->dynamic_config_lock); rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, ops->packed_size); - if (rc < 0) { - mutex_unlock(&priv->dynamic_config_lock); - return rc; - } - - rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); - mutex_unlock(&priv->dynamic_config_lock); if (rc < 0) - return rc; + goto out; - if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY)) - return -ENOENT; + rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false); +out: + mutex_unlock(&priv->dynamic_config_lock); - /* Don't dereference possibly NULL pointer - maybe caller - * only wanted to see whether the entry existed or not. 
- */ - if (entry) - ops->entry_packing(packed_buf, entry, UNPACK); - return 0; + return rc; } int sja1105_dynamic_config_write(struct sja1105_private *priv, @@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv, mutex_lock(&priv->dynamic_config_lock); rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, ops->packed_size); - if (rc < 0) { - mutex_unlock(&priv->dynamic_config_lock); - return rc; - } - - rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); - mutex_unlock(&priv->dynamic_config_lock); if (rc < 0) - return rc; + goto out; - cmd = (struct sja1105_dyn_cmd) {0}; - ops->cmd_packing(packed_buf, &cmd, UNPACK); - if (cmd.errors) - return -EINVAL; + rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true); +out: + mutex_unlock(&priv->dynamic_config_lock); - return 0; + return rc; } static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly) diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index a23d980d28f5..1a367e64bc3b 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1798,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, struct dsa_db db) { struct sja1105_private *priv = ds->priv; + int rc; if (!vid) { switch (db.type) { @@ -1812,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, } } - return priv->info->fdb_add_cmd(ds, port, addr, vid); + mutex_lock(&priv->fdb_lock); + rc = priv->info->fdb_add_cmd(ds, port, addr, vid); + mutex_unlock(&priv->fdb_lock); + + return rc; } -static int sja1105_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) +static int __sja1105_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct sja1105_private *priv = ds->priv; @@ -1837,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port, return priv->info->fdb_del_cmd(ds, port, addr, vid); } +static int sja1105_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct sja1105_private *priv = ds->priv; + int rc; + + mutex_lock(&priv->fdb_lock); + rc = __sja1105_fdb_del(ds, port, addr, vid, db); + mutex_unlock(&priv->fdb_lock); + + return rc; +} + static int sja1105_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -1868,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, if (!(l2_lookup.destports & BIT(port))) continue; - /* We need to hide the FDB entry for unknown multicast */ - if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST && - l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST) - continue; - u64_to_ether_addr(l2_lookup.macaddr, macaddr); + /* Hardware FDB is shared for fdb and mdb, "bridge fdb show" + * only wants to see unicast + */ + if (is_multicast_ether_addr(macaddr)) + continue; + /* We need to hide the dsa_8021q VLANs from the user. 
*/ if (vid_is_dsa_8021q(l2_lookup.vlanid)) l2_lookup.vlanid = 0; @@ -1898,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) }; int i; + mutex_lock(&priv->fdb_lock); + for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { struct sja1105_l2_lookup_entry l2_lookup = {0}; u8 macaddr[ETH_ALEN]; @@ -1911,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) if (rc) { dev_err(ds->dev, "Failed to read FDB: %pe\n", ERR_PTR(rc)); - return; + break; } if (!(l2_lookup.destports & BIT(port))) @@ -1923,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) u64_to_ether_addr(l2_lookup.macaddr, macaddr); - rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db); + rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db); if (rc) { dev_err(ds->dev, "Failed to delete FDB entry %pM vid %lld: %pe\n", macaddr, l2_lookup.vlanid, ERR_PTR(rc)); - return; + break; } } + + mutex_unlock(&priv->fdb_lock); } static int sja1105_mdb_add(struct dsa_switch *ds, int port, @@ -2273,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv, int rc, i; s64 now; + mutex_lock(&priv->fdb_lock); mutex_lock(&priv->mgmt_lock); mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; @@ -2385,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv, goto out; out: mutex_unlock(&priv->mgmt_lock); + mutex_unlock(&priv->fdb_lock); return rc; } @@ -2954,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, { struct sja1105_l2_lookup_entry *l2_lookup; struct sja1105_table *table; - int match; + int match, rc; + + mutex_lock(&priv->fdb_lock); table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; l2_lookup = table->entries; @@ -2967,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, if (match == table->entry_count) { NL_SET_ERR_MSG_MOD(extack, "Could not find FDB entry for unknown multicast"); - return -ENOSPC; + rc = -ENOSPC; + goto out; } if (flags.val & BR_MCAST_FLOOD) @@ -2975,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, else l2_lookup[match].destports &= ~BIT(to); - return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, - l2_lookup[match].index, - &l2_lookup[match], - true); + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + l2_lookup[match].index, + &l2_lookup[match], true); +out: + mutex_unlock(&priv->fdb_lock); + + return rc; } static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port, @@ -3348,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi) mutex_init(&priv->ptp_data.lock); mutex_init(&priv->dynamic_config_lock); mutex_init(&priv->mgmt_lock); + mutex_init(&priv->fdb_lock); spin_lock_init(&priv->ts_id_lock); rc = sja1105_parse_dt(priv); diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 1c009b485188..ca66b747b7c5 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -1385,7 +1385,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv, return -ENOMEM; other_port = priv->ports[!port_priv->nr]; - port_rules = adin1110_port_rules(port_priv, false, true); + port_rules = adin1110_port_rules(other_port, false, true); eth_broadcast_addr(mask); return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr, diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index d63d321f3e7b..41a6098eb0c2 100644 --- 
a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -528,13 +528,16 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf) ASP_RX_FILTER_BLK_CTRL); } -void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, - u32 *rule_cnt) +int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, + u32 *rule_cnt) { struct bcmasp_priv *priv = intf->parent; int j = 0, i; for (i = 0; i < NUM_NET_FILTERS; i++) { + if (j == *rule_cnt) + return -EMSGSIZE; + if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -548,6 +551,8 @@ void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, } *rule_cnt = j; + + return 0; } int bcmasp_netfilt_get_active(struct bcmasp_intf *intf) @@ -1300,6 +1305,7 @@ static int bcmasp_probe(struct platform_device *pdev) if (!intf) { dev_err(dev, "Cannot create eth interface %d\n", i); bcmasp_remove_intfs(priv); + of_node_put(intf_node); goto of_put_exit; } list_add_tail(&intf->list, &priv->intfs); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h index 5b512f7f5e94..ec90add6b03e 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h @@ -577,8 +577,8 @@ void bcmasp_netfilt_release(struct bcmasp_intf *intf, int bcmasp_netfilt_get_active(struct bcmasp_intf *intf); -void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, - u32 *rule_cnt); +int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, + u32 *rule_cnt); void bcmasp_netfilt_suspend(struct bcmasp_intf *intf); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index c4f1604d5ab3..ce6a3d56fb23 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -335,7 +335,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, err = bcmasp_flow_get(intf, cmd); break; case ETHTOOL_GRXCLSRLALL: - bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt); + err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt); cmd->data = NUM_NET_FILTERS; break; default: diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 31f664ee4d77..b940dcd3ace6 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -756,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config, if (rx_pause) ctrl |= MACB_BIT(PAE); - macb_set_tx_clk(bp, speed); - /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down * cleared the pipeline and control registers. 
*/ @@ -777,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config, spin_unlock_irqrestore(&bp->lock, flags); + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) + macb_set_tx_clk(bp, speed); + /* Enable Rx and Tx; Enable PTP unicast */ ctrl = macb_readl(bp, NCR); if (gem_has_ptp(bp)) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 13ba9c74bd84..76b34cee1da3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit) } /* only call pci_enable_sriov() if no VFs are allocated already */ - if (!old_vfs) + if (!old_vfs) { err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } goto out; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 0310af851086..9339edbd9082 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + u32 aflags = adapter->flags; bool is_l2 = false; u32 regval; @@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: @@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, if (hw->mac.type >= ixgbe_mac_X550) { tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; break; } fallthrough; @@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } @@ -1079,8 
+1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_TSYNCRXCTL_TYPE_ALL | IXGBE_TSYNCRXCTL_TSIP_UT_EN; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; is_l2 = true; break; default: @@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); + /* configure adapter flags only when HW is actually configured */ + adapter->flags = aflags; + /* clear TX/RX time stamp registers, just to be sure */ ixgbe_ptp_clear_tx_timestamp(adapter); IXGBE_READ_REG(hw, IXGBE_RXSTMPH); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index eb74ccddb440..21c3f9b015c8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, break; case ETHTOOL_GRXCLSRLALL: for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { + if (loc == info->rule_cnt) { + ret = -EMSGSIZE; + break; + } + if (port->rfs_rules[i]) rules[loc++] = i; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 826f691de259..a4a258da8dd5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) } #define NPA_MAX_BURST 16 -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; + int cnt = cq->pool_ptrs; u64 ptrs[NPA_MAX_BURST]; - int num_ptrs = 1; dma_addr_t bufptr; + int num_ptrs = 1; /* Refill pool with new buffers */ while (cq->pool_ptrs) { @@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) num_ptrs = 1; } } + return cnt - cq->pool_ptrs; } void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 8ae96815865e..c1861f7de254 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) return weight; } -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); int cn10k_lmtst_init(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 8511906cb4e2..997fedac3a98 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma) { - if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) { - struct refill_work *work; - struct delayed_work *dwork; - - work = &pfvf->refill_wrk[cq->cq_idx]; - dwork = 
&work->pool_refill_work; - /* Schedule a task if no other task is running */ - if (!cq->refill_task_sched) { - cq->refill_task_sched = true; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); - } + if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) return -ENOMEM; - } return 0; } @@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) static void otx2_pool_refill_task(struct work_struct *work) { struct otx2_cq_queue *cq; - struct otx2_pool *rbpool; struct refill_work *wrk; - int qidx, free_ptrs = 0; struct otx2_nic *pfvf; - dma_addr_t bufptr; + int qidx; wrk = container_of(work, struct refill_work, pool_refill_work.work); pfvf = wrk->pf; qidx = wrk - pfvf->refill_wrk; cq = &pfvf->qset.cq[qidx]; - rbpool = cq->rbpool; - free_ptrs = cq->pool_ptrs; - while (cq->pool_ptrs) { - if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { - /* Schedule a WQ if we fails to free atleast half of the - * pointers else enable napi for this RQ. - */ - if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) { - struct delayed_work *dwork; - - dwork = &wrk->pool_refill_work; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); - } else { - cq->refill_task_sched = false; - } - return; - } - pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); - cq->pool_ptrs--; - } cq->refill_task_sched = false; + + local_bh_disable(); + napi_schedule(wrk->napi); + local_bh_enable(); } int otx2_config_nix_queues(struct otx2_nic *pfvf) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 4c6032ee7800..c04a8ee53a82 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -302,6 +302,7 @@ struct flr_work { struct refill_work { struct delayed_work pool_refill_work; struct otx2_nic *pf; + struct napi_struct *napi; }; /* PTPv2 originTimestamp structure */ @@ -370,7 +371,7 @@ struct dev_hw_ops { int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, int size, int qidx); - void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); + int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); void (*aura_freeptr)(void *dev, int aura, u64 buf); }; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 70b9065f7d10..6daf4d58c25d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev) netif_tx_disable(netdev); + for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) + cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); + devm_kfree(pf->dev, pf->refill_wrk); + otx2_free_hw_resources(pf); otx2_free_cints(pf, pf->hw.cint_cnt); otx2_disable_napi(pf); @@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev) for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); - for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) - cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); - devm_kfree(pf->dev, pf->refill_wrk); kfree(qset->sq); kfree(qset->cq); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index e369baf11530..e77d43848955 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -424,9 +424,10 @@ process_cqe: return processed_cqe; } -void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) +int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; + int cnt = cq->pool_ptrs; dma_addr_t bufptr; while (cq->pool_ptrs) { @@ -435,6 +436,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); cq->pool_ptrs--; } + + return cnt - cq->pool_ptrs; } static int otx2_tx_napi_handler(struct otx2_nic *pfvf, @@ -521,6 +524,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) struct otx2_cq_queue *cq; struct otx2_qset *qset; struct otx2_nic *pfvf; + int filled_cnt = -1; cq_poll = container_of(napi, struct otx2_cq_poll, napi); pfvf = (struct otx2_nic *)cq_poll->dev; @@ -541,7 +545,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } if (rx_cq && rx_cq->pool_ptrs) - pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); + filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -561,9 +565,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) otx2_config_irq_coalescing(pfvf, i); } - /* Re-enable interrupts */ - otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), - BIT_ULL(0)); + if (unlikely(!filled_cnt)) { + struct refill_work *work; + struct delayed_work *dwork; + + work = &pfvf->refill_wrk[cq->cq_idx]; + dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + work->napi = napi; + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + } else { + /* Re-enable interrupts */ + otx2_write64(pfvf, + NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), + BIT_ULL(0)); + } } return workdone; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 9e3bfbe5c480..a82ffca8ce1b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); -void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); #endif /* OTX2_TXRX_H */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6ad42e3b488f..3cffd1bd3067 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2005,11 +2005,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, u8 *data, *new_data; struct mtk_rx_dma_v2 *rxd, trxd; int done = 0, bytes = 0; + dma_addr_t dma_addr = DMA_MAPPING_ERROR; while (done < budget) { unsigned int pktlen, *rxdcsum; struct net_device *netdev; - dma_addr_t dma_addr; u32 hash, reason; int mac = 0; @@ -2186,7 +2186,8 @@ release_desc: else rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && + likely(dma_addr != DMA_MAPPING_ERROR)) rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); ring->calc_idx = idx; 
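Editor's note on the octeontx2 refill rework above: refill_pool_ptrs() now returns how many buffers it managed to post, and the NAPI poll routine either re-enables the completion interrupt or, only when nothing could be refilled, schedules the delayed refill work (which in turn just re-runs NAPI). A loose, self-contained C sketch of that control flow with hypothetical names; the real driver's bookkeeping differs in detail:

/* Illustrative sketch: refill reports a count; the poller decides to defer. */
#include <stdbool.h>
#include <stdio.h>

struct rx_queue {
	int pool_ptrs;		/* buffers still missing from the pool */
	bool refill_scheduled;
};

/* Pretend allocator: fails once its budget is exhausted. */
static bool alloc_buf(int *budget)
{
	if (*budget <= 0)
		return false;
	(*budget)--;
	return true;
}

static int refill_pool(struct rx_queue *q, int *mem_budget)
{
	int filled = 0;

	while (q->pool_ptrs && alloc_buf(mem_budget)) {
		q->pool_ptrs--;
		filled++;
	}
	return filled;
}

static void napi_poll(struct rx_queue *q, int *mem_budget)
{
	int filled = refill_pool(q, mem_budget);

	if (!filled && q->pool_ptrs) {
		if (!q->refill_scheduled) {
			q->refill_scheduled = true;
			printf("defer refill to delayed work\n");
		}
	} else {
		printf("refilled %d, re-enable interrupts\n", filled);
	}
}

int main(void)
{
	struct rx_queue q = { .pool_ptrs = 4, .refill_scheduled = false };
	int mem = 3;

	napi_poll(&q, &mem);	/* refills 3, re-enables interrupts */
	napi_poll(&q, &mem);	/* refills 0, defers to the worker */
	return 0;
}

Returning the count moves the "when to fall back to deferred refilling" decision from the allocator into the poll loop that owns the interrupt state.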
@@ -2994,6 +2995,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev, int i; for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + if (mac->hwlro_ip[i]) { rule_locs[cnt] = i; cnt++; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index a70a5417c173..a4efbeb16208 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -214,9 +214,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, dsa_port = mtk_flow_get_dsa_port(&dev); if (dev == eth->netdev[0]) - pse_port = 1; + pse_port = PSE_GDM1_PORT; else if (dev == eth->netdev[1]) - pse_port = 2; + pse_port = PSE_GDM2_PORT; + else if (dev == eth->netdev[2]) + pse_port = PSE_GDM3_PORT; else return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c index 300fe1a93dce..ef980e4e5bc2 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c @@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri, list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) { newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL); if (!newckf) - return ERR_PTR(-ENOMEM); + goto err; list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields); } list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) { newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL); if (!newcaf) - return ERR_PTR(-ENOMEM); + goto err; list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields); } return duprule; + +err: + list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) { + list_del(&ckf->ctrl.list); + kfree(ckf); + } + + list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) { + list_del(&caf->ctrl.list); + kfree(caf); + } + + kfree(duprule); + return ERR_PTR(-ENOMEM); } static void vcap_apply_width(u8 *dst, int width, int bytes) diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 6083b1c8e4fb..ea9186178091 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -799,6 +799,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget) struct net_device *ndev = napi->dev; struct rswitch_private *priv; struct rswitch_device *rdev; + unsigned long flags; int quota = budget; rdev = netdev_priv(ndev); @@ -816,10 +817,12 @@ retry: netif_wake_subqueue(ndev, 0); - napi_complete(napi); - - rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); - rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + if (napi_complete_done(napi, budget - quota)) { + spin_lock_irqsave(&priv->lock, flags); + rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); + rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&priv->lock, flags); + } out: return budget - quota; @@ -835,8 +838,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev) struct rswitch_device *rdev = netdev_priv(ndev); if (napi_schedule_prep(&rdev->napi)) { + spin_lock(&rdev->priv->lock); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock(&rdev->priv->lock); __napi_schedule(&rdev->napi); } } @@ -1440,14 +1445,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) static int 
rswitch_open(struct net_device *ndev) { struct rswitch_device *rdev = netdev_priv(ndev); + unsigned long flags; phy_start(ndev->phydev); napi_enable(&rdev->napi); netif_start_queue(ndev); + spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&rdev->priv->lock, flags); if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); @@ -1461,6 +1469,7 @@ static int rswitch_stop(struct net_device *ndev) { struct rswitch_device *rdev = netdev_priv(ndev); struct rswitch_gwca_ts_info *ts_info, *ts_info2; + unsigned long flags; netif_tx_stop_all_queues(ndev); bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); @@ -1476,8 +1485,10 @@ static int rswitch_stop(struct net_device *ndev) kfree(ts_info); } + spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock_irqrestore(&rdev->priv->lock, flags); phy_stop(ndev->phydev); napi_disable(&rdev->napi); @@ -1887,6 +1898,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + spin_lock_init(&priv->lock); attr = soc_device_match(rswitch_soc_no_speed_change); if (attr) diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 54f397effbc6..f0c16a37ea55 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -1011,6 +1011,8 @@ struct rswitch_private { struct rswitch_etha etha[RSWITCH_NUM_PORTS]; struct rswitch_mfwd mfwd; + spinlock_t lock; /* lock interrupt registers' control */ + bool etha_no_runtime_change; bool gwca_halt; }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9a3182b9e767..2206789802bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2704,9 +2704,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), - HRTIMER_MODE_REL); + stmmac_tx_timer_arm(priv, queue); flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp); tx_q->txq_stats.tx_packets += tx_packets; @@ -2995,9 +2993,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 tx_coal_timer = priv->tx_coal_timer[queue]; + + if (!tx_coal_timer) + return; hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), + STMMAC_COAL_TIMER(tx_coal_timer), HRTIMER_MODE_REL); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 332c853ca99b..0c13d9950cd8 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2636,6 +2636,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) struct r8152 *tp = container_of(napi, struct r8152, napi); int work_done; + if (!budget) + return 0; + work_done = rx_bottom(tp, budget); if (work_done < budget) { diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 
9c6f4f83f22b..0deefd1573cf 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1446,6 +1446,8 @@ static int veth_open(struct net_device *dev) netif_carrier_on(peer); } + veth_set_xdp_features(dev); + return 0; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f3a01b79148c..21783aa2ee8e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2245,25 +2245,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { - u32 crto; - - ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); - if (ret) { - dev_err(ctrl->device, "Reading CRTO failed (%d)\n", - ret); - return ret; - } - - if (ctrl->cap & NVME_CAP_CRMS_CRIMS) { - ctrl->ctrl_config |= NVME_CC_CRIME; - timeout = NVME_CRTO_CRIMT(crto); - } else { - timeout = NVME_CRTO_CRWMT(crto); - } - } else { - timeout = NVME_CAP_TIMEOUT(ctrl->cap); - } + if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) + ctrl->ctrl_config |= NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2277,6 +2260,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) if (ret) return ret; + /* CAP value may change after initial CC write */ + ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); + if (ret) + return ret; + + timeout = NVME_CAP_TIMEOUT(ctrl->cap); + if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { + u32 crto, ready_timeout; + + ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); + if (ret) { + dev_err(ctrl->device, "Reading CRTO failed (%d)\n", + ret); + return ret; + } + + /* + * CRTO should always be greater or equal to CAP.TO, but some + * devices are known to get this wrong. Use the larger of the + * two values. 
+ */ + if (ctrl->ctrl_config & NVME_CC_CRIME) + ready_timeout = NVME_CRTO_CRIMT(crto); + else + ready_timeout = NVME_CRTO_CRWMT(crto); + + if (ready_timeout < timeout) + dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", + crto, ctrl->cap); + else + timeout = ready_timeout; + } + ctrl->ctrl_config |= NVME_CC_ENABLE; ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); if (ret) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 1cd2bf82319a..a15b37750d6e 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1924,7 +1924,7 @@ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req) struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); struct request *rq = op->rq; - if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio) + if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio) return NULL; return blkcg_get_fc_appid(rq->bio); } diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 316f3e4ca7cc..8df73a0b3980 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -187,7 +187,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, return 0; } -static const struct hwmon_channel_info *nvme_hwmon_info[] = { +static const struct hwmon_channel_info *const nvme_hwmon_info[] = { HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 2f57da12d983..347cb5daebc3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2916,9 +2916,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, struct nvme_dev *dev; int ret = -ENOMEM; - if (node == NUMA_NO_NODE) - set_dev_node(&pdev->dev, first_memory_node); - dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); if (!dev) return ERR_PTR(-ENOMEM); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 868aa4de2e4c..cd92d7ddf5ed 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -348,7 +348,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) while (length) { u32 iov_len = min_t(u32, length, sg->length - sg_offset); - bvec_set_page(iov, sg_page(sg), sg->length, + bvec_set_page(iov, sg_page(sg), iov_len, sg->offset + sg_offset); length -= iov_len; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 509a4072d50a..9ce0d20a6c58 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -214,7 +214,7 @@ struct ioa_registers { struct ioc { struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */ u8 *res_map; /* resource map, bit == pdir entry */ - u64 *pdir_base; /* physical base address */ + __le64 *pdir_base; /* physical base address */ u32 pdir_size; /* bytes, function of IOV Space size */ u32 res_hint; /* next available IOVP - circular search */ @@ -339,7 +339,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) BUG_ON(pages_needed == 0); BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE); - DBG_RES("%s() size: %d pages_needed %d\n", + DBG_RES("%s() size: %zu pages_needed %d\n", __func__, size, pages_needed); /* @@ -427,7 +427,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE); BUG_ON(pages_mapped > BITS_PER_LONG); - DBG_RES("%s(): res_idx: %d pages_mapped %d\n", + DBG_RES("%s(): res_idx: %d pages_mapped %lu\n", __func__, res_idx, pages_mapped); #ifdef CCIO_COLLECT_STATS @@ -543,7 +543,7 @@ static u32 hint_lookup[] = 
{ * index are bits 12:19 of the value returned by LCI. */ static void -ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, +ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba, unsigned long hints) { register unsigned long pa; @@ -719,7 +719,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, unsigned long flags; dma_addr_t iovp; dma_addr_t offset; - u64 *pdir_start; + __le64 *pdir_start; unsigned long hint = hint_lookup[(int)direction]; BUG_ON(!dev); @@ -746,8 +746,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size, pdir_start = &(ioc->pdir_base[idx]); - DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n", - __func__, addr, (long)iovp | offset, size); + DBG_RUN("%s() %px -> %#lx size: %zu\n", + __func__, addr, (long)(iovp | offset), size); /* If not cacheline aligned, force SAFE_DMA on the whole mess */ if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES)) @@ -805,7 +805,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, return; } - DBG_RUN("%s() iovp 0x%lx/%x\n", + DBG_RUN("%s() iovp %#lx/%zx\n", __func__, (long)iova, size); iova ^= offset; /* clear offset bits */ @@ -1283,7 +1283,7 @@ ccio_ioc_init(struct ioc *ioc) iova_space_size>>20, iov_order + PAGE_SHIFT); - ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL, + ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL, get_order(ioc->pdir_size)); if(NULL == ioc->pdir_base) { panic("%s() could not allocate I/O Page Table\n", __func__); diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 0905be256de0..c43f1a212a5c 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h @@ -14,13 +14,13 @@ static inline unsigned int iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, unsigned long hint, - void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long, + void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long, unsigned long)) { struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ unsigned int n_mappings = 0; unsigned long dma_offset = 0, dma_len = 0; - u64 *pdirp = NULL; + __le64 *pdirp = NULL; /* Horrible hack. 
For efficiency's sake, dma_sg starts one * entry below the true start (it is immediately incremented @@ -31,8 +31,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, unsigned long vaddr; long size; - DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents, - (unsigned long)sg_dma_address(startsg), cnt, + DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents, + (unsigned long)sg_dma_address(startsg), sg_virt(startsg), startsg->length ); diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index a7df764f1a72..a4011461189b 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -202,9 +202,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va static DEFINE_SPINLOCK(iosapic_lock); -static inline void iosapic_eoi(void __iomem *addr, unsigned int data) +static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data) { - __raw_writel(data, addr); + __raw_writel((__force u32)data, addr); } /* diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h index 73ecc657ad95..bd8ff40162b4 100644 --- a/drivers/parisc/iosapic_private.h +++ b/drivers/parisc/iosapic_private.h @@ -118,8 +118,8 @@ struct iosapic_irt { struct vector_info { struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */ struct irt_entry *irte; /* IRT entry */ - u32 __iomem *eoi_addr; /* precalculate EOI reg address */ - u32 eoi_data; /* IA64: ? PA: swapped txn_data */ + __le32 __iomem *eoi_addr; /* precalculate EOI reg address */ + __le32 eoi_data; /* IA64: ? PA: swapped txn_data */ int txn_irq; /* virtual IRQ number for processor */ ulong txn_addr; /* IA64: id_eid PA: partial HPA */ u32 txn_data; /* CPU interrupt bit */ diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index f6b510675318..05e7103d1d40 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -46,8 +46,6 @@ #include <linux/module.h> #include <asm/ropes.h> -#include <asm/mckinley.h> /* for proc_mckinley_root */ -#include <asm/runway.h> /* for proc_runway_root */ #include <asm/page.h> /* for PAGE0 */ #include <asm/pdc.h> /* for PDC_MODEL_* */ #include <asm/pdcpat.h> /* for is_pdc_pat() */ @@ -122,7 +120,7 @@ MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART"); #endif static struct proc_dir_entry *proc_runway_root __ro_after_init; -struct proc_dir_entry *proc_mckinley_root __ro_after_init; +static struct proc_dir_entry *proc_mckinley_root __ro_after_init; /************************************ ** SBA register read and write support @@ -204,7 +202,7 @@ static void sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) { /* start printing from lowest pde in rval */ - u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]); + __le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]); unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]); uint rcnt; @@ -571,7 +569,7 @@ typedef unsigned long space_t; */ static void -sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, +sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba, unsigned long hint) { u64 pa; /* physical address */ @@ -615,7 +613,7 @@ static void sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) { u32 iovp = (u32) SBA_IOVP(ioc,iova); - u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)]; + __le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)]; #ifdef ASSERT_PDIR_SANITY /* Assert first pdir entry is set. 
@@ -716,7 +714,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, unsigned long flags; dma_addr_t iovp; dma_addr_t offset; - u64 *pdir_start; + __le64 *pdir_start; int pide; ioc = GET_IOC(dev); @@ -1434,7 +1432,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); - DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", + DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n", __func__, ioc->ioc_hpa, (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT), @@ -1471,7 +1469,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); #endif - DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", + DBG_INIT("%s() IOV base %#lx mask %#0lx\n", __func__, ioc->ibase, ioc->imask); /* @@ -1583,7 +1581,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa, if (!IS_PLUTO(sba_dev->dev)) { ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL); - DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->", + DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->", __func__, sba_dev->sba_hpa, ioc_ctl); ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE); ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC; @@ -1668,14 +1666,14 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa, /* flush out the last writes */ READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL); - DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n", + DBG_INIT(" ioc[%d] ROPE_CFG %#lx ROPE_DBG %lx\n", i, - READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40), - READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50) + (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40), + (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50) ); - DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n", - READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108), - READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) + DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n", + (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108), + (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) ); if (IS_PLUTO(sba_dev->dev)) { @@ -1739,7 +1737,7 @@ sba_common_init(struct sba_device *sba_dev) #ifdef ASSERT_PDIR_SANITY /* Mark first bit busy - ie no IOVA 0 */ sba_dev->ioc[i].res_map[0] = 0x80; - sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL; + sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL; #endif /* Third (and last) part of PIRANHA BUG */ @@ -1899,9 +1897,7 @@ static int __init sba_driver_callback(struct parisc_device *dev) int i; char *version; void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE); -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *root; -#endif + struct proc_dir_entry *root __maybe_unused; sba_dump_ranges(sba_addr); @@ -1967,7 +1963,6 @@ static int __init sba_driver_callback(struct parisc_device *dev) hppa_dma_ops = &sba_ops; -#ifdef CONFIG_PROC_FS switch (dev->id.hversion) { case PLUTO_MCKINLEY_PORT: if (!proc_mckinley_root) @@ -1985,7 +1980,6 @@ static int __init sba_driver_callback(struct parisc_device *dev) proc_create_single("sba_iommu", 0, root, sba_proc_info); proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info); -#endif return 0; } diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig index 382793e73a60..f7dfa0e785fd 100644 --- a/drivers/platform/mellanox/Kconfig +++ b/drivers/platform/mellanox/Kconfig @@ -60,6 +60,7 @@ config MLXBF_BOOTCTL tristate "Mellanox BlueField Firmware Boot Control driver" depends on ARM64 depends on ACPI 
+ depends on NET help The Mellanox BlueField firmware implements functionality to request swapping the primary and alternate eMMC boot partition, @@ -80,8 +81,8 @@ config MLXBF_PMC config NVSW_SN2201 tristate "Nvidia SN2201 platform driver support" - depends on HWMON - depends on I2C + depends on HWMON && I2C + depends on ACPI || COMPILE_TEST select REGMAP_I2C help This driver provides support for the Nvidia SN2201 platform. diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index be967d797c28..2d4bbe99959e 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = { }; static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = { + { 0x0, "DISABLE" }, { 0xa0, "TPIO_DATA_BEAT" }, { 0xa1, "TDMA_DATA_BEAT" }, { 0xa2, "MAP_DATA_BEAT" }, @@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = { }; static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = { + { 0x0, "DISABLE" }, { 0xa0, "TPIO_DATA_BEAT" }, { 0xa1, "TDMA_DATA_BEAT" }, { 0xa2, "MAP_DATA_BEAT" }, @@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = { }; static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = { + { 0x0, "DISABLE" }, { 0x100, "ECC_SINGLE_ERROR_CNT" }, { 0x104, "ECC_DOUBLE_ERROR_CNT" }, { 0x114, "SERR_INJ" }, @@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = { }; static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = { + { 0x0, "DISABLE" }, { 0xc0, "RXREQ_MSS" }, { 0xc1, "RXDAT_MSS" }, { 0xc2, "TXRSP_MSS" }, @@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = { }; static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = { + { 0x0, "DISABLE" }, { 0x45, "HNF_REQUESTS" }, { 0x46, "HNF_REJECTS" }, { 0x47, "ALL_BUSY" }, @@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = { }; static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = { + { 0x0, "DISABLE" }, { 0x12, "CDN_REQ" }, { 0x13, "DDN_REQ" }, { 0x14, "NDN_REQ" }, @@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3, uint64_t *result) { uint32_t perfcfg_offset, perfval_offset; - uint64_t perfmon_cfg, perfevt, perfctl; + uint64_t perfmon_cfg, perfevt; if (cnt_num >= pmc->block[blk_num].counters) return -EINVAL; @@ -906,25 +912,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3, /* Set counter in "read" mode */ perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, - MLXBF_PMC_PERFCTL); - perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); - perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0); - - if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset, - MLXBF_PMC_WRITE_REG_64, perfmon_cfg)) - return -EFAULT; - - /* Check if the counter is enabled */ - - if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset, - MLXBF_PMC_READ_REG_64, &perfctl)) - return -EFAULT; - - if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl)) - return -EINVAL; - - /* Set counter in "read" mode */ - perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, MLXBF_PMC_PERFEVT); perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0); @@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev, } else return -EINVAL; - return sprintf(buf, "0x%llx\n", value); + 
return sysfs_emit(buf, "0x%llx\n", value); } /* Store function for "counter" sysfs files */ @@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev, err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num); if (err) - return sprintf(buf, "No event being monitored\n"); + return sysfs_emit(buf, "No event being monitored\n"); evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num); if (!evt_name) return -EINVAL; - return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name); + return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name); } /* Store function for "event" sysfs files */ @@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev, return -EINVAL; for (i = 0, buf[0] = '\0'; i < size; ++i) { - len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num, - events[i].evt_name); - if (len > PAGE_SIZE) + len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n", + events[i].evt_num, events[i].evt_name); + if (len >= PAGE_SIZE) break; strcat(buf, e_info); ret = len; @@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev, value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg); - return sprintf(buf, "%d\n", value); + return sysfs_emit(buf, "%d\n", value); } /* Store function for "enable" sysfs files - only for l3cache */ diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index b600b77d91ef..f3696a54a2bd 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -59,6 +59,7 @@ struct mlxbf_tmfifo; * @vq: pointer to the virtio virtqueue * @desc: current descriptor of the pending packet * @desc_head: head descriptor of the pending packet + * @drop_desc: dummy desc for packet dropping * @cur_len: processed length of the current descriptor * @rem_len: remaining length of the pending packet * @pkt_len: total length of the pending packet @@ -75,6 +76,7 @@ struct mlxbf_tmfifo_vring { struct virtqueue *vq; struct vring_desc *desc; struct vring_desc *desc_head; + struct vring_desc drop_desc; int cur_len; int rem_len; u32 pkt_len; @@ -86,6 +88,14 @@ struct mlxbf_tmfifo_vring { struct mlxbf_tmfifo *fifo; }; +/* Check whether vring is in drop mode. */ +#define IS_VRING_DROP(_r) ({ \ + typeof(_r) (r) = (_r); \ + (r->desc_head == &r->drop_desc ? true : false); }) + +/* A stub length to drop maximum length packet. */ +#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0) + /* Interrupt types. */ enum { MLXBF_TM_RX_LWM_IRQ, @@ -214,7 +224,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = { static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr"; /* Maximum L2 header length. */ -#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36 +#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN) /* Supported virtio-net features. 
*/ #define MLXBF_TMFIFO_NET_FEATURES \ @@ -262,6 +272,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, vring->align = SMP_CACHE_BYTES; vring->index = i; vring->vdev_id = tm_vdev->vdev.id.device; + vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN; dev = &tm_vdev->vdev.dev; size = vring_size(vring->num, vring->align); @@ -367,7 +378,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring, return len; } -static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring) +static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring) { struct vring_desc *desc_head; u32 len = 0; @@ -596,19 +607,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring, if (vring->cur_len + sizeof(u64) <= len) { /* The whole word. */ - if (is_rx) - memcpy(addr + vring->cur_len, &data, sizeof(u64)); - else - memcpy(&data, addr + vring->cur_len, sizeof(u64)); + if (!IS_VRING_DROP(vring)) { + if (is_rx) + memcpy(addr + vring->cur_len, &data, + sizeof(u64)); + else + memcpy(&data, addr + vring->cur_len, + sizeof(u64)); + } vring->cur_len += sizeof(u64); } else { /* Leftover bytes. */ - if (is_rx) - memcpy(addr + vring->cur_len, &data, - len - vring->cur_len); - else - memcpy(&data, addr + vring->cur_len, - len - vring->cur_len); + if (!IS_VRING_DROP(vring)) { + if (is_rx) + memcpy(addr + vring->cur_len, &data, + len - vring->cur_len); + else + memcpy(&data, addr + vring->cur_len, + len - vring->cur_len); + } vring->cur_len = len; } @@ -625,13 +642,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring, * flag is set. */ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, - struct vring_desc *desc, + struct vring_desc **desc, bool is_rx, bool *vring_change) { struct mlxbf_tmfifo *fifo = vring->fifo; struct virtio_net_config *config; struct mlxbf_tmfifo_msg_hdr hdr; int vdev_id, hdr_len; + bool drop_rx = false; /* Read/Write packet header. */ if (is_rx) { @@ -651,8 +669,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, if (ntohs(hdr.len) > __virtio16_to_cpu(virtio_legacy_is_little_endian(), config->mtu) + - MLXBF_TMFIFO_NET_L2_OVERHEAD) - return; + MLXBF_TMFIFO_NET_L2_OVERHEAD) + drop_rx = true; } else { vdev_id = VIRTIO_ID_CONSOLE; hdr_len = 0; @@ -667,16 +685,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, if (!tm_dev2) return; - vring->desc = desc; + vring->desc = *desc; vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX]; *vring_change = true; } + + if (drop_rx && !IS_VRING_DROP(vring)) { + if (vring->desc_head) + mlxbf_tmfifo_release_pkt(vring); + *desc = &vring->drop_desc; + vring->desc_head = *desc; + vring->desc = *desc; + } + vring->pkt_len = ntohs(hdr.len) + hdr_len; } else { /* Network virtio has an extra header. */ hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ? sizeof(struct virtio_net_hdr) : 0; - vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc); + vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc); hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ? VIRTIO_ID_NET : VIRTIO_ID_CONSOLE; hdr.len = htons(vring->pkt_len - hdr_len); @@ -709,15 +736,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, /* Get the descriptor of the next packet. */ if (!vring->desc) { desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx); - if (!desc) - return false; + if (!desc) { + /* Drop next Rx packet to avoid stuck. 
*/ + if (is_rx) { + desc = &vring->drop_desc; + vring->desc_head = desc; + vring->desc = desc; + } else { + return false; + } + } } else { desc = vring->desc; } /* Beginning of a packet. Start to Rx/Tx packet header. */ if (vring->pkt_len == 0) { - mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change); + mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change); (*avail)--; /* Return if new packet is for another ring. */ @@ -743,17 +778,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, vring->rem_len -= len; /* Get the next desc on the chain. */ - if (vring->rem_len > 0 && + if (!IS_VRING_DROP(vring) && vring->rem_len > 0 && (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) { idx = virtio16_to_cpu(vdev, desc->next); desc = &vr->desc[idx]; goto mlxbf_tmfifo_desc_done; } - /* Done and release the pending packet. */ - mlxbf_tmfifo_release_pending_pkt(vring); + /* Done and release the packet. */ desc = NULL; fifo->vring[is_rx] = NULL; + if (!IS_VRING_DROP(vring)) { + mlxbf_tmfifo_release_pkt(vring); + } else { + vring->pkt_len = 0; + vring->desc_head = NULL; + vring->desc = NULL; + return false; + } /* * Make sure the load/store are in order before @@ -933,7 +975,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev) /* Release the pending packet. */ if (vring->desc) - mlxbf_tmfifo_release_pending_pkt(vring); + mlxbf_tmfifo_release_pkt(vring); vq = vring->vq; if (vq) { vring->vq = NULL; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index fdf7da06af30..d85d895fee89 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -480,6 +480,15 @@ static const struct dmi_system_id asus_quirks[] = { }, { .callback = dmi_matched, + .ident = "ASUS ROG FLOW X16", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"), + }, + .driver_data = &quirk_asus_tablet_mode, + }, + { + .callback = dmi_matched, .ident = "ASUS VivoBook E410MA", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), diff --git a/drivers/genpd/Makefile b/drivers/pmdomain/Makefile index 666753676e5c..666753676e5c 100644 --- a/drivers/genpd/Makefile +++ b/drivers/pmdomain/Makefile diff --git a/drivers/genpd/actions/Makefile b/drivers/pmdomain/actions/Makefile index 7e8aa473d12d..7e8aa473d12d 100644 --- a/drivers/genpd/actions/Makefile +++ b/drivers/pmdomain/actions/Makefile diff --git a/drivers/genpd/actions/owl-sps-helper.c b/drivers/pmdomain/actions/owl-sps-helper.c index e3f36603dd53..e3f36603dd53 100644 --- a/drivers/genpd/actions/owl-sps-helper.c +++ b/drivers/pmdomain/actions/owl-sps-helper.c diff --git a/drivers/genpd/actions/owl-sps.c b/drivers/pmdomain/actions/owl-sps.c index 73a9e0bb7e8e..73a9e0bb7e8e 100644 --- a/drivers/genpd/actions/owl-sps.c +++ b/drivers/pmdomain/actions/owl-sps.c diff --git a/drivers/genpd/amlogic/Makefile b/drivers/pmdomain/amlogic/Makefile index 3d58abd574f9..3d58abd574f9 100644 --- a/drivers/genpd/amlogic/Makefile +++ b/drivers/pmdomain/amlogic/Makefile diff --git a/drivers/genpd/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c index cfb796d40d9d..cfb796d40d9d 100644 --- a/drivers/genpd/amlogic/meson-ee-pwrc.c +++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c diff --git a/drivers/genpd/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c index 33df520eab95..33df520eab95 100644 --- a/drivers/genpd/amlogic/meson-gx-pwrc-vpu.c +++ 
b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c diff --git a/drivers/genpd/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c index 89c881c56cd7..89c881c56cd7 100644 --- a/drivers/genpd/amlogic/meson-secure-pwrc.c +++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c diff --git a/drivers/genpd/apple/Makefile b/drivers/pmdomain/apple/Makefile index 53665af630be..53665af630be 100644 --- a/drivers/genpd/apple/Makefile +++ b/drivers/pmdomain/apple/Makefile diff --git a/drivers/genpd/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c index d62a776c89a1..d62a776c89a1 100644 --- a/drivers/genpd/apple/pmgr-pwrstate.c +++ b/drivers/pmdomain/apple/pmgr-pwrstate.c diff --git a/drivers/genpd/bcm/Makefile b/drivers/pmdomain/bcm/Makefile index 6bfbe4e4db13..6bfbe4e4db13 100644 --- a/drivers/genpd/bcm/Makefile +++ b/drivers/pmdomain/bcm/Makefile diff --git a/drivers/genpd/bcm/bcm-pmb.c b/drivers/pmdomain/bcm/bcm-pmb.c index a72ba26ecf9d..a72ba26ecf9d 100644 --- a/drivers/genpd/bcm/bcm-pmb.c +++ b/drivers/pmdomain/bcm/bcm-pmb.c diff --git a/drivers/genpd/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c index 1a179d4e011c..1a179d4e011c 100644 --- a/drivers/genpd/bcm/bcm2835-power.c +++ b/drivers/pmdomain/bcm/bcm2835-power.c diff --git a/drivers/genpd/bcm/bcm63xx-power.c b/drivers/pmdomain/bcm/bcm63xx-power.c index 98b0c2430dbc..98b0c2430dbc 100644 --- a/drivers/genpd/bcm/bcm63xx-power.c +++ b/drivers/pmdomain/bcm/bcm63xx-power.c diff --git a/drivers/genpd/bcm/raspberrypi-power.c b/drivers/pmdomain/bcm/raspberrypi-power.c index 06196ebfe03b..06196ebfe03b 100644 --- a/drivers/genpd/bcm/raspberrypi-power.c +++ b/drivers/pmdomain/bcm/raspberrypi-power.c diff --git a/drivers/genpd/imx/Makefile b/drivers/pmdomain/imx/Makefile index 52d2629014a7..52d2629014a7 100644 --- a/drivers/genpd/imx/Makefile +++ b/drivers/pmdomain/imx/Makefile diff --git a/drivers/genpd/imx/gpc.c b/drivers/pmdomain/imx/gpc.c index 90a8b2c0676f..90a8b2c0676f 100644 --- a/drivers/genpd/imx/gpc.c +++ b/drivers/pmdomain/imx/gpc.c diff --git a/drivers/genpd/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c index fbd3d92f8cd8..fbd3d92f8cd8 100644 --- a/drivers/genpd/imx/gpcv2.c +++ b/drivers/pmdomain/imx/gpcv2.c diff --git a/drivers/genpd/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c index cc5ef6e2f0a8..cc5ef6e2f0a8 100644 --- a/drivers/genpd/imx/imx8m-blk-ctrl.c +++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c diff --git a/drivers/genpd/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c index c6ac32c1a8c1..c6ac32c1a8c1 100644 --- a/drivers/genpd/imx/imx8mp-blk-ctrl.c +++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c diff --git a/drivers/genpd/imx/imx93-blk-ctrl.c b/drivers/pmdomain/imx/imx93-blk-ctrl.c index 40bd90f8b977..40bd90f8b977 100644 --- a/drivers/genpd/imx/imx93-blk-ctrl.c +++ b/drivers/pmdomain/imx/imx93-blk-ctrl.c diff --git a/drivers/genpd/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c index b9e60d136875..b9e60d136875 100644 --- a/drivers/genpd/imx/imx93-pd.c +++ b/drivers/pmdomain/imx/imx93-pd.c diff --git a/drivers/genpd/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c index 2f693b67ddb4..2f693b67ddb4 100644 --- a/drivers/genpd/imx/scu-pd.c +++ b/drivers/pmdomain/imx/scu-pd.c diff --git a/drivers/genpd/mediatek/Makefile b/drivers/pmdomain/mediatek/Makefile index 8cde09e654b3..8cde09e654b3 100644 --- a/drivers/genpd/mediatek/Makefile +++ b/drivers/pmdomain/mediatek/Makefile diff --git a/drivers/genpd/mediatek/mt6795-pm-domains.h b/drivers/pmdomain/mediatek/mt6795-pm-domains.h index 
ef07c9dfdd9b..ef07c9dfdd9b 100644 --- a/drivers/genpd/mediatek/mt6795-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt6795-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8167-pm-domains.h b/drivers/pmdomain/mediatek/mt8167-pm-domains.h index 4d6c32759606..4d6c32759606 100644 --- a/drivers/genpd/mediatek/mt8167-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8167-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8173-pm-domains.h b/drivers/pmdomain/mediatek/mt8173-pm-domains.h index 1a5dc63b7357..1a5dc63b7357 100644 --- a/drivers/genpd/mediatek/mt8173-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8173-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8183-pm-domains.h b/drivers/pmdomain/mediatek/mt8183-pm-domains.h index 99de67fe5de8..99de67fe5de8 100644 --- a/drivers/genpd/mediatek/mt8183-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8183-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8186-pm-domains.h b/drivers/pmdomain/mediatek/mt8186-pm-domains.h index fce86f79c505..fce86f79c505 100644 --- a/drivers/genpd/mediatek/mt8186-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8186-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8188-pm-domains.h b/drivers/pmdomain/mediatek/mt8188-pm-domains.h index 0692cb444ed0..0692cb444ed0 100644 --- a/drivers/genpd/mediatek/mt8188-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8188-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8192-pm-domains.h b/drivers/pmdomain/mediatek/mt8192-pm-domains.h index b97b2051920f..b97b2051920f 100644 --- a/drivers/genpd/mediatek/mt8192-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8192-pm-domains.h diff --git a/drivers/genpd/mediatek/mt8195-pm-domains.h b/drivers/pmdomain/mediatek/mt8195-pm-domains.h index d7387ea1b9c9..d7387ea1b9c9 100644 --- a/drivers/genpd/mediatek/mt8195-pm-domains.h +++ b/drivers/pmdomain/mediatek/mt8195-pm-domains.h diff --git a/drivers/genpd/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c index ee962804b830..ee962804b830 100644 --- a/drivers/genpd/mediatek/mtk-pm-domains.c +++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c diff --git a/drivers/genpd/mediatek/mtk-pm-domains.h b/drivers/pmdomain/mediatek/mtk-pm-domains.h index 5ec53ee073c4..5ec53ee073c4 100644 --- a/drivers/genpd/mediatek/mtk-pm-domains.h +++ b/drivers/pmdomain/mediatek/mtk-pm-domains.h diff --git a/drivers/genpd/mediatek/mtk-scpsys.c b/drivers/pmdomain/mediatek/mtk-scpsys.c index b374d01fdac7..b374d01fdac7 100644 --- a/drivers/genpd/mediatek/mtk-scpsys.c +++ b/drivers/pmdomain/mediatek/mtk-scpsys.c diff --git a/drivers/genpd/qcom/Makefile b/drivers/pmdomain/qcom/Makefile index 403dfc5af095..403dfc5af095 100644 --- a/drivers/genpd/qcom/Makefile +++ b/drivers/pmdomain/qcom/Makefile diff --git a/drivers/genpd/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c index 94a3f0977212..94a3f0977212 100644 --- a/drivers/genpd/qcom/cpr.c +++ b/drivers/pmdomain/qcom/cpr.c diff --git a/drivers/genpd/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c index a87e336d5e33..a87e336d5e33 100644 --- a/drivers/genpd/qcom/rpmhpd.c +++ b/drivers/pmdomain/qcom/rpmhpd.c diff --git a/drivers/genpd/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c index 3135dd1dafe0..3135dd1dafe0 100644 --- a/drivers/genpd/qcom/rpmpd.c +++ b/drivers/pmdomain/qcom/rpmpd.c diff --git a/drivers/genpd/renesas/Makefile b/drivers/pmdomain/renesas/Makefile index e306e396fc8c..e306e396fc8c 100644 --- a/drivers/genpd/renesas/Makefile +++ b/drivers/pmdomain/renesas/Makefile diff --git a/drivers/genpd/renesas/r8a7742-sysc.c b/drivers/pmdomain/renesas/r8a7742-sysc.c index 
219a675f83f4..219a675f83f4 100644 --- a/drivers/genpd/renesas/r8a7742-sysc.c +++ b/drivers/pmdomain/renesas/r8a7742-sysc.c diff --git a/drivers/genpd/renesas/r8a7743-sysc.c b/drivers/pmdomain/renesas/r8a7743-sysc.c index 4e2c0ab951b3..4e2c0ab951b3 100644 --- a/drivers/genpd/renesas/r8a7743-sysc.c +++ b/drivers/pmdomain/renesas/r8a7743-sysc.c diff --git a/drivers/genpd/renesas/r8a7745-sysc.c b/drivers/pmdomain/renesas/r8a7745-sysc.c index 865821a2f0c6..865821a2f0c6 100644 --- a/drivers/genpd/renesas/r8a7745-sysc.c +++ b/drivers/pmdomain/renesas/r8a7745-sysc.c diff --git a/drivers/genpd/renesas/r8a77470-sysc.c b/drivers/pmdomain/renesas/r8a77470-sysc.c index 1eeb8018df50..1eeb8018df50 100644 --- a/drivers/genpd/renesas/r8a77470-sysc.c +++ b/drivers/pmdomain/renesas/r8a77470-sysc.c diff --git a/drivers/genpd/renesas/r8a774a1-sysc.c b/drivers/pmdomain/renesas/r8a774a1-sysc.c index 38ac2c689ff0..38ac2c689ff0 100644 --- a/drivers/genpd/renesas/r8a774a1-sysc.c +++ b/drivers/pmdomain/renesas/r8a774a1-sysc.c diff --git a/drivers/genpd/renesas/r8a774b1-sysc.c b/drivers/pmdomain/renesas/r8a774b1-sysc.c index 5f97ff26f3f8..5f97ff26f3f8 100644 --- a/drivers/genpd/renesas/r8a774b1-sysc.c +++ b/drivers/pmdomain/renesas/r8a774b1-sysc.c diff --git a/drivers/genpd/renesas/r8a774c0-sysc.c b/drivers/pmdomain/renesas/r8a774c0-sysc.c index c1c216f7d073..c1c216f7d073 100644 --- a/drivers/genpd/renesas/r8a774c0-sysc.c +++ b/drivers/pmdomain/renesas/r8a774c0-sysc.c diff --git a/drivers/genpd/renesas/r8a774e1-sysc.c b/drivers/pmdomain/renesas/r8a774e1-sysc.c index 18449f746455..18449f746455 100644 --- a/drivers/genpd/renesas/r8a774e1-sysc.c +++ b/drivers/pmdomain/renesas/r8a774e1-sysc.c diff --git a/drivers/genpd/renesas/r8a7779-sysc.c b/drivers/pmdomain/renesas/r8a7779-sysc.c index e24a7151d55f..e24a7151d55f 100644 --- a/drivers/genpd/renesas/r8a7779-sysc.c +++ b/drivers/pmdomain/renesas/r8a7779-sysc.c diff --git a/drivers/genpd/renesas/r8a7790-sysc.c b/drivers/pmdomain/renesas/r8a7790-sysc.c index b9afe7f6245b..b9afe7f6245b 100644 --- a/drivers/genpd/renesas/r8a7790-sysc.c +++ b/drivers/pmdomain/renesas/r8a7790-sysc.c diff --git a/drivers/genpd/renesas/r8a7791-sysc.c b/drivers/pmdomain/renesas/r8a7791-sysc.c index f00fa24522a3..f00fa24522a3 100644 --- a/drivers/genpd/renesas/r8a7791-sysc.c +++ b/drivers/pmdomain/renesas/r8a7791-sysc.c diff --git a/drivers/genpd/renesas/r8a7792-sysc.c b/drivers/pmdomain/renesas/r8a7792-sysc.c index 60aae242c43f..60aae242c43f 100644 --- a/drivers/genpd/renesas/r8a7792-sysc.c +++ b/drivers/pmdomain/renesas/r8a7792-sysc.c diff --git a/drivers/genpd/renesas/r8a7794-sysc.c b/drivers/pmdomain/renesas/r8a7794-sysc.c index 72ef4e85458f..72ef4e85458f 100644 --- a/drivers/genpd/renesas/r8a7794-sysc.c +++ b/drivers/pmdomain/renesas/r8a7794-sysc.c diff --git a/drivers/genpd/renesas/r8a7795-sysc.c b/drivers/pmdomain/renesas/r8a7795-sysc.c index cbe1ff0fc583..cbe1ff0fc583 100644 --- a/drivers/genpd/renesas/r8a7795-sysc.c +++ b/drivers/pmdomain/renesas/r8a7795-sysc.c diff --git a/drivers/genpd/renesas/r8a7796-sysc.c b/drivers/pmdomain/renesas/r8a7796-sysc.c index 471bd5b3b6ad..471bd5b3b6ad 100644 --- a/drivers/genpd/renesas/r8a7796-sysc.c +++ b/drivers/pmdomain/renesas/r8a7796-sysc.c diff --git a/drivers/genpd/renesas/r8a77965-sysc.c b/drivers/pmdomain/renesas/r8a77965-sysc.c index ff0b0d116992..ff0b0d116992 100644 --- a/drivers/genpd/renesas/r8a77965-sysc.c +++ b/drivers/pmdomain/renesas/r8a77965-sysc.c diff --git a/drivers/genpd/renesas/r8a77970-sysc.c 
b/drivers/pmdomain/renesas/r8a77970-sysc.c index 706258250600..706258250600 100644 --- a/drivers/genpd/renesas/r8a77970-sysc.c +++ b/drivers/pmdomain/renesas/r8a77970-sysc.c diff --git a/drivers/genpd/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c index 39ca84a67daa..39ca84a67daa 100644 --- a/drivers/genpd/renesas/r8a77980-sysc.c +++ b/drivers/pmdomain/renesas/r8a77980-sysc.c diff --git a/drivers/genpd/renesas/r8a77990-sysc.c b/drivers/pmdomain/renesas/r8a77990-sysc.c index 9f92737dc352..9f92737dc352 100644 --- a/drivers/genpd/renesas/r8a77990-sysc.c +++ b/drivers/pmdomain/renesas/r8a77990-sysc.c diff --git a/drivers/genpd/renesas/r8a77995-sysc.c b/drivers/pmdomain/renesas/r8a77995-sysc.c index efcc67e3d76d..efcc67e3d76d 100644 --- a/drivers/genpd/renesas/r8a77995-sysc.c +++ b/drivers/pmdomain/renesas/r8a77995-sysc.c diff --git a/drivers/genpd/renesas/r8a779a0-sysc.c b/drivers/pmdomain/renesas/r8a779a0-sysc.c index 04f1bc322ae7..04f1bc322ae7 100644 --- a/drivers/genpd/renesas/r8a779a0-sysc.c +++ b/drivers/pmdomain/renesas/r8a779a0-sysc.c diff --git a/drivers/genpd/renesas/r8a779f0-sysc.c b/drivers/pmdomain/renesas/r8a779f0-sysc.c index 5602aa6bd7ed..5602aa6bd7ed 100644 --- a/drivers/genpd/renesas/r8a779f0-sysc.c +++ b/drivers/pmdomain/renesas/r8a779f0-sysc.c diff --git a/drivers/genpd/renesas/r8a779g0-sysc.c b/drivers/pmdomain/renesas/r8a779g0-sysc.c index b932eba1b804..b932eba1b804 100644 --- a/drivers/genpd/renesas/r8a779g0-sysc.c +++ b/drivers/pmdomain/renesas/r8a779g0-sysc.c diff --git a/drivers/genpd/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c index 9e5e6e077abc..9e5e6e077abc 100644 --- a/drivers/genpd/renesas/rcar-gen4-sysc.c +++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c diff --git a/drivers/genpd/renesas/rcar-gen4-sysc.h b/drivers/pmdomain/renesas/rcar-gen4-sysc.h index 388cfa8f8f9f..388cfa8f8f9f 100644 --- a/drivers/genpd/renesas/rcar-gen4-sysc.h +++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.h diff --git a/drivers/genpd/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c index eed47696e825..eed47696e825 100644 --- a/drivers/genpd/renesas/rcar-sysc.c +++ b/drivers/pmdomain/renesas/rcar-sysc.c diff --git a/drivers/genpd/renesas/rcar-sysc.h b/drivers/pmdomain/renesas/rcar-sysc.h index 266c599a0a9b..266c599a0a9b 100644 --- a/drivers/genpd/renesas/rcar-sysc.h +++ b/drivers/pmdomain/renesas/rcar-sysc.h diff --git a/drivers/genpd/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c index 912daadaa10d..912daadaa10d 100644 --- a/drivers/genpd/renesas/rmobile-sysc.c +++ b/drivers/pmdomain/renesas/rmobile-sysc.c diff --git a/drivers/genpd/rockchip/Makefile b/drivers/pmdomain/rockchip/Makefile index 8fb9d88a3492..8fb9d88a3492 100644 --- a/drivers/genpd/rockchip/Makefile +++ b/drivers/pmdomain/rockchip/Makefile diff --git a/drivers/genpd/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c index d5d3ecb38283..d5d3ecb38283 100644 --- a/drivers/genpd/rockchip/pm-domains.c +++ b/drivers/pmdomain/rockchip/pm-domains.c diff --git a/drivers/genpd/samsung/Makefile b/drivers/pmdomain/samsung/Makefile index 397aa5908c1d..397aa5908c1d 100644 --- a/drivers/genpd/samsung/Makefile +++ b/drivers/pmdomain/samsung/Makefile diff --git a/drivers/genpd/samsung/exynos-pm-domains.c b/drivers/pmdomain/samsung/exynos-pm-domains.c index 9b502e8751d1..9b502e8751d1 100644 --- a/drivers/genpd/samsung/exynos-pm-domains.c +++ b/drivers/pmdomain/samsung/exynos-pm-domains.c diff --git a/drivers/genpd/st/Makefile b/drivers/pmdomain/st/Makefile index 
8fa5f9855460..8fa5f9855460 100644 --- a/drivers/genpd/st/Makefile +++ b/drivers/pmdomain/st/Makefile diff --git a/drivers/genpd/st/ste-ux500-pm-domain.c b/drivers/pmdomain/st/ste-ux500-pm-domain.c index 3d4f111ed156..3d4f111ed156 100644 --- a/drivers/genpd/st/ste-ux500-pm-domain.c +++ b/drivers/pmdomain/st/ste-ux500-pm-domain.c diff --git a/drivers/genpd/starfive/Makefile b/drivers/pmdomain/starfive/Makefile index 975bba2a29a9..975bba2a29a9 100644 --- a/drivers/genpd/starfive/Makefile +++ b/drivers/pmdomain/starfive/Makefile diff --git a/drivers/genpd/starfive/jh71xx-pmu.c b/drivers/pmdomain/starfive/jh71xx-pmu.c index 7d5f50d71c0d..7d5f50d71c0d 100644 --- a/drivers/genpd/starfive/jh71xx-pmu.c +++ b/drivers/pmdomain/starfive/jh71xx-pmu.c diff --git a/drivers/genpd/sunxi/Makefile b/drivers/pmdomain/sunxi/Makefile index ec1d7a2fb21d..ec1d7a2fb21d 100644 --- a/drivers/genpd/sunxi/Makefile +++ b/drivers/pmdomain/sunxi/Makefile diff --git a/drivers/genpd/sunxi/sun20i-ppu.c b/drivers/pmdomain/sunxi/sun20i-ppu.c index 8700f9dd5f75..8700f9dd5f75 100644 --- a/drivers/genpd/sunxi/sun20i-ppu.c +++ b/drivers/pmdomain/sunxi/sun20i-ppu.c diff --git a/drivers/genpd/tegra/Makefile b/drivers/pmdomain/tegra/Makefile index ec8acfd2c77c..ec8acfd2c77c 100644 --- a/drivers/genpd/tegra/Makefile +++ b/drivers/pmdomain/tegra/Makefile diff --git a/drivers/genpd/tegra/powergate-bpmp.c b/drivers/pmdomain/tegra/powergate-bpmp.c index 179ed895c279..179ed895c279 100644 --- a/drivers/genpd/tegra/powergate-bpmp.c +++ b/drivers/pmdomain/tegra/powergate-bpmp.c diff --git a/drivers/genpd/ti/Makefile b/drivers/pmdomain/ti/Makefile index 69580afbb436..69580afbb436 100644 --- a/drivers/genpd/ti/Makefile +++ b/drivers/pmdomain/ti/Makefile diff --git a/drivers/genpd/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c index c2feae3a634c..c2feae3a634c 100644 --- a/drivers/genpd/ti/omap_prm.c +++ b/drivers/pmdomain/ti/omap_prm.c diff --git a/drivers/genpd/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c index 34645104fe45..34645104fe45 100644 --- a/drivers/genpd/ti/ti_sci_pm_domains.c +++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c diff --git a/drivers/genpd/xilinx/Makefile b/drivers/pmdomain/xilinx/Makefile index a706ab699cfa..a706ab699cfa 100644 --- a/drivers/genpd/xilinx/Makefile +++ b/drivers/pmdomain/xilinx/Makefile diff --git a/drivers/genpd/xilinx/zynqmp-pm-domains.c b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c index 69d03ad4cf1e..69d03ad4cf1e 100644 --- a/drivers/genpd/xilinx/zynqmp-pm-domains.c +++ b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 7f9b221e7c34..ea9b42225e62 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -6073,7 +6073,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) phba->hba_debugfs_root, phba, &lpfc_debugfs_op_multixripools); - if (!phba->debug_multixri_pools) { + if (IS_ERR(phba->debug_multixri_pools)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0527 Cannot create debugfs multixripools\n"); goto debug_failed; @@ -6085,7 +6085,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_cgn_buffer_op); - if (!phba->debug_cgn_buffer) { + if (IS_ERR(phba->debug_cgn_buffer)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6527 Cannot create debugfs " "cgn_buffer\n"); @@ -6098,7 +6098,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG | 0644, 
phba->hba_debugfs_root, phba, &lpfc_rx_monitor_op); - if (!phba->debug_rx_monitor) { + if (IS_ERR(phba->debug_rx_monitor)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6528 Cannot create debugfs " "rx_monitor\n"); @@ -6111,7 +6111,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_ras_log); - if (!phba->debug_ras_log) { + if (IS_ERR(phba->debug_ras_log)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "6148 Cannot create debugfs" " ras_log\n"); @@ -6132,7 +6132,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG | 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_lockstat); - if (!phba->debug_lockstat) { + if (IS_ERR(phba->debug_lockstat)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "4610 Can't create debugfs lockstat\n"); goto debug_failed; @@ -6358,7 +6358,7 @@ nvmeio_off: debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_scsistat); - if (!vport->debug_scsistat) { + if (IS_ERR(vport->debug_scsistat)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "4611 Cannot create debugfs scsistat\n"); goto debug_failed; @@ -6369,7 +6369,7 @@ nvmeio_off: debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_ioktime); - if (!vport->debug_ioktime) { + if (IS_ERR(vport->debug_ioktime)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "0815 Cannot create debugfs ioktime\n"); goto debug_failed; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 51afb60859eb..5154eeaee0ec 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -199,11 +199,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* Only 1 thread can drop the initial node reference. If * another thread has set NLP_DROPPED, this thread is done. */ - if (!(ndlp->nlp_flag & NLP_DROPPED)) { + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) && + !(ndlp->nlp_flag & NLP_DROPPED)) { ndlp->nlp_flag |= NLP_DROPPED; spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); - spin_lock_irqsave(&ndlp->lock, iflags); + return; } spin_unlock_irqrestore(&ndlp->lock, iflags); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 39acbcb7ec66..96e11a26c297 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -228,8 +228,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) spin_unlock_irq(&ndlp->lock); /* On a devloss timeout event, one more put is executed provided the - * NVME and SCSI rport unregister requests are complete. If the vport - * is unloading, this extra put is executed by lpfc_drop_node. + * NVME and SCSI rport unregister requests are complete. */ if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); @@ -2567,11 +2566,7 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * nvme_transport perspective. Loss of an rport just means IO cannot * be sent and recovery is completely up to the initator. * For now, the driver just unbinds the DID and port_role so that - * no further IO can be issued. Changes are planned for later. - * - * Notes - the ndlp reference count is not decremented here since - * since there is no nvme_transport api for devloss. Node ref count - * is only adjusted in driver unload. + * no further IO can be issued. 
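The lpfc hunks around this point guard the final node reference drop with an NLP_DROPPED flag set under the node lock, so that only the thread that first sets the flag performs the last put. A minimal sketch of that idiom follows; my_node, MY_DROPPED, my_node_release() and my_node_drop_initial_ref() are invented names for illustration only and are not part of the lpfc driver, which uses lpfc_nlp_put() and its own nodelist structures.

    #include <linux/kref.h>
    #include <linux/spinlock.h>

    #define MY_DROPPED	0x01

    struct my_node {
    	spinlock_t	lock;
    	unsigned int	flags;
    	struct kref	kref;
    };

    static void my_node_release(struct kref *kref)
    {
    	/* Final teardown of the node would happen here. */
    }

    /* Drop the initial reference exactly once, no matter how many callers race. */
    static void my_node_drop_initial_ref(struct my_node *node)
    {
    	unsigned long iflags;

    	spin_lock_irqsave(&node->lock, iflags);
    	if (node->flags & MY_DROPPED) {
    		/* Another thread already dropped it; nothing to do. */
    		spin_unlock_irqrestore(&node->lock, iflags);
    		return;
    	}
    	node->flags |= MY_DROPPED;
    	spin_unlock_irqrestore(&node->lock, iflags);

    	/* The put happens outside the lock, as in the lpfc hunks. */
    	kref_put(&node->kref, my_node_release);
    }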
*/ void lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) @@ -2646,6 +2641,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) "6167 NVME unregister failed %d " "port_state x%x\n", ret, remoteport->port_state); + + if (vport->load_flag & FC_UNLOADING) { + /* Only 1 thread can drop the initial node + * reference. Check if another thread has set + * NLP_DROPPED. + */ + spin_lock_irq(&ndlp->lock); + if (!(ndlp->nlp_flag & NLP_DROPPED)) { + ndlp->nlp_flag |= NLP_DROPPED; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); + return; + } + spin_unlock_irq(&ndlp->lock); + } } } return; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 3554f6b07727..94abba57582d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2332,7 +2332,7 @@ struct megasas_instance { u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */ bool use_seqnum_jbod_fp; /* Added for PD sequence */ bool smp_affinity_enable; - spinlock_t crashdump_lock; + struct mutex crashdump_lock; struct megasas_register_set __iomem *reg_set; u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b9d46dcb5210..e1aa667dae66 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3271,14 +3271,13 @@ fw_crash_buffer_store(struct device *cdev, struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; - unsigned long flags; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); instance->fw_crash_buffer_offset = val; - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return strlen(buf); } @@ -3293,24 +3292,23 @@ fw_crash_buffer_show(struct device *cdev, unsigned long dmachunk = CRASH_DMA_BUF_SIZE; unsigned long chunk_left_bytes; unsigned long src_addr; - unsigned long flags; u32 buff_offset; - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); buff_offset = instance->fw_crash_buffer_offset; if (!instance->crash_dump_buf || !((instance->fw_crash_state == AVAILABLE) || (instance->fw_crash_state == COPYING))) { dev_err(&instance->pdev->dev, "Firmware crash dump is not available\n"); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return -EINVAL; } if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { dev_err(&instance->pdev->dev, "Firmware crash dump offset is out of range\n"); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return 0; } @@ -3322,7 +3320,7 @@ fw_crash_buffer_show(struct device *cdev, src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + (buff_offset % dmachunk); memcpy(buf, (void *)src_addr, size); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); return size; } @@ -3347,7 +3345,6 @@ fw_crash_state_store(struct device *cdev, struct megasas_instance *instance = (struct megasas_instance *) shost->hostdata; int val = 0; - unsigned long flags; if (kstrtoint(buf, 0, &val) != 0) return -EINVAL; @@ -3361,9 +3358,9 @@ fw_crash_state_store(struct device *cdev, instance->fw_crash_state = val; if ((val == COPIED) 
|| (val == COPY_ERROR)) { - spin_lock_irqsave(&instance->crashdump_lock, flags); + mutex_lock(&instance->crashdump_lock); megasas_free_host_crash_buffer(instance); - spin_unlock_irqrestore(&instance->crashdump_lock, flags); + mutex_unlock(&instance->crashdump_lock); if (val == COPY_ERROR) dev_info(&instance->pdev->dev, "application failed to " "copy Firmware crash dump\n"); @@ -7422,7 +7419,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance) init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); - spin_lock_init(&instance->crashdump_lock); + mutex_init(&instance->crashdump_lock); spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->stream_lock); diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 33053db5a713..90069c7b1642 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4180,7 +4180,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, - pm8001_ha->sas_addr, SAS_ADDR_SIZE); + &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 5e5ce1e74c3b..443a3176c6c0 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -273,7 +273,6 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) return ret; } -static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha); static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha); /** @@ -294,13 +293,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n", pm8001_ha->chip->n_phy); - /* Setup Interrupt */ - rc = pm8001_setup_irq(pm8001_ha); - if (rc) { - pm8001_dbg(pm8001_ha, FAIL, - "pm8001_setup_irq failed [ret: %d]\n", rc); - goto err_out; - } /* Request Interrupt */ rc = pm8001_request_irq(pm8001_ha); if (rc) @@ -1031,47 +1023,38 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha) } #endif -static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha) -{ - struct pci_dev *pdev; - - pdev = pm8001_ha->pdev; - -#ifdef PM8001_USE_MSIX - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha); - pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); -#endif - return 0; -} - /** * pm8001_request_irq - register interrupt * @pm8001_ha: our ha struct. 
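The pm8001 rework that follows folds interrupt setup into a single request path: probe for MSI-X first and, if that is unavailable, fall back to a shared INT-X line. Below is a generic sketch of the same pattern using the managed PCI IRQ helpers; my_irq_handler and drv_data are placeholders, and the real driver keeps its own pm8001_setup_msix()/pm8001_request_msix() helpers rather than pci_alloc_irq_vectors().

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
    	/* Acknowledge and handle the hardware event here. */
    	return IRQ_HANDLED;
    }

    static int my_request_irq(struct pci_dev *pdev, void *drv_data)
    {
    	int nvec, irq, rc;

    	/* Prefer MSI-X, then MSI, then the legacy INT-X line. */
    	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
    	if (nvec < 0)
    		return nvec;

    	irq = pci_irq_vector(pdev, 0);
    	/* IRQF_SHARED only really matters for the INT-X fallback. */
    	rc = request_irq(irq, my_irq_handler, IRQF_SHARED, "my-driver", drv_data);
    	if (rc)
    		pci_free_irq_vectors(pdev);
    	return rc;
    }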
*/ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { - struct pci_dev *pdev; + struct pci_dev *pdev = pm8001_ha->pdev; +#ifdef PM8001_USE_MSIX int rc; - pdev = pm8001_ha->pdev; + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + rc = pm8001_setup_msix(pm8001_ha); + if (rc) { + pm8001_dbg(pm8001_ha, FAIL, + "pm8001_setup_irq failed [ret: %d]\n", rc); + return rc; + } -#ifdef PM8001_USE_MSIX - if (pdev->msix_cap && pci_msi_enabled()) - return pm8001_request_msix(pm8001_ha); - else { - pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); - goto intx; + if (pdev->msix_cap && pci_msi_enabled()) + return pm8001_request_msix(pm8001_ha); } + + pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n"); #endif -intx: /* initialize the INT-X interrupt */ pm8001_ha->irq_vector[0].irq_id = 0; pm8001_ha->irq_vector[0].drv_inst = pm8001_ha; - rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, - pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost)); - return rc; + + return request_irq(pdev->irq, pm8001_interrupt_handler_intx, + IRQF_SHARED, pm8001_ha->name, + SHOST_TO_SAS_HA(pm8001_ha->shost)); } /** diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index f6857632dc7c..3afd9443c425 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -3671,10 +3671,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, (struct set_ctrl_cfg_resp *)(piomb + 4); u32 status = le32_to_cpu(pPayload->status); u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + u32 tag = le32_to_cpu(pPayload->tag); pm8001_dbg(pm8001_ha, MSG, "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", status, err_qlfr_pgcd); + pm8001_tag_free(pm8001_ha, tag); return 0; } @@ -4671,7 +4673,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, - &pm8001_ha->sas_addr, SAS_ADDR_SIZE); + &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload, diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 19f0b93fa3d8..d592ee9170c1 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -307,9 +307,9 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len) case PPA_EPP_8: epp_reset(ppb); w_ctr(ppb, 0x4); - if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01)) + if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03)) outsl(ppb + 4, buffer, len >> 2); - else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03)) + else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01)) outsw(ppb + 4, buffer, len >> 1); else outsb(ppb + 4, buffer, len); diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 4750ec5789a8..10fe3383855c 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) goto drop_rdata_kref; } + spin_lock_irqsave(&fcport->rport_lock, flags); if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { @@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n", 
io_req->xid, io_req->sc_cmd); rc = 1; + spin_unlock_irqrestore(&fcport->rport_lock, flags); goto drop_rdata_kref; } + /* Set the command type to abort */ + io_req->cmd_type = QEDF_ABTS; + spin_unlock_irqrestore(&fcport->rport_lock, flags); + kref_get(&io_req->refcount); xid = io_req->xid; qedf->control_requests++; qedf->packet_aborts++; - /* Set the command type to abort */ - io_req->cmd_type = QEDF_ABTS; io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); @@ -2210,7 +2214,9 @@ process_els: refcount, fcport, fcport->rdata->ids.port_id); /* Cleanup cmds re-use the same TID as the original I/O */ + spin_lock_irqsave(&fcport->rport_lock, flags); io_req->cmd_type = QEDF_CLEANUP; + spin_unlock_irqrestore(&fcport->rport_lock, flags); io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; init_completion(&io_req->cleanup_done); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7825765c936c..91f3f1d7098e 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2805,6 +2805,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) struct qedf_ioreq *io_req; struct qedf_rport *fcport; u32 comp_type; + u8 io_comp_type; + unsigned long flags; comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & FCOE_CQE_CQE_TYPE_MASK; @@ -2838,11 +2840,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) return; } + spin_lock_irqsave(&fcport->rport_lock, flags); + io_comp_type = io_req->cmd_type; + spin_unlock_irqrestore(&fcport->rport_lock, flags); switch (comp_type) { case FCOE_GOOD_COMPLETION_CQE_TYPE: atomic_inc(&fcport->free_sqes); - switch (io_req->cmd_type) { + switch (io_comp_type) { case QEDF_SCSI_CMD: qedf_scsi_completion(qedf, cqe, io_req); break; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index f060e593685d..a7a364760b80 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name)); fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root); - if (!fp->dfs_rport_dir) + if (IS_ERR(fp->dfs_rport_dir)) return; if (NVME_TARGET(vha->hw, fp)) debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir, @@ -708,14 +708,14 @@ create_nodes: if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) { ha->tgt.dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir, vha, &dfs_naqp_ops); - if (!ha->tgt.dfs_naqp) { + if (IS_ERR(ha->tgt.dfs_naqp)) { ql_log(ql_log_warn, vha, 0xd011, "Unable to create debugFS naqp node.\n"); goto out; } } vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir); - if (!vha->dfs_rport_root) { + if (IS_ERR(vha->dfs_rport_root)) { ql_log(ql_log_warn, vha, 0xd012, "Unable to create debugFS rports node.\n"); goto out; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 0556969f6dc1..a4a56ab0ba74 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -577,7 +577,7 @@ fcport_is_bigger(fc_port_t *fcport) static inline struct qla_qpair * qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair) { - int cpuid = smp_processor_id(); + int cpuid = raw_smp_processor_id(); if (qpair->cpuid != cpuid && ha->qp_cpu_map[cpuid]) { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e98788191897..d48007e18288 100644 --- 
a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3965,7 +3965,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!ha->flags.fw_started) return; - if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { + if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) { rsp->qpair->rcv_intr = 1; if (!rsp->qpair->cpu_mapped) @@ -4468,7 +4468,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id) } ha = qpair->hw; - queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); + queue_work(ha->wq, &qpair->q_work); return IRQ_HANDLED; } @@ -4494,7 +4494,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); - queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); + queue_work(ha->wq, &qpair->q_work); return IRQ_HANDLED; } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index db753d712991..a8ddf356e662 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -399,14 +399,14 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport, nvme->u.nvme.dl = 0; nvme->u.nvme.timeout_sec = 0; nvme->u.nvme.cmd_dma = fd_resp->rspdma; - nvme->u.nvme.cmd_len = fd_resp->rsplen; + nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen); nvme->u.nvme.rsp_len = 0; nvme->u.nvme.rsp_dma = 0; nvme->u.nvme.exchange_address = uctx->exchange_address; nvme->u.nvme.nport_handle = uctx->nport_handle; nvme->u.nvme.ox_id = uctx->ox_id; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, - le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE); + fd_resp->rsplen, DMA_TO_DEVICE); ql_dbg(ql_dbg_unsol, vha, 0x2122, "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n", @@ -504,13 +504,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, nvme->u.nvme.desc = fd; nvme->u.nvme.dir = 0; nvme->u.nvme.dl = 0; - nvme->u.nvme.cmd_len = fd->rqstlen; - nvme->u.nvme.rsp_len = fd->rsplen; + nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen); + nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen); nvme->u.nvme.rsp_dma = fd->rspdma; nvme->u.nvme.timeout_sec = fd->timeout; nvme->u.nvme.cmd_dma = fd->rqstdma; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, - le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE); + fd->rqstlen, DMA_TO_DEVICE); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 2b815a9928ea..2ef2dbac0db2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4425,8 +4425,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); } else if (ha->msix_count) { if (cmd->atio.u.isp24.fcp_cmnd.rddata) - queue_work_on(smp_processor_id(), qla_tgt_wq, - &cmd->work); + queue_work(qla_tgt_wq, &cmd->work); else queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 3b5ba4b47b3b..68a0e6a2fb6e 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) cmd->trc_flags |= TRC_CMD_DONE; INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); - queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); } /* @@ -547,7 +547,7 @@ static void
tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) cmd->trc_flags |= TRC_DATA_IN; cmd->cmd_in_wq = 1; INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); - queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); } static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 936e5ff1b209..d5860c1c1f46 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1392,16 +1392,16 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ unsigned char buf[INQUIRY_VENDOR_LEN + 2]; char *stripped = NULL; - size_t len; + ssize_t len; ssize_t ret; - len = strlcpy(buf, page, sizeof(buf)); - if (len < sizeof(buf)) { + len = strscpy(buf, page, sizeof(buf)); + if (len > 0) { /* Strip any newline added from userspace. */ stripped = strstrip(buf); len = strlen(stripped); } - if (len > INQUIRY_VENDOR_LEN) { + if (len < 0 || len > INQUIRY_VENDOR_LEN) { pr_err("Emulated T10 Vendor Identification exceeds" " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN) "\n"); @@ -1448,16 +1448,16 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ unsigned char buf[INQUIRY_MODEL_LEN + 2]; char *stripped = NULL; - size_t len; + ssize_t len; ssize_t ret; - len = strlcpy(buf, page, sizeof(buf)); - if (len < sizeof(buf)) { + len = strscpy(buf, page, sizeof(buf)); + if (len > 0) { /* Strip any newline added from userspace. */ stripped = strstrip(buf); len = strlen(stripped); } - if (len > INQUIRY_MODEL_LEN) { + if (len < 0 || len > INQUIRY_MODEL_LEN) { pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: " __stringify(INQUIRY_MODEL_LEN) "\n"); @@ -1504,16 +1504,16 @@ static ssize_t target_wwn_revision_store(struct config_item *item, /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ unsigned char buf[INQUIRY_REVISION_LEN + 2]; char *stripped = NULL; - size_t len; + ssize_t len; ssize_t ret; - len = strlcpy(buf, page, sizeof(buf)); - if (len < sizeof(buf)) { + len = strscpy(buf, page, sizeof(buf)); + if (len > 0) { /* Strip any newline added from userspace. 
*/ stripped = strstrip(buf); len = strlen(stripped); } - if (len > INQUIRY_REVISION_LEN) { + if (len < 0 || len > INQUIRY_REVISION_LEN) { pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: " __stringify(INQUIRY_REVISION_LEN) "\n"); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 687adc9e086c..0686882bcbda 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -264,6 +264,7 @@ void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt) percpu_ref_put(&cmd_cnt->refcnt); percpu_ref_exit(&cmd_cnt->refcnt); + kfree(cmd_cnt); } EXPORT_SYMBOL_GPL(target_free_cmd_counter); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 8717a3343512..58533ea75cd9 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -348,12 +348,14 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip_id) struct thermal_trip trip; /* Ignore disabled trip points */ - if (test_bit(trip_id, &tz->trips_disabled) || - trip.temperature == THERMAL_TEMP_INVALID) + if (test_bit(trip_id, &tz->trips_disabled)) return; __thermal_zone_get_trip(tz, trip_id, &trip); + if (trip.temperature == THERMAL_TEMP_INVALID) + return; + if (tz->last_temperature != THERMAL_TEMP_INVALID) { if (tz->last_temperature < trip.temperature && tz->temperature >= trip.temperature) diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 4ca905723429..1e0655b63259 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -37,8 +37,10 @@ static int of_find_trip_id(struct device_node *np, struct device_node *trip) */ for_each_child_of_node(trips, t) { - if (t == trip) + if (t == trip) { + of_node_put(t); goto out; + } i++; } @@ -401,8 +403,10 @@ static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz, for_each_child_of_node(cm_np, child) { ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action); - if (ret) + if (ret) { + of_node_put(child); break; + } } of_node_put(cm_np); diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 6ba2613627e1..0cf0826b805a 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -110,7 +110,8 @@ static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *tem } static int __ti_thermal_get_trend(struct thermal_zone_device *tz, - struct thermal_trip *trip, enum thermal_trend *trend) + const struct thermal_trip *trip, + enum thermal_trend *trend) { struct ti_thermal_data *data = thermal_zone_device_priv(tz); struct ti_bandgap *bgp; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 93417518c04d..c2df07545f96 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/regulator/consumer.h> #include <linux/sched/clock.h> +#include <linux/iopoll.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_driver.h> @@ -2299,7 +2300,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) */ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) { - return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY; + u32 val; + int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY, + 500, UIC_CMD_TIMEOUT * 1000, false, hba, + REG_CONTROLLER_STATUS); + return ret == 0 ? 
true : false; } /** @@ -2392,7 +2397,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, bool completion) { lockdep_assert_held(&hba->uic_cmd_mutex); - lockdep_assert_held(hba->host->host_lock); if (!ufshcd_ready_for_uic_cmd(hba)) { dev_err(hba->dev, @@ -2419,7 +2423,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { int ret; - unsigned long flags; if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) return 0; @@ -2428,9 +2431,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); - spin_lock_irqsave(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); - spin_unlock_irqrestore(hba->host->host_lock, flags); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -4133,8 +4134,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) wmb(); reenable_intr = true; } - ret = __ufshcd_send_uic_cmd(hba, cmd, false); spin_unlock_irqrestore(hba->host->host_lock, flags); + ret = __ufshcd_send_uic_cmd(hba, cmd, false); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c index 0c7bf88d4a7f..f67733cecfdf 100644 --- a/drivers/usb/typec/ucsi/debugfs.c +++ b/drivers/usb/typec/ucsi/debugfs.c @@ -84,6 +84,9 @@ void ucsi_debugfs_register(struct ucsi *ucsi) void ucsi_debugfs_unregister(struct ucsi *ucsi) { + if (IS_ERR_OR_NULL(ucsi) || !ucsi->debugfs) + return; + debugfs_remove_recursive(ucsi->debugfs->dentry); kfree(ucsi->debugfs); } diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index c1de8a92e144..b2d76c1784bd 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c @@ -551,7 +551,7 @@ static struct i2c_driver ds2482_driver = { .driver = { .name = "ds2482", }, - .probe_new = ds2482_probe, + .probe = ds2482_probe, .remove = ds2482_remove, .id_table = ds2482_id, }; diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 3282adc84d52..a25c9910d90b 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -31,7 +31,7 @@ config BTRFS_FS continue to be mountable and usable by newer kernels. For more information, please see the web pages at - http://btrfs.wiki.kernel.org. + https://btrfs.readthedocs.io To compile this file system support as a module, choose M here. The module will be called btrfs. diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 0cb1dee965a0..b2e5107b7cec 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -3028,8 +3028,16 @@ static int update_block_group_item(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); fail: btrfs_release_path(path); - /* We didn't update the block group item, need to revert @commit_used. */ - if (ret < 0) { + /* + * We didn't update the block group item, need to revert commit_used + * unless the block group item didn't exist yet - this is to prevent a + * race with a concurrent insertion of the block group item, with + * insert_block_group_item(), that happened just after we attempted to + * update. In that case we would reset commit_used to 0 just after the + * insertion set it to a value greater than 0 - if the block group later + * becomes with 0 used bytes, we would incorrectly skip its update. 
+ */ + if (ret < 0 && ret != -ENOENT) { spin_lock(&cache->lock); cache->commit_used = old_commit_used; spin_unlock(&cache->lock); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 53c1211dd60b..caf0bbd028d1 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -412,6 +412,7 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root) static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) { + struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node; struct rb_root_cached *root; struct btrfs_delayed_root *delayed_root; @@ -419,18 +420,21 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) if (RB_EMPTY_NODE(&delayed_item->rb_node)) return; - delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; + /* If it's in a rbtree, then we need to have delayed node locked. */ + lockdep_assert_held(&delayed_node->mutex); + + delayed_root = delayed_node->root->fs_info->delayed_root; BUG_ON(!delayed_root); if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM) - root = &delayed_item->delayed_node->ins_root; + root = &delayed_node->ins_root; else - root = &delayed_item->delayed_node->del_root; + root = &delayed_node->del_root; rb_erase_cached(&delayed_item->rb_node, root); RB_CLEAR_NODE(&delayed_item->rb_node); - delayed_item->delayed_node->count--; + delayed_node->count--; finish_one_item(delayed_root); } @@ -1153,20 +1157,33 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr) ret = __btrfs_commit_inode_delayed_items(trans, path, curr_node); if (ret) { - btrfs_release_delayed_node(curr_node); - curr_node = NULL; btrfs_abort_transaction(trans, ret); break; } prev_node = curr_node; curr_node = btrfs_next_delayed_node(curr_node); + /* + * See the comment below about releasing path before releasing + * node. If the commit of delayed items was successful the path + * should always be released, but in case of an error, it may + * point to locked extent buffers (a leaf at the very least). + */ + ASSERT(path->nodes[0] == NULL); btrfs_release_delayed_node(prev_node); } + /* + * Release the path to avoid a potential deadlock and lockdep splat when + * releasing the delayed node, as that requires taking the delayed node's + * mutex. If another task starts running delayed items before we take + * the mutex, it will first lock the mutex and then it may try to lock + * the same btree path (leaf). + */ + btrfs_free_path(path); + if (curr_node) btrfs_release_delayed_node(curr_node); - btrfs_free_path(path); trans->block_rsv = block_rsv; return ret; @@ -1413,7 +1430,29 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info) btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH); } -/* Will return 0 or -ENOMEM */ +static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); + + if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) + return; + + /* + * Adding the new dir index item does not require touching another + * leaf, so we can release 1 unit of metadata that was previously + * reserved when starting the transaction. This applies only to + * the case where we had a transaction start and excludes the + * transaction join case (when replaying log trees). 
+ */ + trace_btrfs_space_reservation(fs_info, "transaction", + trans->transid, bytes, 0); + btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL); + ASSERT(trans->bytes_reserved >= bytes); + trans->bytes_reserved -= bytes; +} + +/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, const char *name, int name_len, struct btrfs_inode *dir, @@ -1455,6 +1494,27 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, mutex_lock(&delayed_node->mutex); + /* + * First attempt to insert the delayed item. This is to make the error + * handling path simpler in case we fail (-EEXIST). There's no risk of + * any other task coming in and running the delayed item before we do + * the metadata space reservation below, because we are holding the + * delayed node's mutex and that mutex must also be locked before the + * node's delayed items can be run. + */ + ret = __btrfs_add_delayed_item(delayed_node, delayed_item); + if (unlikely(ret)) { + btrfs_err(trans->fs_info, +"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d", + name_len, name, index, btrfs_root_id(delayed_node->root), + delayed_node->inode_id, dir->index_cnt, + delayed_node->index_cnt, ret); + btrfs_release_delayed_item(delayed_item); + btrfs_release_dir_index_item_space(trans); + mutex_unlock(&delayed_node->mutex); + goto release_node; + } + if (delayed_node->index_item_leaves == 0 || delayed_node->curr_index_batch_size + data_len > leaf_data_size) { delayed_node->curr_index_batch_size = data_len; @@ -1472,36 +1532,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, * impossible. */ if (WARN_ON(ret)) { - mutex_unlock(&delayed_node->mutex); btrfs_release_delayed_item(delayed_item); + mutex_unlock(&delayed_node->mutex); goto release_node; } delayed_node->index_item_leaves++; - } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { - const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1); - - /* - * Adding the new dir index item does not require touching another - * leaf, so we can release 1 unit of metadata that was previously - * reserved when starting the transaction. This applies only to - * the case where we had a transaction start and excludes the - * transaction join case (when replaying log trees). 
- */ - trace_btrfs_space_reservation(fs_info, "transaction", - trans->transid, bytes, 0); - btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL); - ASSERT(trans->bytes_reserved >= bytes); - trans->bytes_reserved -= bytes; - } - - ret = __btrfs_add_delayed_item(delayed_node, delayed_item); - if (unlikely(ret)) { - btrfs_err(trans->fs_info, - "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)", - name_len, name, delayed_node->root->root_key.objectid, - delayed_node->inode_id, ret); - BUG(); + } else { + btrfs_release_dir_index_item_space(trans); } mutex_unlock(&delayed_node->mutex); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0a96ea8c1d3a..68f60d50e1fd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -520,6 +520,7 @@ static bool btree_dirty_folio(struct address_space *mapping, struct folio *folio) { struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb); + struct btrfs_subpage_info *spi = fs_info->subpage_info; struct btrfs_subpage *subpage; struct extent_buffer *eb; int cur_bit = 0; @@ -533,18 +534,19 @@ static bool btree_dirty_folio(struct address_space *mapping, btrfs_assert_tree_write_locked(eb); return filemap_dirty_folio(mapping, folio); } + + ASSERT(spi); subpage = folio_get_private(folio); - ASSERT(subpage->dirty_bitmap); - while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) { + for (cur_bit = spi->dirty_offset; + cur_bit < spi->dirty_offset + spi->bitmap_nr_bits; + cur_bit++) { unsigned long flags; u64 cur; - u16 tmp = (1 << cur_bit); spin_lock_irqsave(&subpage->lock, flags); - if (!(tmp & subpage->dirty_bitmap)) { + if (!test_bit(cur_bit, subpage->bitmaps)) { spin_unlock_irqrestore(&subpage->lock, flags); - cur_bit++; continue; } spin_unlock_irqrestore(&subpage->lock, flags); @@ -557,7 +559,7 @@ static bool btree_dirty_folio(struct address_space *mapping, btrfs_assert_tree_write_locked(eb); free_extent_buffer(eb); - cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits); + cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1; } return filemap_dirty_folio(mapping, folio); } @@ -1547,7 +1549,7 @@ static int transaction_kthread(void *arg) delta = ktime_get_seconds() - cur->start_time; if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) && - cur->state < TRANS_STATE_COMMIT_START && + cur->state < TRANS_STATE_COMMIT_PREP && delta < fs_info->commit_interval) { spin_unlock(&fs_info->trans_lock); delay -= msecs_to_jiffies((delta - 1) * 1000); @@ -2682,8 +2684,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters); btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered); btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent); - btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start, - BTRFS_LOCKDEP_TRANS_COMMIT_START); + btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep, + BTRFS_LOCKDEP_TRANS_COMMIT_PREP); btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked, BTRFS_LOCKDEP_TRANS_UNBLOCKED); btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed, @@ -4870,7 +4872,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) while (!list_empty(&fs_info->trans_list)) { t = list_first_entry(&fs_info->trans_list, struct btrfs_transaction, list); - if (t->state >= TRANS_STATE_COMMIT_START) { + if (t->state >= TRANS_STATE_COMMIT_PREP) { refcount_inc(&t->use_count); spin_unlock(&fs_info->trans_lock); btrfs_wait_for_commit(fs_info, t->transid); 
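Aside: the btree_dirty_folio() hunk above replaces an open-coded u16 mask walk with a bounded test_bit() loop over the subpage dirty bitmap, stepping one extent buffer at a time and subtracting 1 from the stride because the for loop's increment supplies the last step. Below is a minimal userspace sketch of that iteration pattern only; the sizes, the test_bit() helper and the offsets are illustrative stand-ins, not the kernel's subpage structures.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static bool test_bit(unsigned int nr, const unsigned long *addr)
{
        return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
        unsigned long bitmaps[2] = { 0 };
        const unsigned int dirty_offset = 16;   /* start of the dirty bitmap window (assumed) */
        const unsigned int nr_bits = 16;        /* sectors tracked per folio (assumed) */
        const unsigned int sectors_per_eb = 4;  /* nodesize >> sectorsize_bits (assumed) */
        unsigned int cur_bit;

        /* Mark the first sector of two extent buffers dirty. */
        bitmaps[0] |= 1UL << (dirty_offset + 0);
        bitmaps[0] |= 1UL << (dirty_offset + 8);

        /* Scan only [dirty_offset, dirty_offset + nr_bits), like the new loop. */
        for (cur_bit = dirty_offset;
             cur_bit < dirty_offset + nr_bits;
             cur_bit++) {
                if (!test_bit(cur_bit, bitmaps))
                        continue;

                printf("dirty extent buffer at sector bit %u\n",
                       cur_bit - dirty_offset);
                /* Jump past this buffer; the loop's ++ adds the final step. */
                cur_bit += sectors_per_eb - 1;
        }
        return 0;
}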
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a18ee7b5a166..75ab766fe156 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1958,6 +1958,13 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap, goto out_put; } + /* + * We don't need the path anymore, so release it and + * avoid deadlocks and lockdep warnings in case + * btrfs_iget() needs to lookup the inode from its root + * btree and lock the same leaf. + */ + btrfs_release_path(path); temp_inode = btrfs_iget(sb, key2.objectid, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); @@ -1978,7 +1985,6 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap, goto out_put; } - btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index edb9b4a0dba1..7d6ee1e609bf 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -79,7 +79,7 @@ enum btrfs_lock_nesting { }; enum btrfs_lockdep_trans_states { - BTRFS_LOCKDEP_TRANS_COMMIT_START, + BTRFS_LOCKDEP_TRANS_COMMIT_PREP, BTRFS_LOCKDEP_TRANS_UNBLOCKED, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED, BTRFS_LOCKDEP_TRANS_COMPLETED, diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b46ab348e8e5..345c449d588c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -639,7 +639,7 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode, refcount_inc(&trans->use_count); spin_unlock(&fs_info->trans_lock); - ASSERT(trans); + ASSERT(trans || BTRFS_FS_ERROR(fs_info)); if (trans) { if (atomic_dec_and_test(&trans->pending_ordered)) wake_up(&trans->pending_wait); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 874e4394df86..0bf42dccb041 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -56,12 +56,17 @@ static struct kmem_cache *btrfs_trans_handle_cachep; * | Call btrfs_commit_transaction() on any trans handle attached to * | transaction N * V - * Transaction N [[TRANS_STATE_COMMIT_START]] + * Transaction N [[TRANS_STATE_COMMIT_PREP]] + * | + * | If there are simultaneous calls to btrfs_commit_transaction() one will win + * | the race and the rest will wait for the winner to commit the transaction. + * | + * | The winner will wait for previous running transaction to completely finish + * | if there is one. * | - * | Will wait for previous running transaction to completely finish if there - * | is one + * Transaction N [[TRANS_STATE_COMMIT_START]] * | - * | Then one of the following happes: + * | Then one of the following happens: * | - Wait for all other trans handle holders to release. * | The btrfs_commit_transaction() caller will do the commit work. * | - Wait for current transaction to be committed by others. 
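Aside: the updated transaction.c state diagram above introduces TRANS_STATE_COMMIT_PREP so that, when several tasks call btrfs_commit_transaction() at once, the first one to move the transaction into the PREP state does the commit work and the rest wait for it to finish. The pthreads program below is only an analogy for that control flow under those assumptions; it models none of the real btrfs locking, lockdep annotations, or the intermediate COMMIT_DOING/UNBLOCKED states.

#include <pthread.h>
#include <stdio.h>

enum trans_state { RUNNING, COMMIT_PREP, COMMIT_START, COMPLETED };

static enum trans_state state = RUNNING;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

static void *commit_transaction(void *arg)
{
        long id = (long)arg;

        pthread_mutex_lock(&lock);
        if (state >= COMMIT_PREP) {
                /* Lost the race: wait for the winner to complete the commit. */
                while (state != COMPLETED)
                        pthread_cond_wait(&done, &lock);
                pthread_mutex_unlock(&lock);
                printf("thread %ld: waited for existing commit\n", id);
                return NULL;
        }

        /* Won the race: publish COMMIT_PREP before the heavy lifting. */
        state = COMMIT_PREP;
        pthread_mutex_unlock(&lock);

        /* ... wait for a previous transaction, run delayed items, etc ... */

        pthread_mutex_lock(&lock);
        state = COMMIT_START;
        /* ... write out trees and the super block ... */
        state = COMPLETED;
        pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
        printf("thread %ld: performed the commit\n", id);
        return NULL;
}

int main(void)
{
        pthread_t t[3];

        for (long i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, commit_transaction, (void *)i);
        for (int i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        return 0;
}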
@@ -112,6 +117,7 @@ static struct kmem_cache *btrfs_trans_handle_cachep; */ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_RUNNING] = 0U, + [TRANS_STATE_COMMIT_PREP] = 0U, [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | @@ -1982,7 +1988,7 @@ void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans) * Wait for the current transaction commit to start and block * subsequent transaction joins */ - btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); + btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); wait_event(fs_info->transaction_blocked_wait, cur_trans->state >= TRANS_STATE_COMMIT_START || TRANS_ABORTED(cur_trans)); @@ -2129,7 +2135,7 @@ static void add_pending_snapshot(struct btrfs_trans_handle *trans) return; lockdep_assert_held(&trans->fs_info->trans_lock); - ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START); + ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP); list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots); } @@ -2153,7 +2159,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) ktime_t interval; ASSERT(refcount_read(&trans->use_count) == 1); - btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); + btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags); @@ -2213,7 +2219,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) } spin_lock(&fs_info->trans_lock); - if (cur_trans->state >= TRANS_STATE_COMMIT_START) { + if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) { enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; add_pending_snapshot(trans); @@ -2225,7 +2231,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) want_state = TRANS_STATE_SUPER_COMMITTED; btrfs_trans_state_lockdep_release(fs_info, - BTRFS_LOCKDEP_TRANS_COMMIT_START); + BTRFS_LOCKDEP_TRANS_COMMIT_PREP); ret = btrfs_end_transaction(trans); wait_for_commit(cur_trans, want_state); @@ -2237,9 +2243,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) return ret; } - cur_trans->state = TRANS_STATE_COMMIT_START; + cur_trans->state = TRANS_STATE_COMMIT_PREP; wake_up(&fs_info->transaction_blocked_wait); - btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); + btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); if (cur_trans->list.prev != &fs_info->trans_list) { enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; @@ -2260,11 +2266,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) btrfs_put_transaction(prev_trans); if (ret) goto lockdep_release; - } else { - spin_unlock(&fs_info->trans_lock); + spin_lock(&fs_info->trans_lock); } } else { - spin_unlock(&fs_info->trans_lock); /* * The previous transaction was aborted and was already removed * from the list of transactions at fs_info->trans_list. So we @@ -2272,11 +2276,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) * corrupt state (pointing to trees with unwritten nodes/leafs). 
*/ if (BTRFS_FS_ERROR(fs_info)) { + spin_unlock(&fs_info->trans_lock); ret = -EROFS; goto lockdep_release; } } + cur_trans->state = TRANS_STATE_COMMIT_START; + wake_up(&fs_info->transaction_blocked_wait); + spin_unlock(&fs_info->trans_lock); + /* * Get the time spent on the work done by the commit thread and not * the time spent waiting on a previous commit @@ -2586,7 +2595,7 @@ lockdep_release: goto cleanup_transaction; lockdep_trans_commit_start_release: - btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START); + btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); btrfs_end_transaction(trans); return ret; } diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 8e9fa23bd7fe..6b309f8a99a8 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -14,6 +14,7 @@ enum btrfs_trans_state { TRANS_STATE_RUNNING, + TRANS_STATE_COMMIT_PREP, TRANS_STATE_COMMIT_START, TRANS_STATE_COMMIT_DOING, TRANS_STATE_UNBLOCKED, diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index e028fafa04f3..996271473609 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -32,10 +32,16 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf) u64 storage_space, remaining_space, max_variable_size; efi_status_t status; - status = efivar_query_variable_info(attr, &storage_space, &remaining_space, - &max_variable_size); - if (status != EFI_SUCCESS) - return efi_status_to_err(status); + /* Some UEFI firmware does not implement QueryVariableInfo() */ + storage_space = remaining_space = 0; + if (efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO)) { + status = efivar_query_variable_info(attr, &storage_space, + &remaining_space, + &max_variable_size); + if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) + pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n", + status); + } /* * This is not a normal filesystem, so no point in pretending it has a block diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c91db9f57524..1e599305d85f 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -16,6 +16,7 @@ #include <linux/slab.h> #include <linux/nospec.h> #include <linux/backing-dev.h> +#include <linux/freezer.h> #include <trace/events/ext4.h> /* @@ -6906,6 +6907,21 @@ __acquires(bitlock) return ret; } +static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb, + ext4_group_t grp) +{ + if (grp < ext4_get_groups_count(sb)) + return EXT4_CLUSTERS_PER_GROUP(sb) - 1; + return (ext4_blocks_count(EXT4_SB(sb)->s_es) - + ext4_group_first_block_no(sb, grp) - 1) >> + EXT4_CLUSTER_BITS(sb); +} + +static bool ext4_trim_interrupted(void) +{ + return fatal_signal_pending(current) || freezing(current); +} + static int ext4_try_to_trim_range(struct super_block *sb, struct ext4_buddy *e4b, ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) @@ -6913,9 +6929,12 @@ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) { ext4_grpblk_t next, count, free_count; + bool set_trimmed = false; void *bitmap; bitmap = e4b->bd_bitmap; + if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group)) + set_trimmed = true; start = max(e4b->bd_info->bb_first_free, start); count = 0; free_count = 0; @@ -6930,16 +6949,14 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) int ret = ext4_trim_extent(sb, start, next - start, e4b); if (ret && ret != -EOPNOTSUPP) - break; + return count; count += next - start; } free_count += next - start; start = next + 1; - if 
(fatal_signal_pending(current)) { - count = -ERESTARTSYS; - break; - } + if (ext4_trim_interrupted()) + return count; if (need_resched()) { ext4_unlock_group(sb, e4b->bd_group); @@ -6951,6 +6968,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) break; } + if (set_trimmed) + EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info); + return count; } @@ -6961,7 +6981,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) * @start: first group block to examine * @max: last group block to examine * @minblocks: minimum extent block count - * @set_trimmed: set the trimmed flag if at least one block is trimmed * * ext4_trim_all_free walks through group's block bitmap searching for free * extents. When the free extent is found, mark it as used in group buddy @@ -6971,7 +6990,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) static ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_grpblk_t start, ext4_grpblk_t max, - ext4_grpblk_t minblocks, bool set_trimmed) + ext4_grpblk_t minblocks) { struct ext4_buddy e4b; int ret; @@ -6988,13 +7007,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_lock_group(sb, group); if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || - minblocks < EXT4_SB(sb)->s_last_trim_minblks) { + minblocks < EXT4_SB(sb)->s_last_trim_minblks) ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); - if (ret >= 0 && set_trimmed) - EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); - } else { + else ret = 0; - } ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); @@ -7027,7 +7043,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) ext4_fsblk_t first_data_blk = le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); - bool whole_group, eof = false; int ret = 0; start = range->start >> sb->s_blocksize_bits; @@ -7046,10 +7061,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) goto out; } - if (end >= max_blks - 1) { + if (end >= max_blks - 1) end = max_blks - 1; - eof = true; - } if (end <= first_data_blk) goto out; if (start < first_data_blk) @@ -7063,9 +7076,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) /* end now represents the last cluster to discard in this group */ end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; - whole_group = true; for (group = first_group; group <= last_group; group++) { + if (ext4_trim_interrupted()) + break; grp = ext4_get_group_info(sb, group); if (!grp) continue; @@ -7082,13 +7096,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) * change it for the last group, note that last_cluster is * already computed earlier by ext4_get_group_no_and_offset() */ - if (group == last_group) { + if (group == last_group) end = last_cluster; - whole_group = eof ? 
true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1; - } if (grp->bb_free >= minlen) { cnt = ext4_trim_all_free(sb, group, first_cluster, - end, minlen, whole_group); + end, minlen); if (cnt < 0) { ret = cnt; break; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 41a6411c600b..bbda587f76b8 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -343,17 +343,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, struct buffer_head *bh) { struct ext4_dir_entry_tail *t; + int blocksize = EXT4_BLOCK_SIZE(inode->i_sb); #ifdef PARANOID struct ext4_dir_entry *d, *top; d = (struct ext4_dir_entry *)bh->b_data; top = (struct ext4_dir_entry *)(bh->b_data + - (EXT4_BLOCK_SIZE(inode->i_sb) - - sizeof(struct ext4_dir_entry_tail))); - while (d < top && d->rec_len) + (blocksize - sizeof(struct ext4_dir_entry_tail))); + while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize)) d = (struct ext4_dir_entry *)(((void *)d) + - le16_to_cpu(d->rec_len)); + ext4_rec_len_from_disk(d->rec_len, blocksize)); if (d != top) return NULL; @@ -364,7 +364,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, #endif if (t->det_reserved_zero1 || - le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) || + (ext4_rec_len_from_disk(t->det_rec_len, blocksize) != + sizeof(struct ext4_dir_entry_tail)) || t->det_reserved_zero2 || t->det_reserved_ft != EXT4_FT_DIR_CSUM) return NULL; @@ -445,13 +446,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode, struct ext4_dir_entry *dp; struct dx_root_info *root; int count_offset; + int blocksize = EXT4_BLOCK_SIZE(inode->i_sb); + unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize); - if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb)) + if (rlen == blocksize) count_offset = 8; - else if (le16_to_cpu(dirent->rec_len) == 12) { + else if (rlen == 12) { dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); - if (le16_to_cpu(dp->rec_len) != - EXT4_BLOCK_SIZE(inode->i_sb) - 12) + if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12) return NULL; root = (struct dx_root_info *)(((void *)dp + 12)); if (root->reserved_zero || @@ -1315,6 +1317,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh, unsigned int buflen = bh->b_size; char *base = bh->b_data; struct dx_hash_info h = *hinfo; + int blocksize = EXT4_BLOCK_SIZE(dir->i_sb); if (ext4_has_metadata_csum(dir->i_sb)) buflen -= sizeof(struct ext4_dir_entry_tail); @@ -1335,11 +1338,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh, map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; - map_tail->size = le16_to_cpu(de->rec_len); + map_tail->size = ext4_rec_len_from_disk(de->rec_len, + blocksize); count++; cond_resched(); } - de = ext4_next_entry(de, dir->i_sb->s_blocksize); + de = ext4_next_entry(de, blocksize); } return count; } diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 1073259902a6..8d6f934c3d95 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -298,14 +298,12 @@ static int journal_finish_inode_data_buffers(journal_t *journal, static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh) { - struct page *page = bh->b_page; char *addr; __u32 checksum; - addr = kmap_atomic(page); - checksum = crc32_be(crc32_sum, - (void *)(addr + offset_in_page(bh->b_data)), bh->b_size); - kunmap_atomic(addr); + addr = kmap_local_folio(bh->b_folio, bh_offset(bh)); + checksum = crc32_be(crc32_sum, addr, bh->b_size); + kunmap_local(addr); return 
checksum; } @@ -322,7 +320,6 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, struct buffer_head *bh, __u32 sequence) { journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag; - struct page *page = bh->b_page; __u8 *addr; __u32 csum32; __be32 seq; @@ -331,11 +328,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, return; seq = cpu_to_be32(sequence); - addr = kmap_atomic(page); + addr = kmap_local_folio(bh->b_folio, bh_offset(bh)); csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq)); - csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data), - bh->b_size); - kunmap_atomic(addr); + csum32 = jbd2_chksum(j, csum32, addr, bh->b_size); + kunmap_local(addr); if (jbd2_has_feature_csum3(j)) tag3->t_checksum = cpu_to_be32(csum32); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 768fa05bcbed..30dec2bd2ecc 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1601,6 +1601,8 @@ static journal_t *journal_init_common(struct block_device *bdev, err_cleanup: percpu_counter_destroy(&journal->j_checkpoint_jh_count); + if (journal->j_chksum_driver) + crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_wbuf); jbd2_journal_destroy_revoke(journal); journal_fail_superblock(journal); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 4d1fda1f7143..5f08b5fd105a 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -935,19 +935,15 @@ static void warn_dirty_buffer(struct buffer_head *bh) /* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */ static void jbd2_freeze_jh_data(struct journal_head *jh) { - struct page *page; - int offset; char *source; struct buffer_head *bh = jh2bh(jh); J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n"); - page = bh->b_page; - offset = offset_in_page(bh->b_data); - source = kmap_atomic(page); + source = kmap_local_folio(bh->b_folio, bh_offset(bh)); /* Fire data frozen trigger just before we copy the data */ - jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers); - memcpy(jh->b_frozen_data, source + offset, bh->b_size); - kunmap_atomic(source); + jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers); + memcpy(jh->b_frozen_data, source, bh->b_size); + kunmap_local(source); /* * Now that the frozen data is saved off, we need to store any matching diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 5ca748309c26..4199ede0583c 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1058,8 +1058,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, rename->rn_tname, rename->rn_tnamelen); if (status) return status; - set_change_info(&rename->rn_sinfo, &cstate->current_fh); - set_change_info(&rename->rn_tinfo, &cstate->save_fh); + set_change_info(&rename->rn_sinfo, &cstate->save_fh); + set_change_info(&rename->rn_tinfo, &cstate->current_fh); return nfs_ok; } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 1582af33e204..c7af1095f6b5 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -1082,11 +1082,12 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file) int nfsd_pool_stats_release(struct inode *inode, struct file *file) { + struct seq_file *seq = file->private_data; + struct svc_serv *serv = seq->private; int ret = seq_release(inode, file); - struct net *net = inode->i_sb->s_fs_info; mutex_lock(&nfsd_mutex); - nfsd_put(net); + svc_put(serv); mutex_unlock(&nfsd_mutex); return ret; } diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 
bae404a1bad4..d1761ec5866a 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -618,7 +618,8 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp) if (err) return err; - if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) { + if (inode->i_flags & OVL_COPY_I_FLAGS_MASK && + (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) { /* * Copy the fileattr inode flags that are the source of already * copied i_flags diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 3b4cc633d763..4193633c4c7a 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -19,7 +19,6 @@ struct ovl_aio_req { struct kiocb iocb; refcount_t ref; struct kiocb *orig_iocb; - struct fd fd; }; static struct kmem_cache *ovl_aio_request_cachep; @@ -280,7 +279,7 @@ static rwf_t ovl_iocb_to_rwf(int ifl) static inline void ovl_aio_put(struct ovl_aio_req *aio_req) { if (refcount_dec_and_test(&aio_req->ref)) { - fdput(aio_req->fd); + fput(aio_req->iocb.ki_filp); kmem_cache_free(ovl_aio_request_cachep, aio_req); } } @@ -342,10 +341,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter) if (!aio_req) goto out; - aio_req->fd = real; real.flags = 0; aio_req->orig_iocb = iocb; - kiocb_clone(&aio_req->iocb, iocb, real.file); + kiocb_clone(&aio_req->iocb, iocb, get_file(real.file)); aio_req->iocb.ki_complete = ovl_aio_rw_complete; refcount_set(&aio_req->ref, 2); ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter); @@ -409,10 +407,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) if (!aio_req) goto out; - aio_req->fd = real; real.flags = 0; aio_req->orig_iocb = iocb; - kiocb_clone(&aio_req->iocb, iocb, real.file); + kiocb_clone(&aio_req->iocb, iocb, get_file(real.file)); aio_req->iocb.ki_flags = ifl; aio_req->iocb.ki_complete = ovl_aio_rw_complete; refcount_set(&aio_req->ref, 2); diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index de2dfbaae821..d7c302442c1e 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -2680,7 +2680,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start, } cifsFileInfo_put(cfile); - return -ENOTSUPP; + return -EOPNOTSUPP; } int cifs_truncate_page(struct address_space *mapping, loff_t from) diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index d9eda2e958b4..9aeecee6b91b 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -297,7 +297,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server, cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)", credits->value, new_val); - return -ENOTSUPP; + return -EOPNOTSUPP; } spin_lock(&server->req_lock); @@ -1161,7 +1161,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, /* Use a fudge factor of 256 bytes in case we collide * with a different set_EAs command. 
*/ - if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - + if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 < used_len + ea_name_len + ea_value_len + 1) { rc = -ENOSPC; @@ -4591,7 +4591,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, if (shdr->Command != SMB2_READ) { cifs_server_dbg(VFS, "only big read responses are supported\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } if (server->ops->is_session_expired && diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 092b0087c9dc..44d4943e9c56 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -89,20 +89,26 @@ smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd, struct TCP_Server_Info *server) { struct smb3_hdr_req *smb3_hdr; + shdr->ProtocolId = SMB2_PROTO_NUMBER; shdr->StructureSize = cpu_to_le16(64); shdr->Command = smb2_cmd; - if (server->dialect >= SMB30_PROT_ID) { - /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */ - smb3_hdr = (struct smb3_hdr_req *)shdr; - /* if primary channel is not set yet, use default channel for chan sequence num */ - if (SERVER_IS_CHAN(server)) - smb3_hdr->ChannelSequence = - cpu_to_le16(server->primary_server->channel_sequence_num); - else - smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num); - } + if (server) { + /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */ + if (server->dialect >= SMB30_PROT_ID) { + smb3_hdr = (struct smb3_hdr_req *)shdr; + /* + * if primary channel is not set yet, use default + * channel for chan sequence num + */ + if (SERVER_IS_CHAN(server)) + smb3_hdr->ChannelSequence = + cpu_to_le16(server->primary_server->channel_sequence_num); + else + smb3_hdr->ChannelSequence = + cpu_to_le16(server->channel_sequence_num); + } spin_lock(&server->req_lock); /* Request up to 10 credits but don't go over the limit. */ if (server->credits >= server->max_credits) @@ -2234,7 +2240,7 @@ create_durable_v2_buf(struct cifs_open_parms *oparms) * (most servers default to 120 seconds) and most clients default to 0. * This can be overridden at mount ("handletimeout=") if the user wants * a different persistent (or resilient) handle timeout for all opens - * opens on a particular SMB3 mount. + * on a particular SMB3 mount. 
*/ buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout); buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); @@ -2379,7 +2385,7 @@ add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp) return 0; } -/* See See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ +/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ static void setup_owner_group_sids(char *buf) { struct owner_group_sids *sids = (struct owner_group_sids *)buf; @@ -3124,6 +3130,7 @@ void SMB2_ioctl_free(struct smb_rqst *rqst) { int i; + if (rqst && rqst->rq_iov) { cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ for (i = 1; i < rqst->rq_nvec; i++) diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c index 1b5d9794ed5b..d52057a511ee 100644 --- a/fs/smb/client/transport.c +++ b/fs/smb/client/transport.c @@ -18,7 +18,7 @@ #include <linux/bvec.h> #include <linux/highmem.h> #include <linux/uaccess.h> -#include <asm/processor.h> +#include <linux/processor.h> #include <linux/mempool.h> #include <linux/sched/signal.h> #include <linux/task_io_accounting_ops.h> diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 749660110878..544022dd6d20 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -6312,7 +6312,7 @@ int smb2_read(struct ksmbd_work *work) aux_payload_buf, nbytes); kvfree(aux_payload_buf); - + aux_payload_buf = NULL; nbytes = 0; if (remain_bytes < 0) { err = (int)remain_bytes; diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c index e5e438bf5499..6c0305be895e 100644 --- a/fs/smb/server/smbacl.c +++ b/fs/smb/server/smbacl.c @@ -1420,7 +1420,6 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon, out: posix_acl_release(fattr.cf_acls); posix_acl_release(fattr.cf_dacls); - mark_inode_dirty(inode); return rc; } diff --git a/fs/stat.c b/fs/stat.c index 6822ac77aec2..6e60389d6a15 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -419,12 +419,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat #ifdef __ARCH_WANT_NEW_STAT -#if BITS_PER_LONG == 32 -# define choose_32_64(a,b) a -#else -# define choose_32_64(a,b) b -#endif - #ifndef INIT_STRUCT_STAT_PADDING # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st)) #endif diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c index 237c6f370ad9..9f64e7332796 100644 --- a/fs/tracefs/event_inode.c +++ b/fs/tracefs/event_inode.c @@ -185,17 +185,49 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void * /** * eventfs_set_ef_status_free - set the ef->status to free + * @ti: the tracefs_inode of the dentry * @dentry: dentry who's status to be freed * * eventfs_set_ef_status_free will be called if no more * references remain */ -void eventfs_set_ef_status_free(struct dentry *dentry) +void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry) { struct tracefs_inode *ti_parent; - struct eventfs_file *ef; + struct eventfs_inode *ei; + struct eventfs_file *ef, *tmp; + + /* The top level events directory may be freed by this */ + if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) { + LIST_HEAD(ef_del_list); + + mutex_lock(&eventfs_mutex); + + ei = ti->private; + + /* Record all the top level files */ + list_for_each_entry_srcu(ef, &ei->e_top_files, list, + lockdep_is_held(&eventfs_mutex)) { + list_add_tail(&ef->del_list, &ef_del_list); + } + + /* Nothing should access this, but just in case! 
*/ + ti->private = NULL; + + mutex_unlock(&eventfs_mutex); + + /* Now safely free the top level files and their children */ + list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) { + list_del(&ef->del_list); + eventfs_remove(ef); + } + + kfree(ei); + return; + } mutex_lock(&eventfs_mutex); + ti_parent = get_tracefs(dentry->d_parent->d_inode); if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE)) goto out; @@ -420,7 +452,8 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file) ei = ti->private; idx = srcu_read_lock(&eventfs_srcu); - list_for_each_entry_rcu(ef, &ei->e_top_files, list) { + list_for_each_entry_srcu(ef, &ei->e_top_files, list, + srcu_read_lock_held(&eventfs_srcu)) { create_dentry(ef, dentry, false); } srcu_read_unlock(&eventfs_srcu, idx); @@ -491,6 +524,9 @@ struct dentry *eventfs_create_events_dir(const char *name, struct tracefs_inode *ti; struct inode *inode; + if (security_locked_down(LOCKDOWN_TRACEFS)) + return NULL; + if (IS_ERR(dentry)) return dentry; @@ -507,7 +543,7 @@ struct dentry *eventfs_create_events_dir(const char *name, INIT_LIST_HEAD(&ei->e_top_files); ti = get_tracefs(inode); - ti->flags |= TRACEFS_EVENT_INODE; + ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE; ti->private = ei; inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; @@ -538,6 +574,9 @@ struct eventfs_file *eventfs_add_subsystem_dir(const char *name, struct eventfs_inode *ei_parent; struct eventfs_file *ef; + if (security_locked_down(LOCKDOWN_TRACEFS)) + return NULL; + if (!parent) return ERR_PTR(-EINVAL); @@ -569,6 +608,9 @@ struct eventfs_file *eventfs_add_dir(const char *name, { struct eventfs_file *ef; + if (security_locked_down(LOCKDOWN_TRACEFS)) + return NULL; + if (!ef_parent) return ERR_PTR(-EINVAL); @@ -606,6 +648,9 @@ int eventfs_add_events_file(const char *name, umode_t mode, struct eventfs_inode *ei; struct eventfs_file *ef; + if (security_locked_down(LOCKDOWN_TRACEFS)) + return -ENODEV; + if (!parent) return -EINVAL; @@ -654,6 +699,9 @@ int eventfs_add_file(const char *name, umode_t mode, { struct eventfs_file *ef; + if (security_locked_down(LOCKDOWN_TRACEFS)) + return -ENODEV; + if (!ef_parent) return -EINVAL; @@ -791,7 +839,6 @@ void eventfs_remove(struct eventfs_file *ef) void eventfs_remove_events_dir(struct dentry *dentry) { struct tracefs_inode *ti; - struct eventfs_inode *ei; if (!dentry || !dentry->d_inode) return; @@ -800,8 +847,6 @@ void eventfs_remove_events_dir(struct dentry *dentry) if (!ti || !(ti->flags & TRACEFS_EVENT_INODE)) return; - ei = ti->private; d_invalidate(dentry); dput(dentry); - kfree(ei); } diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index de5b72216b1a..891653ba9cf3 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -385,7 +385,7 @@ static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode) ti = get_tracefs(inode); if (ti && ti->flags & TRACEFS_EVENT_INODE) - eventfs_set_ef_status_free(dentry); + eventfs_set_ef_status_free(ti, dentry); iput(inode); } @@ -673,6 +673,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent, */ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) { + if (security_locked_down(LOCKDOWN_TRACEFS)) + return NULL; + return __create_dir(name, parent, &simple_dir_inode_operations); } diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h index 69c2b1d87c46..4f2e49e2197b 100644 --- a/fs/tracefs/internal.h +++ b/fs/tracefs/internal.h @@ -3,7 +3,8 @@ #define _TRACEFS_INTERNAL_H enum { - TRACEFS_EVENT_INODE 
= BIT(1), + TRACEFS_EVENT_INODE = BIT(1), + TRACEFS_EVENT_TOP_INODE = BIT(2), }; struct tracefs_inode { @@ -24,6 +25,6 @@ struct inode *tracefs_get_inode(struct super_block *sb); struct dentry *eventfs_start_creating(const char *name, struct dentry *parent); struct dentry *eventfs_failed_creating(struct dentry *dentry); struct dentry *eventfs_end_creating(struct dentry *dentry); -void eventfs_set_ef_status_free(struct dentry *dentry); +void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry); #endif /* _TRACEFS_INTERNAL_H */ diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h index e0462361adf9..b5bf0b6da791 100644 --- a/include/drm/drm_exec.h +++ b/include/drm/drm_exec.h @@ -52,6 +52,20 @@ struct drm_exec { }; /** + * drm_exec_obj() - Return the object for a give drm_exec index + * @exec: Pointer to the drm_exec context + * @index: The index. + * + * Return: Pointer to the locked object corresponding to @index if + * index is within the number of locked objects. NULL otherwise. + */ +static inline struct drm_gem_object * +drm_exec_obj(struct drm_exec *exec, unsigned long index) +{ + return index < exec->num_objects ? exec->objects[index] : NULL; +} + +/** * drm_exec_for_each_locked_object - iterate over all the locked objects * @exec: drm_exec object * @index: unsigned long index for the iteration @@ -59,10 +73,23 @@ struct drm_exec { * * Iterate over all the locked GEM objects inside the drm_exec object. */ -#define drm_exec_for_each_locked_object(exec, index, obj) \ - for (index = 0, obj = (exec)->objects[0]; \ - index < (exec)->num_objects; \ - ++index, obj = (exec)->objects[index]) +#define drm_exec_for_each_locked_object(exec, index, obj) \ + for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index)) + +/** + * drm_exec_for_each_locked_object_reverse - iterate over all the locked + * objects in reverse locking order + * @exec: drm_exec object + * @index: unsigned long index for the iteration + * @obj: the current GEM object + * + * Iterate over all the locked GEM objects inside the drm_exec object in + * reverse locking order. Note that @index may go below zero and wrap, + * but that will be caught by drm_exec_obj(), returning a NULL object. 
+ */ +#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \ + for ((index) = (exec)->num_objects - 1; \ + ((obj) = drm_exec_obj(exec, index)); --(index)) /** * drm_exec_until_all_locked - loop until all GEM objects are locked diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 514c8a7a32f0..ba483c87f0e7 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -3,6 +3,8 @@ #ifndef DRM_KUNIT_HELPERS_H_ #define DRM_KUNIT_HELPERS_H_ +#include <linux/device.h> + #include <kunit/test.h> struct drm_device; @@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, { struct drm_driver *driver; - driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL); + driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, driver); driver->driver_features = features; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 4ede47649a81..44e9de51eedf 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -171,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh) return test_bit_acquire(BH_Uptodate, &bh->b_state); } -#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) +static inline unsigned long bh_offset(const struct buffer_head *bh) +{ + return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1); +} /* If we *know* page->private refers to buffer_heads */ #define page_buffers(page) \ diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h index 1c849db953a5..45fca09b2319 100644 --- a/include/linux/export-internal.h +++ b/include/linux/export-internal.h @@ -52,6 +52,8 @@ #ifdef CONFIG_IA64 #define KSYM_FUNC(name) @fptr(name) +#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT) +#define KSYM_FUNC(name) P%name #else #define KSYM_FUNC(name) name #endif diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 3430cc2b05a6..0dae9db27538 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -237,7 +237,6 @@ enum i2c_driver_flags { * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) * @probe: Callback for device binding - * @probe_new: Transitional callback for device binding - do not use * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown * @alert: Alert callback, for example for the SMBus alert protocol @@ -272,16 +271,8 @@ enum i2c_driver_flags { struct i2c_driver { unsigned int class; - union { /* Standard driver model interfaces */ - int (*probe)(struct i2c_client *client); - /* - * Legacy callback that was part of a conversion of .probe(). - * Today it has the same semantic as .probe(). Don't use for new - * code. 
- */ - int (*probe_new)(struct i2c_client *client); - }; + int (*probe)(struct i2c_client *client); void (*remove)(struct i2c_client *client); diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h index cda1f706eaeb..aa0b3ffea935 100644 --- a/include/linux/instruction_pointer.h +++ b/include/linux/instruction_pointer.h @@ -2,7 +2,12 @@ #ifndef _LINUX_INSTRUCTION_POINTER_H #define _LINUX_INSTRUCTION_POINTER_H +#include <asm/linkage.h> + #define _RET_IP_ (unsigned long)__builtin_return_address(0) + +#ifndef _THIS_IP_ #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) +#endif #endif /* _LINUX_INSTRUCTION_POINTER_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 52d58b13e5ee..bf4913f4d7ac 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -222,6 +222,10 @@ enum { ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */ + ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */ + ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */ + ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */ + /* bits 24:31 of host->flags are reserved for LLD specific flags */ /* various lengths of time */ diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index f6ef8cf5d774..4109f1bd6128 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -53,10 +53,10 @@ struct nvmefc_ls_req { void *rqstaddr; dma_addr_t rqstdma; - __le32 rqstlen; + u32 rqstlen; void *rspaddr; dma_addr_t rspdma; - __le32 rsplen; + u32 rsplen; u32 timeout; void *private; @@ -120,7 +120,7 @@ struct nvmefc_ls_req { struct nvmefc_ls_rsp { void *rspbuf; dma_addr_t rspdma; - __le32 rsplen; + u16 rsplen; void (*done)(struct nvmefc_ls_rsp *rsp); void *nvme_fc_private; /* LLDD is not to access !! */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c99440aac1a1..a5ae4af955ff 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -80,8 +80,8 @@ struct thermal_zone_device_ops { int (*set_trip_hyst) (struct thermal_zone_device *, int, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); - int (*get_trend) (struct thermal_zone_device *, struct thermal_trip *, - enum thermal_trend *); + int (*get_trend) (struct thermal_zone_device *, + const struct thermal_trip *, enum thermal_trend *); void (*hot)(struct thermal_zone_device *); void (*critical)(struct thermal_zone_device *); }; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index eb5c3add939b..21ae37e49319 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...); /* Used to find the offset and length of dynamic fields in trace events */ struct trace_dynamic_info { #ifdef CONFIG_CPU_BIG_ENDIAN - u16 offset; u16 len; + u16 offset; #else - u16 len; u16 offset; + u16 len; #endif -}; +} __packed; /* * The trace entry - the most basic unit of tracing. 
This is what @@ -650,7 +650,6 @@ struct trace_event_file { struct trace_event_call *event_call; struct event_filter __rcu *filter; struct eventfs_file *ef; - struct dentry *dir; struct trace_array *tr; struct trace_subsystem_dir *system; struct list_head triggers; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 0675be0f3fa0..c6932d1a3fa8 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -784,6 +784,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) cpu_to_be32(0x0000ffff))) == 0UL; } +static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a) +{ + return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]); +} + static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a) { return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]); @@ -1360,7 +1365,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) return 0; } -static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val) +static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val) { int ret; diff --git a/io_uring/net.c b/io_uring/net.c index 3d07bf79c1e0..7a8e298af81b 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -183,6 +183,10 @@ static int io_setup_async_msg(struct io_kiocb *req, memcpy(async_msg, kmsg, sizeof(*kmsg)); if (async_msg->msg.msg_name) async_msg->msg.msg_name = &async_msg->addr; + + if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs) + return -EAGAIN; + /* if were using fast_iov, set it to the new one */ if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) { size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov; @@ -542,6 +546,7 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { iomsg->msg.msg_name = &iomsg->addr; + iomsg->msg.msg_iter.nr_segs = 0; #ifdef CONFIG_COMPAT if (req->ctx->compat) diff --git a/kernel/panic.c b/kernel/panic.c index 07239d4ad81e..ffa037fa777d 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -697,6 +697,7 @@ void warn_slowpath_fmt(const char *file, int line, unsigned taint, if (!fmt) { __warn(file, line, __builtin_return_address(0), taint, NULL, NULL); + warn_rcu_exit(rcu); return; } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 2b4a946a6ff5..8d35b9f9aaa3 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -786,9 +786,9 @@ int hibernate(void) unlock_device_hotplug(); if (snapshot_test) { pm_pr_dbg("Checking hibernation image\n"); - error = swsusp_check(snapshot_test); + error = swsusp_check(false); if (!error) - error = load_image_and_restore(snapshot_test); + error = load_image_and_restore(false); } thaw_processes(); @@ -945,14 +945,14 @@ static int software_resume(void) pm_pr_dbg("Looking for hibernation image.\n"); mutex_lock(&system_transition_mutex); - error = swsusp_check(false); + error = swsusp_check(true); if (error) goto Unlock; /* The snapshot device should not be opened while we're running */ if (!hibernate_acquire()) { error = -EBUSY; - swsusp_close(false); + swsusp_close(true); goto Unlock; } @@ -973,7 +973,7 @@ static int software_resume(void) goto Close_Finish; } - error = load_image_and_restore(false); + error = load_image_and_restore(true); thaw_processes(); Finish: pm_notifier_call_chain(PM_POST_RESTORE); @@ -987,7 +987,7 @@ static int software_resume(void) pm_pr_dbg("Hibernation image not present or could not be loaded.\n"); return error; Close_Finish: - swsusp_close(false); + swsusp_close(true); goto Finish; } diff --git 
a/kernel/power/power.h b/kernel/power/power.h index 46eb14dc50c3..a98f95e309a3 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -168,11 +168,11 @@ extern int swsusp_swap_in_use(void); #define SF_HW_SIG 8 /* kernel/power/hibernate.c */ -int swsusp_check(bool snapshot_test); +int swsusp_check(bool exclusive); extern void swsusp_free(void); extern int swsusp_read(unsigned int *flags_p); extern int swsusp_write(unsigned int flags); -void swsusp_close(bool snapshot_test); +void swsusp_close(bool exclusive); #ifdef CONFIG_SUSPEND extern int swsusp_unmark(void); #endif diff --git a/kernel/power/swap.c b/kernel/power/swap.c index f6ebcd00c410..74edbce2320b 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -1513,12 +1513,13 @@ end: static void *swsusp_holder; /** - * swsusp_check - Check for swsusp signature in the resume device + * swsusp_check - Check for swsusp signature in the resume device + * @exclusive: Open the resume device exclusively. */ -int swsusp_check(bool snapshot_test) +int swsusp_check(bool exclusive) { - void *holder = snapshot_test ? &swsusp_holder : NULL; + void *holder = exclusive ? &swsusp_holder : NULL; int error; hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ, @@ -1563,17 +1564,18 @@ put: } /** - * swsusp_close - close swap device. + * swsusp_close - close swap device. + * @exclusive: Close the resume device which is exclusively opened. */ -void swsusp_close(bool snapshot_test) +void swsusp_close(bool exclusive) { if (IS_ERR(hib_resume_bdev)) { pr_debug("Image device not initialised\n"); return; } - blkdev_put(hib_resume_bdev, snapshot_test ? &swsusp_holder : NULL); + blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL); } /** diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8dbff6e7ad4f..cb225921bbca 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6619,6 +6619,7 @@ dequeue_throttle: /* Working cpumask for: load_balance, load_balance_newidle. */ static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask); +static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask); #ifdef CONFIG_NO_HZ_COMMON @@ -9579,7 +9580,7 @@ static inline long sibling_imbalance(struct lb_env *env, imbalance /= ncores_local + ncores_busiest; /* Take advantage of resource in an empty sched group */ - if (imbalance == 0 && local->sum_nr_running == 0 && + if (imbalance <= 1 && local->sum_nr_running == 0 && busiest->sum_nr_running > 1) imbalance = 2; @@ -9767,6 +9768,15 @@ static bool update_sd_pick_busiest(struct lb_env *env, break; case group_smt_balance: + /* + * Check if we have spare CPUs on either SMT group to + * choose has spare or fully busy handling. + */ + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) + goto has_spare; + + fallthrough; + case group_fully_busy: /* * Select the fully busy group with highest avg_load. 
In @@ -9806,6 +9816,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, else return true; } +has_spare: /* * Select not overloaded group with lowest number of idle cpus @@ -10917,6 +10928,7 @@ static int active_load_balance_cpu_stop(void *data); static int should_we_balance(struct lb_env *env) { + struct cpumask *swb_cpus = this_cpu_cpumask_var_ptr(should_we_balance_tmpmask); struct sched_group *sg = env->sd->groups; int cpu, idle_smt = -1; @@ -10940,8 +10952,9 @@ static int should_we_balance(struct lb_env *env) return 1; } + cpumask_copy(swb_cpus, group_balance_mask(sg)); /* Try to find first idle CPU */ - for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { + for_each_cpu_and(cpu, swb_cpus, env->cpus) { if (!idle_cpu(cpu)) continue; @@ -10953,6 +10966,14 @@ static int should_we_balance(struct lb_env *env) if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) { if (idle_smt == -1) idle_smt = cpu; + /* + * If the core is not idle, and first SMT sibling which is + * idle has been found, then its not needed to check other + * SMT siblings for idleness: + */ +#ifdef CONFIG_SCHED_SMT + cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu)); +#endif continue; } @@ -12918,6 +12939,8 @@ __init void init_sched_fair_class(void) for_each_possible_cpu(i) { zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i)); zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i)); + zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i), + GFP_KERNEL, cpu_to_node(i)); #ifdef CONFIG_CFS_BANDWIDTH INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i)); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 78502d4c7214..a1651edc48d5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2198,6 +2198,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, err = -ENOMEM; goto out_err; } + + cond_resched(); } cpus_read_lock(); @@ -2388,6 +2390,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter) */ commit = rb_page_commit(iter_head_page); smp_rmb(); + + /* An event needs to be at least 8 bytes in size */ + if (iter->head > commit - 8) + goto reset; + event = __rb_page_index(iter_head_page, iter->head); length = rb_event_length(event); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2b4ded753367..abaaf516fcae 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1772,7 +1772,7 @@ static void trace_create_maxlat_file(struct trace_array *tr, init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); tr->d_max_latency = trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, - d_tracer, &tr->max_latency, + d_tracer, tr, &tracing_max_lat_fops); } @@ -1805,7 +1805,7 @@ void latency_fsnotify(struct trace_array *tr) #define trace_create_maxlat_file(tr, d_tracer) \ trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ - d_tracer, &tr->max_latency, &tracing_max_lat_fops) + d_tracer, tr, &tracing_max_lat_fops) #endif @@ -4973,6 +4973,33 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp) return 0; } +/* + * The private pointer of the inode is the trace_event_file. + * Update the tr ref count associated to it. 
+ */ +int tracing_open_file_tr(struct inode *inode, struct file *filp) +{ + struct trace_event_file *file = inode->i_private; + int ret; + + ret = tracing_check_open_get_tr(file->tr); + if (ret) + return ret; + + filp->private_data = inode->i_private; + + return 0; +} + +int tracing_release_file_tr(struct inode *inode, struct file *filp) +{ + struct trace_event_file *file = inode->i_private; + + trace_array_put(file->tr); + + return 0; +} + static int tracing_mark_open(struct inode *inode, struct file *filp) { stream_open(inode, filp); @@ -6691,14 +6718,18 @@ static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); + struct trace_array *tr = filp->private_data; + + return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); + struct trace_array *tr = filp->private_data; + + return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); } #endif @@ -7752,18 +7783,20 @@ static const struct file_operations tracing_thresh_fops = { #ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops = { - .open = tracing_open_generic, + .open = tracing_open_generic_tr, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, + .release = tracing_release_generic_tr, }; #endif static const struct file_operations set_tracer_fops = { - .open = tracing_open_generic, + .open = tracing_open_generic_tr, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, + .release = tracing_release_generic_tr, }; static const struct file_operations tracing_pipe_fops = { @@ -8956,12 +8989,33 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, return cnt; } +static int tracing_open_options(struct inode *inode, struct file *filp) +{ + struct trace_option_dentry *topt = inode->i_private; + int ret; + + ret = tracing_check_open_get_tr(topt->tr); + if (ret) + return ret; + + filp->private_data = inode->i_private; + return 0; +} + +static int tracing_release_options(struct inode *inode, struct file *file) +{ + struct trace_option_dentry *topt = file->private_data; + + trace_array_put(topt->tr); + return 0; +} static const struct file_operations trace_options_fops = { - .open = tracing_open_generic, + .open = tracing_open_options, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, + .release = tracing_release_options, }; /* @@ -9739,8 +9793,8 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) tr, &tracing_mark_fops); file = __find_event_file(tr, "ftrace", "print"); - if (file && file->dir) - trace_create_file("trigger", TRACE_MODE_WRITE, file->dir, + if (file && file->ef) + eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef, file, &event_trigger_fops); tr->trace_marker_file = file; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5669dd1f90d9..77debe53f07c 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -610,6 +610,8 @@ void tracing_reset_all_online_cpus(void); void tracing_reset_all_online_cpus_unlocked(void); int tracing_open_generic(struct inode *inode, struct file *filp); int tracing_open_generic_tr(struct inode *inode, struct file *filp); +int tracing_open_file_tr(struct inode *inode, struct file *filp); +int 
tracing_release_file_tr(struct inode *inode, struct file *filp); bool tracing_is_disabled(void); bool tracer_tracing_is_on(struct trace_array *tr); void tracer_tracing_on(struct trace_array *tr); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ed367d713be0..91951d038ba4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -992,19 +992,6 @@ static void remove_subsystem(struct trace_subsystem_dir *dir) static void remove_event_file_dir(struct trace_event_file *file) { - struct dentry *dir = file->dir; - struct dentry *child; - - if (dir) { - spin_lock(&dir->d_lock); /* probably unneeded */ - list_for_each_entry(child, &dir->d_subdirs, d_child) { - if (d_really_is_positive(child)) /* probably unneeded */ - d_inode(child)->i_private = NULL; - } - spin_unlock(&dir->d_lock); - - tracefs_remove(dir); - } eventfs_remove(file->ef); list_del(&file->list); remove_subsystem(file->system); @@ -2103,9 +2090,10 @@ static const struct file_operations ftrace_set_event_notrace_pid_fops = { }; static const struct file_operations ftrace_enable_fops = { - .open = tracing_open_generic, + .open = tracing_open_file_tr, .read = event_enable_read, .write = event_enable_write, + .release = tracing_release_file_tr, .llseek = default_llseek, }; @@ -2122,9 +2110,10 @@ static const struct file_operations ftrace_event_id_fops = { }; static const struct file_operations ftrace_event_filter_fops = { - .open = tracing_open_generic, + .open = tracing_open_file_tr, .read = event_filter_read, .write = event_filter_write, + .release = tracing_release_file_tr, .llseek = default_llseek, }; @@ -2297,6 +2286,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, { struct event_subsystem *system, *iter; struct trace_subsystem_dir *dir; + struct eventfs_file *ef; int res; /* First see if we did not already create this dir */ @@ -2329,13 +2319,14 @@ event_subsystem_dir(struct trace_array *tr, const char *name, } else __get_system(system); - dir->ef = eventfs_add_subsystem_dir(name, parent); - if (IS_ERR(dir->ef)) { + ef = eventfs_add_subsystem_dir(name, parent); + if (IS_ERR(ef)) { pr_warn("Failed to create system directory %s\n", name); __put_system(system); goto out_free; } + dir->ef = ef; dir->tr = tr; dir->ref_count = 1; dir->nr_events = 1; @@ -2415,6 +2406,7 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) struct trace_event_call *call = file->event_call; struct eventfs_file *ef_subsystem = NULL; struct trace_array *tr = file->tr; + struct eventfs_file *ef; const char *name; int ret; @@ -2431,12 +2423,14 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) return -ENOMEM; name = trace_event_name(call); - file->ef = eventfs_add_dir(name, ef_subsystem); - if (IS_ERR(file->ef)) { + ef = eventfs_add_dir(name, ef_subsystem); + if (IS_ERR(ef)) { pr_warn("Could not create tracefs '%s' directory\n", name); return -1; } + file->ef = ef; + if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file, &ftrace_enable_fops); diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c index abe805d471eb..8650562bdaa9 100644 --- a/kernel/trace/trace_events_inject.c +++ b/kernel/trace/trace_events_inject.c @@ -328,7 +328,8 @@ event_inject_read(struct file *file, char __user *buf, size_t size, } const struct file_operations event_inject_fops = { - .open = tracing_open_generic, + .open = tracing_open_file_tr, .read = event_inject_read, .write = 
event_inject_write, + .release = tracing_release_file_tr, }; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 9897d0bfcab7..14cb275a0bab 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -337,7 +337,7 @@ static void print_synth_event_num_val(struct trace_seq *s, break; default: - trace_seq_printf(s, print_fmt, name, val, space); + trace_seq_printf(s, print_fmt, name, val->as_u64, space); break; } } diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 5181aa2e760b..a6348489d45f 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -65,7 +65,7 @@ struct kunit_glob_filter { }; /* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */ -static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed, +static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed, const char *filter_glob) { const int len = strlen(filter_glob); @@ -73,16 +73,28 @@ static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed, if (!period) { parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL); + if (!parsed->suite_glob) + return -ENOMEM; + parsed->test_glob = NULL; strcpy(parsed->suite_glob, filter_glob); - return; + return 0; } parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL); + if (!parsed->suite_glob) + return -ENOMEM; + parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL); + if (!parsed->test_glob) { + kfree(parsed->suite_glob); + return -ENOMEM; + } strncpy(parsed->suite_glob, filter_glob, period - filter_glob); strncpy(parsed->test_glob, period + 1, len - (period - filter_glob)); + + return 0; } /* Create a copy of suite with only tests that match test_glob. */ @@ -152,21 +164,24 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, } copy_start = copy; - if (filter_glob) - kunit_parse_glob_filter(&parsed_glob, filter_glob); + if (filter_glob) { + *err = kunit_parse_glob_filter(&parsed_glob, filter_glob); + if (*err) + goto free_copy; + } /* Parse attribute filters */ if (filters) { filter_count = kunit_get_filter_count(filters); parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL); if (!parsed_filters) { - kfree(copy); - return filtered; + *err = -ENOMEM; + goto free_parsed_glob; } for (j = 0; j < filter_count; j++) parsed_filters[j] = kunit_next_attr_filter(&filters, err); if (*err) - goto err; + goto free_parsed_filters; } for (i = 0; &suite_set->start[i] != suite_set->end; i++) { @@ -178,7 +193,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, parsed_glob.test_glob); if (IS_ERR(filtered_suite)) { *err = PTR_ERR(filtered_suite); - goto err; + goto free_parsed_filters; } } if (filter_count > 0 && parsed_filters != NULL) { @@ -195,10 +210,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, filtered_suite = new_filtered_suite; if (*err) - goto err; + goto free_parsed_filters; + if (IS_ERR(filtered_suite)) { *err = PTR_ERR(filtered_suite); - goto err; + goto free_parsed_filters; } if (!filtered_suite) break; @@ -213,17 +229,19 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, filtered.start = copy_start; filtered.end = copy; -err: - if (*err) - kfree(copy); +free_parsed_filters: + if (filter_count) + kfree(parsed_filters); +free_parsed_glob: if (filter_glob) { kfree(parsed_glob.suite_glob); kfree(parsed_glob.test_glob); } - if (filter_count) - kfree(parsed_filters); +free_copy: + if (*err) + kfree(copy); return filtered; } diff --git 
a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 4084071d0eb5..b4f6f96b2844 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -119,7 +119,7 @@ static void parse_filter_attr_test(struct kunit *test) { int j, filter_count; struct kunit_attr_filter *parsed_filters; - char *filters = "speed>slow, module!=example"; + char filters[] = "speed>slow, module!=example", *filter = filters; int err = 0; filter_count = kunit_get_filter_count(filters); @@ -128,7 +128,7 @@ static void parse_filter_attr_test(struct kunit *test) parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters), GFP_KERNEL); for (j = 0; j < filter_count; j++) { - parsed_filters[j] = kunit_next_attr_filter(&filters, &err); + parsed_filters[j] = kunit_next_attr_filter(&filter, &err); KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]); } @@ -154,6 +154,7 @@ static void filter_attr_test(struct kunit *test) .start = subsuite, .end = &subsuite[2], }; struct kunit_suite_set got; + char filter[] = "speed>slow"; int err = 0; subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases); @@ -168,7 +169,7 @@ static void filter_attr_test(struct kunit *test) * attribute is unset and thus, the filtering is based on the parent attribute * of slow. */ - got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err); + got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); kfree_at_end(test, got.start); @@ -191,12 +192,13 @@ static void filter_attr_empty_test(struct kunit *test) .start = subsuite, .end = &subsuite[2], }; struct kunit_suite_set got; + char filter[] = "module!=dummy"; int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases); - got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err); + got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err); KUNIT_ASSERT_EQ(test, err, 0); kfree_at_end(test, got.start); /* just in case */ @@ -211,12 +213,13 @@ static void filter_attr_skip_test(struct kunit *test) .start = subsuite, .end = &subsuite[1], }; struct kunit_suite_set got; + char filter[] = "speed>slow"; int err = 0; subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases); /* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */ - got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err); + got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); KUNIT_ASSERT_EQ(test, err, 0); kfree_at_end(test, got.start); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 49698a168437..421f13981412 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -784,12 +784,13 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val, switch (val) { case MODULE_STATE_LIVE: - kunit_module_init(mod); break; case MODULE_STATE_GOING: kunit_module_exit(mod); break; case MODULE_STATE_COMING: + kunit_module_init(mod); + break; case MODULE_STATE_UNFORMED: break; } diff --git a/mm/mremap.c b/mm/mremap.c index 056478c106ee..382e81c33fc4 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -715,7 +715,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, } vma_iter_init(&vmi, mm, old_addr); - if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) { + if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { /* OOM: 
unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 629daacc9607..b71dab630a87 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -594,6 +594,7 @@ static int fill_frame_info(struct hsr_frame_info *frame, proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto; /* FIXME: */ netdev_warn_once(skb->dev, "VLAN not yet supported"); + return -EINVAL; } frame->is_from_san = false; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 9cf64ee47dd2..ca0ff15dc8fa 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -355,14 +355,14 @@ static void __inet_del_ifa(struct in_device *in_dev, { struct in_ifaddr *promote = NULL; struct in_ifaddr *ifa, *ifa1; - struct in_ifaddr *last_prim; + struct in_ifaddr __rcu **last_prim; struct in_ifaddr *prev_prom = NULL; int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev); ASSERT_RTNL(); ifa1 = rtnl_dereference(*ifap); - last_prim = rtnl_dereference(in_dev->ifa_list); + last_prim = ifap; if (in_dev->dead) goto no_promotions; @@ -376,7 +376,7 @@ static void __inet_del_ifa(struct in_device *in_dev, while ((ifa = rtnl_dereference(*ifap1)) != NULL) { if (!(ifa->ifa_flags & IFA_F_SECONDARY) && ifa1->ifa_scope <= ifa->ifa_scope) - last_prim = ifa; + last_prim = &ifa->ifa_next; if (!(ifa->ifa_flags & IFA_F_SECONDARY) || ifa1->ifa_mask != ifa->ifa_mask || @@ -440,9 +440,9 @@ no_promotions: rcu_assign_pointer(prev_prom->ifa_next, next_sec); - last_sec = rtnl_dereference(last_prim->ifa_next); + last_sec = rtnl_dereference(*last_prim); rcu_assign_pointer(promote->ifa_next, last_sec); - rcu_assign_pointer(last_prim->ifa_next, promote); + rcu_assign_pointer(*last_prim, promote); } promote->ifa_flags &= ~IFA_F_SECONDARY; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 7876b7d703cb..c32f5e28758b 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -815,41 +815,45 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb, const struct net *net, unsigned short port, int l3mdev, const struct sock *sk) { + if (!net_eq(ib2_net(tb), net) || tb->port != port || + tb->l3mdev != l3mdev) + return false; + #if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family != tb->family) + if (sk->sk_family != tb->family) { + if (sk->sk_family == AF_INET) + return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) && + tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr; + return false; + } if (sk->sk_family == AF_INET6) - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && - ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); - else + return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); #endif - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr; + return tb->rcv_saddr == sk->sk_rcv_saddr; } bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net, unsigned short port, int l3mdev, const struct sock *sk) { + if (!net_eq(ib2_net(tb), net) || tb->port != port || + tb->l3mdev != l3mdev) + return false; + #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family != tb->family) { if (sk->sk_family == AF_INET) - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && - ipv6_addr_any(&tb->v6_rcv_saddr); + return ipv6_addr_any(&tb->v6_rcv_saddr) || + ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr); return false; } if (sk->sk_family == 
AF_INET6) - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && - ipv6_addr_any(&tb->v6_rcv_saddr); - else + return ipv6_addr_any(&tb->v6_rcv_saddr); #endif - return net_eq(ib2_net(tb), net) && tb->port == port && - tb->l3mdev == l3mdev && tb->rcv_saddr == 0; + return tb->rcv_saddr == 0; } /* The socket's bhash2 hashbucket spinlock must be held when this is called */ diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 4580f61426bb..dd1d8ffd5f59 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -930,15 +930,18 @@ partial_message: out_error: kcm_push(kcm); - if (copied && sock->type == SOCK_SEQPACKET) { + if (sock->type == SOCK_SEQPACKET) { /* Wrote some bytes before encountering an * error, return partial success. */ - goto partial_message; - } - - if (head != kcm->seq_skb) + if (copied) + goto partial_message; + if (head != kcm->seq_skb) + kfree_skb(head); + } else { kfree_skb(head); + kcm->seq_skb = NULL; + } err = sk_stream_error(sk, msg->msg_flags, err); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index bd01dd31e4bd..d520ee62c8ec 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1662,6 +1662,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr, *n; + spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { struct smc_link *link; @@ -1680,6 +1681,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) if (link) smc_llc_add_link_local(link); } + spin_unlock_bh(&smc_lgr_list.lock); } /* link is down - switch connections to alternate link, diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h index b60fe1eb37ab..aa8928975cc6 100644 --- a/net/smc/smc_stats.h +++ b/net/smc/smc_stats.h @@ -243,8 +243,9 @@ while (0) #define SMC_STAT_SERV_SUCC_INC(net, _ini) \ do { \ typeof(_ini) i = (_ini); \ - bool is_v2 = (i->smcd_version & SMC_V2); \ bool is_smcd = (i->is_smcd); \ + u8 version = is_smcd ? 
i->smcd_version : i->smcr_version; \ + bool is_v2 = (version & SMC_V2); \ typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \ if (is_v2 && is_smcd) \ this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \ diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 1ed4a611631f..d1fc295b83b5 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -817,7 +817,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, psock = sk_psock_get(sk); if (!psock || !policy) { err = tls_push_record(sk, flags, record_type); - if (err && sk->sk_err == EBADMSG) { + if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); err = -sk->sk_err; @@ -846,7 +846,7 @@ more_data: switch (psock->eval) { case __SK_PASS: err = tls_push_record(sk, flags, record_type); - if (err && sk->sk_err == EBADMSG) { + if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) { *copied -= sk_msg_free(sk, msg); tls_free_open_rec(sk); err = -sk->sk_err; diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index c59cc57286ba..346f5ec50682 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst @@ -113,7 +113,7 @@ quiet_cmd_sign := endif # Create necessary directories -$(shell mkdir -p $(sort $(dir $(install-y)))) +$(foreach dir, $(sort $(dir $(install-y))), $(shell mkdir -p $(dir))) $(dst)/%.ko: $(extmod_prefix)%.ko FORCE $(call cmd,install) diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 34a5386d444a..de499dce5265 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1228,6 +1228,15 @@ static void check_export_symbol(struct module *mod, struct elf_info *elf, */ s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC); + /* + * For parisc64, symbols prefixed $$ from the library have the symbol type + * STT_LOPROC. They should be handled as functions too. + */ + if (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64 && + elf->hdr->e_machine == EM_PARISC && + ELF_ST_TYPE(sym->st_info) == STT_LOPROC) + s->is_func = true; + if (match(secname, PATTERNS(INIT_SECTIONS))) warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n", mod->name, name); diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build index af7fe9f5b1e4..8a7051fad087 100755 --- a/scripts/package/install-extmod-build +++ b/scripts/package/install-extmod-build @@ -20,7 +20,7 @@ mkdir -p "${destdir}" find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*' find include scripts -type f -o -type l find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform - find "$(find "arch/${SRCARCH}" -name include -o -name scripts -type d)" -type f + find "arch/${SRCARCH}" -name include -o -name scripts -type d ) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}" { diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 10350534de6d..2aa0e219d721 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2775,14 +2775,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags) static int selinux_fs_context_submount(struct fs_context *fc, struct super_block *reference) { - const struct superblock_security_struct *sbsec; + const struct superblock_security_struct *sbsec = selinux_superblock(reference); struct selinux_mnt_opts *opts; + /* + * Ensure that fc->security remains NULL when no options are set + * as expected by selinux_set_mnt_opts(). 
+ */ + if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT))) + return 0; + opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return -ENOMEM; - sbsec = selinux_superblock(reference); if (sbsec->flags & FSCONTEXT_MNT) opts->fscontext_sid = sbsec->sid; if (sbsec->flags & CONTEXT_MNT) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 1384090530db..e308d1ba664e 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -4333,7 +4333,8 @@ static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn continue; } - if (insn_func(dest) && insn_func(dest) == insn_func(insn)) { + if (insn_func(dest) && insn_func(insn) && + insn_func(dest)->pfunc == insn_func(insn)->pfunc) { /* * Anything from->to self is either _THIS_IP_ or * IRET-to-self. diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index cb5f18c06593..c778d4dcc17e 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest @@ -31,6 +31,9 @@ err_ret=1 # kselftest skip code is 4 err_skip=4 +# umount required +UMOUNT_DIR="" + # cgroup RT scheduling prevents chrt commands from succeeding, which # induces failures in test wakeup tests. Disable for the duration of # the tests. @@ -45,6 +48,9 @@ setup() { cleanup() { echo $sched_rt_runtime_orig > $sched_rt_runtime + if [ -n "${UMOUNT_DIR}" ]; then + umount ${UMOUNT_DIR} ||: + fi } errexit() { # message @@ -124,6 +130,7 @@ parse_opts() { # opts ;; --logdir|-l) LOG_DIR=$2 + LINK_PTR= shift 2 ;; *.tc) @@ -160,11 +167,13 @@ if [ -z "$TRACING_DIR" ]; then mount -t tracefs nodev /sys/kernel/tracing || errexit "Failed to mount /sys/kernel/tracing" TRACING_DIR="/sys/kernel/tracing" + UMOUNT_DIR=${TRACING_DIR} # If debugfs exists, then so does /sys/kernel/debug elif [ -d "/sys/kernel/debug" ]; then mount -t debugfs nodev /sys/kernel/debug || errexit "Failed to mount /sys/kernel/debug" TRACING_DIR="/sys/kernel/debug/tracing" + UMOUNT_DIR=${TRACING_DIR} else err_ret=$err_skip errexit "debugfs and tracefs are not configured in this kernel" @@ -181,7 +190,10 @@ fi TOP_DIR=`absdir $0` TEST_DIR=$TOP_DIR/test.d TEST_CASES=`find_testcases $TEST_DIR` -LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/ +LOG_TOP_DIR=$TOP_DIR/logs +LOG_DATE=`date +%Y%m%d-%H%M%S` +LOG_DIR=$LOG_TOP_DIR/$LOG_DATE/ +LINK_PTR=$LOG_TOP_DIR/latest KEEP_LOG=0 KTAP=0 DEBUG=0 @@ -207,6 +219,10 @@ else LOG_FILE=$LOG_DIR/ftracetest.log mkdir -p $LOG_DIR || errexit "Failed to make a log directory: $LOG_DIR" date > $LOG_FILE + if [ "x-$LINK_PTR" != "x-" ]; then + unlink $LINK_PTR + ln -fs $LOG_DATE $LINK_PTR + fi fi # Define text colors diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc index 0eb47fbb3f44..42422e425107 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc @@ -39,7 +39,7 @@ instance_read() { instance_set() { while :; do - echo 1 > foo/events/sched/sched_switch + echo 1 > foo/events/sched/sched_switch/enable done 2> /dev/null } diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc index 213d890ed188..174376ddbc6c 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc +++ 
b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger trace action with dynamic string param -# requires: set_event synthetic_events events/sched/sched_process_exec/hist "char name[]' >> synthetic_events":README ping:program +# requires: set_event synthetic_events events/sched/sched_process_exec/hist "' >> synthetic_events":README ping:program fail() { #msg echo $1 diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc index 955e3ceea44b..b927ee54c02d 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test synthetic_events syntax parser errors -# requires: synthetic_events error_log "char name[]' >> synthetic_events":README +# requires: synthetic_events error_log "' >> synthetic_events":README check_error() { # command-with-error-pos-by-^ ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events' diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh index 261c73cab41b..cd2fb43eea61 100644 --- a/tools/testing/selftests/kselftest/runner.sh +++ b/tools/testing/selftests/kselftest/runner.sh @@ -36,7 +36,8 @@ tap_timeout() { # Make sure tests will time out if utility is available. if [ -x /usr/bin/timeout ] ; then - /usr/bin/timeout --foreground "$kselftest_timeout" $1 + /usr/bin/timeout --foreground "$kselftest_timeout" \ + /usr/bin/timeout "$kselftest_timeout" $1 else $1 fi diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh index 4bc14d9e8ff1..de59cc8f03c3 100755 --- a/tools/testing/selftests/kselftest_deps.sh +++ b/tools/testing/selftests/kselftest_deps.sh @@ -46,11 +46,11 @@ fi print_targets=0 while getopts "p" arg; do - case $arg in - p) + case $arg in + p) print_targets=1 shift;; - esac + esac done if [ $# -eq 0 ] @@ -92,6 +92,10 @@ pass_cnt=0 # Get all TARGETS from selftests Makefile targets=$(grep -E "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2) +# Initially, in LDLIBS related lines, the dep checker needs +# to ignore lines containing the following strings: +filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS" + # Single test case if [ $# -eq 2 ] then @@ -100,6 +104,8 @@ then l1_test $test l2_test $test l3_test $test + l4_test $test + l5_test $test print_results $1 $2 exit $? @@ -113,7 +119,7 @@ fi # Append space at the end of the list to append more tests. l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \ - grep -v "VAR_LDLIBS" | awk -F: '{print $1}') + grep -v "$filter" | awk -F: '{print $1}' | uniq) # Level 2: LDLIBS set dynamically. # @@ -126,7 +132,7 @@ l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \ # Append space at the end of the list to append more tests. 
l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \ - grep -v "VAR_LDLIBS" | awk -F: '{print $1}') + grep -v "$filter" | awk -F: '{print $1}' | uniq) # Level 3 # memfd and others use pkg-config to find mount and fuse libs @@ -138,11 +144,32 @@ l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \ # VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null) l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \ - grep -v "pkg-config" | awk -F: '{print $1}') + grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq) -#echo $l1_tests -#echo $l2_1_tests -#echo $l3_tests +# Level 4 +# some tests may fall back to default using `|| echo -l<libname>` +# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS +# as per level 3 checks. +# e.g: +# netfilter/Makefile +# LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl) +l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \ + grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq) + +# Level 5 +# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS, +# which in turn may be defined in a sub-Makefile +# e.g.: +# mm/Makefile +# $(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS) +l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \ + awk -F: '{print $1}' | uniq) + +#echo l1_tests $l1_tests +#echo l2_tests $l2_tests +#echo l3_tests $l3_tests +#echo l4_tests $l4_tests +#echo l5_tests $l5_tests all_tests print_results $1 $2 @@ -164,24 +191,32 @@ all_tests() for test in $l3_tests; do l3_test $test done + + for test in $l4_tests; do + l4_test $test + done + + for test in $l5_tests; do + l5_test $test + done } # Use same parsing used for l1_tests and pick libraries this time. l1_test() { test_libs=$(grep --include=Makefile "^LDLIBS" $test | \ - grep -v "VAR_LDLIBS" | \ + grep -v "$filter" | \ sed -e 's/\:/ /' | \ sed -e 's/+/ /' | cut -d "=" -f 2) check_libs $test $test_libs } -# Use same parsing used for l2__tests and pick libraries this time. +# Use same parsing used for l2_tests and pick libraries this time. l2_test() { test_libs=$(grep --include=Makefile ": LDLIBS" $test | \ - grep -v "VAR_LDLIBS" | \ + grep -v "$filter" | \ sed -e 's/\:/ /' | sed -e 's/+/ /' | \ cut -d "=" -f 2) @@ -197,6 +232,24 @@ l3_test() check_libs $test $test_libs } +l4_test() +{ + test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \ + grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \ + sed -e 's/.*|| echo //' | sed -e 's/)$//') + + check_libs $test $test_libs +} + +l5_test() +{ + tests=$(find $(dirname "$test") -type f -name "*.mk") + test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \ + cut -d "=" -f 2) + + check_libs $test $test_libs +} + check_libs() { diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index d8ecacd03ecf..9f99ea42f45f 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -12,19 +12,37 @@ #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK) +static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX]; + bool filter_reg(__u64 reg) { + switch (reg & ~REG_MASK) { /* - * Some ISA extensions are optional and not present on all host, - * but they can't be disabled through ISA_EXT registers when present. - * So, to make life easy, just filtering out these kind of registers. 
+ * Same set of ISA_EXT registers are not present on all host because + * ISA_EXT registers are visible to the KVM user space based on the + * ISA extensions available on the host. Also, disabling an ISA + * extension using corresponding ISA_EXT register does not affect + * the visibility of the ISA_EXT register itself. + * + * Based on above, we should filter-out all ISA_EXT registers. */ - switch (reg & ~REG_MASK) { + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR: @@ -32,6 +50,15 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM: return true; + /* AIA registers are always available when Ssaia can't be disabled */ + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h): + return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA]; default: break; } @@ -50,24 +77,27 @@ static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext) unsigned long value; ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value); - if (ret) { - printf("Failed to get ext %d", ext); - return false; - } - - return !!value; + return (ret) ? false : !!value; } void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) { + unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; struct vcpu_reg_sublist *s; + int rc; + + for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) + __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]); /* * Disable all extensions which were enabled by default * if they were available in the risc-v host. 
*/ - for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) - __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0); + for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) { + rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0); + if (rc && isa_ext_state[i]) + isa_ext_cant_disable[i] = true; + } for_each_sublist(c, s) { if (!s->feature) @@ -506,10 +536,6 @@ static __u64 base_regs[] = { KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time), KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare), KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A, - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C, - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I, - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M, KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01, KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME, KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI, diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index d17854285f2b..118e0964bda9 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -106,7 +106,7 @@ endef run_tests: all ifdef building_out_of_srctree @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ - rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \ + rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \ fi @if [ "X$(TEST_PROGS)" != "X" ]; then \ $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \ @@ -120,7 +120,7 @@ endif define INSTALL_SINGLE_RULE $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH)) - $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/) + $(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/) endef define INSTALL_RULE diff --git a/tools/testing/selftests/net/bind_bhash.sh b/tools/testing/selftests/net/bind_bhash.sh index ca0292d4b441..a28563bdaae0 100755 --- a/tools/testing/selftests/net/bind_bhash.sh +++ b/tools/testing/selftests/net/bind_bhash.sh @@ -2,7 +2,7 @@ # SPDX-License-Identifier: GPL-2.0 NR_FILES=32768 -SAVED_NR_FILES=$(ulimit -n) +readonly NETNS="ns-$(mktemp -u XXXXXX)" # default values port=443 @@ -36,21 +36,21 @@ while getopts "ha:p:64" opt; do done setup() { + ip netns add "${NETNS}" + ip -netns "${NETNS}" link add veth0 type veth peer name veth1 + ip -netns "${NETNS}" link set lo up + ip -netns "${NETNS}" link set veth0 up + ip -netns "${NETNS}" link set veth1 up + if [[ "$use_v6" == true ]]; then - ip addr add $addr_v6 nodad dev eth0 + ip -netns "${NETNS}" addr add $addr_v6 nodad dev veth0 else - ip addr add $addr_v4 dev lo + ip -netns "${NETNS}" addr add $addr_v4 dev lo fi - ulimit -n $NR_FILES } cleanup() { - if [[ "$use_v6" == true ]]; then - ip addr del $addr_v6 dev eth0 - else - ip addr del $addr_v4/32 dev lo - fi - ulimit -n $SAVED_NR_FILES + ip netns del "${NETNS}" } if [[ "$addr" != "" ]]; then @@ -59,8 +59,10 @@ if [[ "$addr" != "" ]]; then fi setup if [[ "$use_v6" == true ]] ; then - ./bind_bhash $port "ipv6" $addr_v6 + ip netns exec "${NETNS}" sh -c \ + "ulimit -n ${NR_FILES};./bind_bhash ${port} ipv6 ${addr_v6}" else - 
./bind_bhash $port "ipv4" $addr_v4 + ip netns exec "${NETNS}" sh -c \ + "ulimit -n ${NR_FILES};./bind_bhash ${port} ipv4 ${addr_v4}" fi cleanup diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c index 58edfc15d28b..a2662348cdb1 100644 --- a/tools/testing/selftests/net/bind_wildcard.c +++ b/tools/testing/selftests/net/bind_wildcard.c @@ -6,41 +6,91 @@ #include "../kselftest_harness.h" +struct in6_addr in6addr_v4mapped_any = { + .s6_addr = { + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 255, 255, + 0, 0, 0, 0 + } +}; + +struct in6_addr in6addr_v4mapped_loopback = { + .s6_addr = { + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 255, 255, + 127, 0, 0, 1 + } +}; + FIXTURE(bind_wildcard) { struct sockaddr_in addr4; struct sockaddr_in6 addr6; - int expected_errno; }; FIXTURE_VARIANT(bind_wildcard) { const __u32 addr4_const; const struct in6_addr *addr6_const; + int expected_errno; }; FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any) { .addr4_const = INADDR_ANY, .addr6_const = &in6addr_any, + .expected_errno = EADDRINUSE, }; FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local) { .addr4_const = INADDR_ANY, .addr6_const = &in6addr_loopback, + .expected_errno = 0, +}; + +FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any) +{ + .addr4_const = INADDR_ANY, + .addr6_const = &in6addr_v4mapped_any, + .expected_errno = EADDRINUSE, +}; + +FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local) +{ + .addr4_const = INADDR_ANY, + .addr6_const = &in6addr_v4mapped_loopback, + .expected_errno = EADDRINUSE, }; FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any) { .addr4_const = INADDR_LOOPBACK, .addr6_const = &in6addr_any, + .expected_errno = EADDRINUSE, }; FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local) { .addr4_const = INADDR_LOOPBACK, .addr6_const = &in6addr_loopback, + .expected_errno = 0, +}; + +FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any) +{ + .addr4_const = INADDR_LOOPBACK, + .addr6_const = &in6addr_v4mapped_any, + .expected_errno = EADDRINUSE, +}; + +FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local) +{ + .addr4_const = INADDR_LOOPBACK, + .addr6_const = &in6addr_v4mapped_loopback, + .expected_errno = EADDRINUSE, }; FIXTURE_SETUP(bind_wildcard) @@ -52,11 +102,6 @@ FIXTURE_SETUP(bind_wildcard) self->addr6.sin6_family = AF_INET6; self->addr6.sin6_port = htons(0); self->addr6.sin6_addr = *variant->addr6_const; - - if (variant->addr6_const == &in6addr_any) - self->expected_errno = EADDRINUSE; - else - self->expected_errno = 0; } FIXTURE_TEARDOWN(bind_wildcard) @@ -65,6 +110,7 @@ FIXTURE_TEARDOWN(bind_wildcard) void bind_sockets(struct __test_metadata *_metadata, FIXTURE_DATA(bind_wildcard) *self, + int expected_errno, struct sockaddr *addr1, socklen_t addrlen1, struct sockaddr *addr2, socklen_t addrlen2) { @@ -86,9 +132,9 @@ void bind_sockets(struct __test_metadata *_metadata, ASSERT_GT(fd[1], 0); ret = bind(fd[1], addr2, addrlen2); - if (self->expected_errno) { + if (expected_errno) { ASSERT_EQ(ret, -1); - ASSERT_EQ(errno, self->expected_errno); + ASSERT_EQ(errno, expected_errno); } else { ASSERT_EQ(ret, 0); } @@ -99,14 +145,14 @@ void bind_sockets(struct __test_metadata *_metadata, TEST_F(bind_wildcard, v4_v6) { - bind_sockets(_metadata, self, - (struct sockaddr *)&self->addr4, sizeof(self->addr6), + bind_sockets(_metadata, self, variant->expected_errno, + (struct sockaddr *)&self->addr4, sizeof(self->addr4), (struct sockaddr *)&self->addr6, sizeof(self->addr6)); } TEST_F(bind_wildcard, v6_v4) { - bind_sockets(_metadata, self, + 
bind_sockets(_metadata, self, variant->expected_errno, (struct sockaddr *)&self->addr6, sizeof(self->addr6), (struct sockaddr *)&self->addr4, sizeof(self->addr4)); } diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c index 5125c42efe65..22374d29ffdd 100644 --- a/tools/testing/selftests/user_events/abi_test.c +++ b/tools/testing/selftests/user_events/abi_test.c @@ -19,6 +19,7 @@ #include <asm/unistd.h> #include "../kselftest_harness.h" +#include "user_events_selftests.h" const char *data_file = "/sys/kernel/tracing/user_events_data"; const char *enable_file = "/sys/kernel/tracing/events/user_events/__abi_event/enable"; @@ -93,6 +94,8 @@ FIXTURE(user) { }; FIXTURE_SETUP(user) { + USER_EVENT_FIXTURE_SETUP(return); + change_event(false); self->check = 0; } diff --git a/tools/testing/selftests/user_events/config b/tools/testing/selftests/user_events/config new file mode 100644 index 000000000000..64f7a9a90cec --- /dev/null +++ b/tools/testing/selftests/user_events/config @@ -0,0 +1 @@ +CONFIG_USER_EVENTS=y diff --git a/tools/testing/selftests/user_events/dyn_test.c b/tools/testing/selftests/user_events/dyn_test.c index 91a4444ad42b..32c827a52d7d 100644 --- a/tools/testing/selftests/user_events/dyn_test.c +++ b/tools/testing/selftests/user_events/dyn_test.c @@ -15,6 +15,7 @@ #include <unistd.h> #include "../kselftest_harness.h" +#include "user_events_selftests.h" const char *abi_file = "/sys/kernel/tracing/user_events_data"; const char *enable_file = "/sys/kernel/tracing/events/user_events/__test_event/enable"; @@ -146,6 +147,7 @@ FIXTURE(user) { }; FIXTURE_SETUP(user) { + USER_EVENT_FIXTURE_SETUP(return); } FIXTURE_TEARDOWN(user) { diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c index 5beb0aef1d81..6a260caeeddc 100644 --- a/tools/testing/selftests/user_events/ftrace_test.c +++ b/tools/testing/selftests/user_events/ftrace_test.c @@ -16,6 +16,7 @@ #include <unistd.h> #include "../kselftest_harness.h" +#include "user_events_selftests.h" const char *data_file = "/sys/kernel/tracing/user_events_data"; const char *status_file = "/sys/kernel/tracing/user_events_status"; @@ -206,6 +207,8 @@ FIXTURE(user) { }; FIXTURE_SETUP(user) { + USER_EVENT_FIXTURE_SETUP(return); + self->status_fd = open(status_file, O_RDONLY); ASSERT_NE(-1, self->status_fd); diff --git a/tools/testing/selftests/user_events/perf_test.c b/tools/testing/selftests/user_events/perf_test.c index 8b09be566fa2..f893398cda05 100644 --- a/tools/testing/selftests/user_events/perf_test.c +++ b/tools/testing/selftests/user_events/perf_test.c @@ -17,6 +17,7 @@ #include <asm/unistd.h> #include "../kselftest_harness.h" +#include "user_events_selftests.h" const char *data_file = "/sys/kernel/tracing/user_events_data"; const char *id_file = "/sys/kernel/tracing/events/user_events/__test_event/id"; @@ -113,6 +114,8 @@ FIXTURE(user) { }; FIXTURE_SETUP(user) { + USER_EVENT_FIXTURE_SETUP(return); + self->data_fd = open(data_file, O_RDWR); ASSERT_NE(-1, self->data_fd); } diff --git a/tools/testing/selftests/user_events/user_events_selftests.h b/tools/testing/selftests/user_events/user_events_selftests.h new file mode 100644 index 000000000000..690378942f82 --- /dev/null +++ b/tools/testing/selftests/user_events/user_events_selftests.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _USER_EVENTS_SELFTESTS_H +#define _USER_EVENTS_SELFTESTS_H + +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/mount.h> 
+#include <unistd.h> +#include <errno.h> + +#include "../kselftest.h" + +static inline bool tracefs_enabled(char **message, bool *fail) +{ + struct stat buf; + int ret; + + *message = ""; + *fail = false; + + /* Ensure tracefs is installed */ + ret = stat("/sys/kernel/tracing", &buf); + + if (ret == -1) { + *message = "Tracefs is not installed"; + return false; + } + + /* Ensure mounted tracefs */ + ret = stat("/sys/kernel/tracing/README", &buf); + + if (ret == -1 && errno == ENOENT) { + if (mount(NULL, "/sys/kernel/tracing", "tracefs", 0, NULL) != 0) { + *message = "Cannot mount tracefs"; + *fail = true; + return false; + } + + ret = stat("/sys/kernel/tracing/README", &buf); + } + + if (ret == -1) { + *message = "Cannot access tracefs"; + *fail = true; + return false; + } + + return true; +} + +static inline bool user_events_enabled(char **message, bool *fail) +{ + struct stat buf; + int ret; + + *message = ""; + *fail = false; + + if (getuid() != 0) { + *message = "Must be run as root"; + *fail = true; + return false; + } + + if (!tracefs_enabled(message, fail)) + return false; + + /* Ensure user_events is installed */ + ret = stat("/sys/kernel/tracing/user_events_data", &buf); + + if (ret == -1) { + switch (errno) { + case ENOENT: + *message = "user_events is not installed"; + return false; + + default: + *message = "Cannot access user_events_data"; + *fail = true; + return false; + } + } + + return true; +} + +#define USER_EVENT_FIXTURE_SETUP(statement) do { \ + char *message; \ + bool fail; \ + if (!user_events_enabled(&message, &fail)) { \ + if (fail) { \ + TH_LOG("Setup failed due to: %s", message); \ + ASSERT_FALSE(fail); \ + } \ + SKIP(statement, "Skipping due to: %s", message); \ + } \ +} while (0) + +#endif /* _USER_EVENTS_SELFTESTS_H */
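
Note on the new user_events selftest helper added above: USER_EVENT_FIXTURE_SETUP() is meant to be the first statement of a kselftest fixture setup, so tests are skipped (or failed, when tracefs cannot be mounted) rather than erroring out on kernels without tracefs or CONFIG_USER_EVENTS. Below is a minimal sketch of that usage, mirroring the pattern the series applies in abi_test.c and ftrace_test.c; the fixture name "example" and the single test are illustrative assumptions, while the harness macros come from ../kselftest_harness.h. This sketch is not part of the patch.

/* Illustrative sketch only: mirrors how the patched selftests adopt
 * USER_EVENT_FIXTURE_SETUP(). The fixture name "example" and the test
 * body are hypothetical.
 */
#include <fcntl.h>
#include <unistd.h>

#include "../kselftest_harness.h"
#include "user_events_selftests.h"

FIXTURE(example) {
	int data_fd;
};

FIXTURE_SETUP(example) {
	/* Skip the test (or fail on mount errors) when user_events is unusable. */
	USER_EVENT_FIXTURE_SETUP(return);

	self->data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
	ASSERT_NE(-1, self->data_fd);
}

FIXTURE_TEARDOWN(example) {
	close(self->data_fd);
}

TEST_F(example, data_file_is_open) {
	ASSERT_NE(-1, self->data_fd);
}

TEST_HARNESS_MAIN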