author    | David S. Miller <davem@davemloft.net> | 2020-08-02 01:02:12 -0700
committer | David S. Miller <davem@davemloft.net> | 2020-08-02 01:02:12 -0700
commit    | bd0b33b24897ba9ddad221e8ac5b6f0e38a2e004 (patch)
tree      | 3f03062a9e759dba7b09df0feccbb58b8f7ab4cc
parent    | 7126bd5c8bcbc015cf89864cf71d750e8f33f924 (diff)
parent    | ac3a0c8472969a03c0496ae774b3a29eb26c8d5a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Resolved kernel/bpf/btf.c using instructions from merge commit
69138b34a7248d2396ab85c8652e20c0c39beaba
Signed-off-by: David S. Miller <davem@davemloft.net>
207 files changed, 1699 insertions, 835 deletions
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs index f6d9c2a8d528..2e9ae311e02d 100644 --- a/Documentation/ABI/testing/debugfs-driver-habanalabs +++ b/Documentation/ABI/testing/debugfs-driver-habanalabs @@ -16,7 +16,16 @@ Description: Allow the root user to disable/enable in runtime the clock gating mechanism in Gaudi. Due to how Gaudi is built, the clock gating needs to be disabled in order to access the registers of the TPC and MME engines. This is sometimes needed - during debug by the user and hence the user needs this option + during debug by the user and hence the user needs this option. + The user can supply a bitmask value, each bit represents + a different engine to disable/enable its clock gating feature. + The bitmask is composed of 20 bits: + 0 - 7 : DMA channels + 8 - 11 : MME engines + 12 - 19 : TPC engines + The bit's location of a specific engine can be determined + using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values + are defined in uapi habanalabs.h file in enum gaudi_engine_id What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers Date: Jan 2019 diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml index 526593c8c614..4cc1a670c986 100644 --- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml +++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml @@ -47,6 +47,9 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle-array description: Phandle to the device SRAM + iommus: + maxItems: 1 + memory-region: description: CMA pool to use for buffers allocation instead of the default diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index ff406563ea88..b9d04ee6dac1 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to leverage the UDP based load balancing capability of different networks. MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them. -The Bareudp tunnel module provides a generic L3 encapsulation tunnelling -support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside -a UDP tunnel. +The Bareudp tunnel module provides a generic L3 encapsulation support for +tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel. Special Handling ---------------- diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst index 1e3f3ffee248..2014307fbe63 100644 --- a/Documentation/networking/devlink/devlink-trap.rst +++ b/Documentation/networking/devlink/devlink-trap.rst @@ -486,6 +486,10 @@ narrow. 
The description of these groups must be added to the following table: - Contains packet traps for packets that should be locally delivered after routing, but do not match more specific packet traps (e.g., ``ipv4_bgp``) + * - ``external_delivery`` + - Contains packet traps for packets that should be routed through an + external interface (e.g., management interface) that does not belong to + the same device (e.g., switch ASIC) as the ingress interface * - ``ipv6`` - Contains packet traps for various IPv6 control packets (e.g., Router Advertisements) diff --git a/MAINTAINERS b/MAINTAINERS index 7bc4360b592f..b4cf52caf4f4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h F: include/linux/mfd/altera-a10sr.h ALTERA TRIPLE SPEED ETHERNET DRIVER -M: Thor Thayer <thor.thayer@linux.intel.com> +M: Joyce Ooi <joyce.ooi@intel.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/altera/ @@ -1425,7 +1425,7 @@ F: arch/arm*/include/asm/perf_event.h F: arch/arm*/kernel/hw_breakpoint.c F: arch/arm*/kernel/perf_* F: arch/arm/oprofile/common.c -F: drivers/perf/* +F: drivers/perf/ F: include/linux/perf/arm_pmu.h ARM PORT @@ -14191,7 +14191,8 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.txt F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c QUALCOMM GENERIC INTERFACE I2C DRIVER -M: Alok Chauhan <alokc@codeaurora.org> +M: Akash Asthana <akashast@codeaurora.org> +M: Mukesh Savaliya <msavaliy@codeaurora.org> L: linux-i2c@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Supported @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -568,7 +568,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 
endif ifneq ($(GCC_TOOLCHAIN),) @@ -1769,7 +1769,7 @@ PHONY += descend $(build-dirs) descend: $(build-dirs) $(build-dirs): prepare $(Q)$(MAKE) $(build)=$@ \ - single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \ + single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \ need-builtin=1 need-modorder=1 clean-dirs := $(addprefix _clean_, $(clean-dirs)) diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 348116501aa2..9b1a24cc5e91 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -342,7 +342,8 @@ comphy: phy@18300 { compatible = "marvell,armada-380-comphy"; - reg = <0x18300 0x100>; + reg-names = "comphy", "conf"; + reg = <0x18300 0x100>, <0x18460 4>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi index 756f3a9f1b4f..12997dae35d9 100644 --- a/arch/arm/boot/dts/imx6qdl-icore.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi @@ -397,7 +397,7 @@ pinctrl_usbotg: usbotggrp { fsl,pins = < - MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059 >; }; @@ -409,6 +409,7 @@ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070 + MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0 >; }; diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index 825924448ab4..14fd1de52a68 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -99,7 +99,7 @@ &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy0>; fsl,magic-packet; status = "okay"; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index 3e5fb72f21fc..c99aa273c296 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -213,7 +213,7 @@ &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy2>; status = "okay"; }; diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts index db640bab8c1d..8b3d64c913d8 100644 --- a/arch/arm/boot/dts/keystone-k2g-evm.dts +++ b/arch/arm/boot/dts/keystone-k2g-evm.dts @@ -402,7 +402,7 @@ &gbe0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-id"; + phy-mode = "rgmii-rxid"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index bf531efc0610..0f95a6ef8543 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -198,7 +198,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index e6b036734a64..c2b4fbf552a3 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -117,7 +117,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index ffe1d10a1a84..6d6a37940db2 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -181,7 +181,7 @@ default-pool { compatible = "shared-dma-pool"; 
size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index f44f448537f2..1a3eedbac4a2 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -5,6 +5,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include <asm/thread_info.h> + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 02ca7adf5375..7fff88e61252 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. 
*/ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!is_default_overflow_handler(wp)) + goto unlock; +step: + enable_single_step(wp, instruction_pointer(regs)); unlock: rcu_read_unlock(); } diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index 6bfdca4769a7..fddd08a6e063 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr) if (!cntvct_ok) { vdso_nullpatch_one(&einfo, "__vdso_gettimeofday"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime"); + vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64"); } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 628028bfbb92..bcd82614c25d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md, pud_t *pud; p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); - if (!WARN_ON(!p4d)) + if (WARN_ON(!p4d)) return; pud = pud_alloc(mm, p4d, md->virtual); if (WARN_ON(!pud)) diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index 78b1361dfbb9..9ce78a7b117d 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -161,6 +161,7 @@ resets = <&ccu RST_BUS_VE>; interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; allwinner,sram = <&ve_sram 1>; + iommus = <&iommu 3>; }; gpu: gpu@1800000 { diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 12f0eb56a1cc..619db9b4c9d5 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".previous\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ + ".org . 
- (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ ".endif\n" #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index b6f7bc6da5fb..93a161b3bf3f 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index ea268d88b6f7..a0c8a0b65259 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -30,7 +30,6 @@ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/thread_info.h> -#include <asm/pointer_auth.h> DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..6dd4171c9530 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + static __inline__ s64 atomic64_read(const atomic64_t *v) { diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c..068958575871 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8..2e4d1f05a926 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return prev; +} diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 0fc8bad878b2..446e54c3f71e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -3072,10 +3072,18 @@ do_hash_page: ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r5,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ + + /* + * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then + * don't call hash_page, just fail the fault. 
This is required to + * prevent re-entrancy problems in the hash code, namely perf + * interrupts hitting while something holds H_PAGE_BUSY, and taking a + * hash fault. See the comment in hash_preload(). + */ ld r11, PACA_THREAD_INFO(r13) - lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ - andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ - bne 77f /* then don't call hash_page now */ + lwz r0,TI_PREEMPT(r11) + andis. r0,r0,NMI_MASK@h + bne 77f /* * r3 contains the trap number diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 468169e33c86..9b9f92ad0e7a 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, pgd_t *pgdir; int rc, ssize, update_flags = 0; unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0); + unsigned long flags; BUG_ON(get_region_id(ea) != USER_REGION_ID); @@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, return; #endif /* CONFIG_PPC_64K_PAGES */ + /* + * __hash_page_* must run with interrupts off, as it sets the + * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any + * time and may take a hash fault reading the user stack, see + * read_user_stack_slow() in the powerpc/perf code. + * + * If that takes a hash fault on the same page as we lock here, it + * will bail out when seeing H_PAGE_BUSY set, and retry the access + * leading to an infinite loop. + * + * Disabling interrupts here does not prevent perf interrupts, but it + * will prevent them taking hash faults (see the NMI test in + * do_hash_page), then read_user_stack's copy_from_user_nofault will + * fail and perf will fall back to read_user_stack_slow(), which + * walks the Linux page tables. + * + * Interrupts must also be off for the duration of the + * mm_is_thread_local test and update, to prevent preempt running the + * mm on another CPU (XXX: this may be racy vs kthread_use_mm). + */ + local_irq_save(flags); + /* Is that local to this CPU ? */ if (mm_is_thread_local(mm)) update_flags |= HPTE_LOCAL_UPDATE; @@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, mm_ctx_user_psize(&mm->context), mm_ctx_user_psize(&mm->context), pte_val(*ptep)); + + local_irq_restore(flags); } /* diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index cd6a742ac6ef..01d70280d287 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs) perf_read_regs(regs); + /* + * If perf interrupts hit in a local_irq_disable (soft-masked) region, + * we consider them as NMIs. This is required to prevent hash faults on + * user addresses when reading callchains. See the NMI test in + * do_hash_page. 
+ */ nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 22d968bfe9bb..d770da3f8b6f 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); +#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp)) #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -33,13 +34,4 @@ do { \ tlb_remove_page((tlb), (pte)); \ } while (0) -#if CONFIG_PGTABLE_LEVELS > 2 -#define __pmd_free_tlb(tlb, pmdp, addr) \ -do { \ - struct page *page = virt_to_page(pmdp); \ - pgtable_pmd_page_dtor(page); \ - tlb_remove_page((tlb), page); \ -} while (0); -#endif - #endif /* __ASM_SH_PGALLOC_H */ diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 956a7a03b0c8..9bac5bbb67f3 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -199,7 +199,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -353,7 +353,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! @@ -392,7 +392,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 42c672f1584e..cbe6aa77d50d 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } mmap_read_unlock(mm); - mmput(mm); + mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index d9fd70280482..7f814da3c2d0 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); diff --git a/drivers/base/property.c b/drivers/base/property.c index 1e6d75e65938..d58aa98fe964 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, return next; /* When no more children in primary, continue with secondary */ - if (!IS_ERR_OR_NULL(fwnode->secondary)) + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) next = fwnode_get_next_child_node(fwnode->secondary, child); return next; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 934c92dcb9ab..687d4af6945d 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -814,7 +814,8 @@ static struct inode *devmem_inode; #ifdef CONFIG_IO_STRICT_DEVMEM void revoke_devmem(struct resource *res) { - struct inode *inode 
= READ_ONCE(devmem_inode); + /* pairs with smp_store_release() in devmem_init_inode() */ + struct inode *inode = smp_load_acquire(&devmem_inode); /* * Check that the initialization has completed. Losing the race @@ -1028,8 +1029,11 @@ static int devmem_init_inode(void) return rc; } - /* publish /dev/mem initialized */ - WRITE_ONCE(devmem_inode, inode); + /* + * Publish /dev/mem initialized. + * Pairs with smp_load_acquire() in revoke_devmem(). + */ + smp_store_release(&devmem_inode, inode); return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 2a41b21623ae..d20ba1b104ca 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8..6945c3c96637 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index b0c31789a909..3fa2c5992173 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev) * on this port and minimum soft reset pulse width has elapsed. * Driver polls port_soft_reset_ack to determine if reset done by HW. */ - if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST, + if (readq_poll_timeout(base + PORT_HDR_CTRL, v, + v & PORT_CTRL_SFTRST_ACK, RST_POLL_INVL, RST_POLL_TIMEOUT)) { dev_err(&pdev->dev, "timeout, fail to reset device\n"); return -ETIMEDOUT; diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 538755062ab7..a78c409bf2c4 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); struct dfl_fpga_cdev *cdev = drvdata->cdev; - int ret = 0; if (!num_vfs) { /* @@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) dfl_fpga_cdev_config_ports_pf(cdev); } else { + int ret; + /* * before enable SRIOV, put released ports into VF access mode * first of all. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d7e17e34fee1..21292098bc02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? 
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ebb8a28ff002..02e6f8c4dde0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; while (isspace(*++tmp_str)); - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); if (ret) return -EINVAL; @@ -1038,7 +1039,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) memcpy(buf_cpy, buf, bytes); buf_cpy[bytes] = '\0'; tmp = buf_cpy; - while ((sub_str = strsep(&tmp, delimiter)) != NULL) { + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); if (strlen(sub_str)) { ret = kstrtol(sub_str, 0, &level); if (ret) @@ -1635,7 +1637,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, i++; memcpy(buf_cpy, buf, count-i); tmp_str = buf_cpy; - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); if (ret) return -EINVAL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 86ffa0c2880f..710edc70e37e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * the same resource. If we have a new DC context as part of * the DM atomic state from validation we need to free it and * retain the existing one instead. + * + * Furthermore, since the DM atomic state only contains the DC + * context and can safely be annulled, we can free the state + * and clear the associated private object now to free + * some memory and avoid a possible use-after-free later. */ - struct dm_atomic_state *new_dm_state, *old_dm_state; - new_dm_state = dm_atomic_get_new_state(state); - old_dm_state = dm_atomic_get_old_state(state); + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; - if (new_dm_state && old_dm_state) { - if (new_dm_state->context) - dc_release_state(new_dm_state->context); + if (obj->funcs == adev->dm.atomic_obj.funcs) { + int j = state->num_private_objs-1; - new_dm_state->context = old_dm_state->context; + dm_atomic_destroy_state(obj, + state->private_objs[i].state); + + /* If i is not at the end of the array then the + * last element needs to be moved to where i was + * before the array can safely be truncated. 
+ */ + if (i != j) + state->private_objs[i] = + state->private_objs[j]; - if (old_dm_state->context) - dc_retain_state(old_dm_state->context); + state->private_objs[j].ptr = NULL; + state->private_objs[j].state = NULL; + state->private_objs[j].old_state = NULL; + state->private_objs[j].new_state = NULL; + + state->num_private_objs = j; + break; + } } } diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 05d8373888e8..079f46f5cdb6 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; + bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 87b58c1acff4..648eb23d0784 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511->bridge.funcs = &adv7511_bridge_funcs; adv7511->bridge.of_node = dev->of_node; + adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA; drm_bridge_add(&adv7511->bridge); diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index b14d725bf609..c7bc194bbce3 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, struct drm_panel *panel; int ret; - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } - ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel, &panel_bridge); if (ret) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5609e164805f..89cfd68ef400 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, unsigned int y; for (y = clip->y1; y < clip->y2; y++) { - memcpy(dst, src, len); + if (!fb_helper->dev->mode_config.fbdev_use_iomem) + memcpy(dst, src, len); + else + memcpy_toio((void __iomem *)dst, src, len); + src += fb->pitches[0]; dst += fb->pitches[0]; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 7bf628e13023..ee2058ad482c 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -871,9 +871,6 @@ err: * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. 
*/ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index bb27c82757f1..bf7888ad9ad4 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, } } - tr.len = chunk; + tr.len = chunk * 2; len -= chunk; ret = spi_sync(spi, &m); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index b50b44e76279..8fc3f67e3e76 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type( * configurations by passing the endpoints explicitly to * drm_of_lvds_get_dual_link_pixel_order(). */ - if (!current_pt || pixels_type != current_pt) { - of_node_put(remote_port); + if (!current_pt || pixels_type != current_pt) return -EINVAL; - } } return pixels_type; diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 08802e5177f6..4d2290f88edb 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe, */ if (fb) { mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0)); - if (!mcde->video_mode) - /* Send a single frame using software sync */ - mcde_display_send_one_frame(mcde); + if (!mcde->video_mode) { + /* + * Send a single frame using software sync if the flow + * is not active yet. + */ + if (mcde->flow_active == 0) + mcde_display_send_one_frame(mcde); + } dev_info_once(mcde->dev, "sent first display update\n"); } else { /* diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 519f99868e35..800b7757252e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) */ if (core->assign_windows) { core->func->wndw.owner(core); - core->func->update(core, interlock, false); + nv50_disp_atomic_commit_core(state, interlock); core->assign_windows = false; interlock[NV50_DISP_INTERLOCK_CORE] = 0; } @@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev) if (disp->disp->object.oclass >= TU102_DISP) nouveau_display(dev)->format_modifiers = wndwc57e_modifiers; else - if (disp->disp->object.oclass >= GF110_DISP) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) nouveau_display(dev)->format_modifiers = disp90xx_modifiers; else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 496c4621cc78..07373bbc2acf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm, uint32_t *tile_mode, uint8_t *kind) { + struct nouveau_display *disp = nouveau_display(drm->dev); BUG_ON(!tile_mode || !kind); if (modifier == DRM_FORMAT_MOD_LINEAR) { @@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm, * Extract the block height and kind from the corresponding * modifier fields. See drm_fourcc.h for details. */ + + if ((modifier & (0xffull << 12)) == 0ull) { + /* Legacy modifier. 
Translate to this dev's 'kind.' */ + modifier |= disp->format_modifiers[0] & (0xffull << 12); + } + *tile_mode = (uint32_t)(modifier & 0xF); *kind = (uint8_t)((modifier >> 12) & 0xFF); @@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, } } +static const u64 legacy_modifiers[] = { + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5), + DRM_FORMAT_MOD_INVALID +}; + static int nouveau_validate_decode_mod(struct nouveau_drm *drm, uint64_t modifier, @@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm, (disp->format_modifiers[mod] != modifier); mod++); - if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) - return -EINVAL; + if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) { + for (mod = 0; + (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) && + (legacy_modifiers[mod] != modifier); + mod++); + if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID) + return -EINVAL; + } nouveau_decode_mod(drm, modifier, tile_mode, kind); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 3d11b84d4cf9..d5c23d1c20d8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; @@ -590,6 +590,7 @@ fini: drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index dcf08249374a..dffcac249211 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type, { struct nvkm_ior *ior; - /* First preference is to reuse the OR that is currently armed - * on HW, if any, in order to prevent unnecessary switching. - */ - list_for_each_entry(ior, &outp->disp->ior, head) { - if (!ior->identity && !!ior->func->hda.hpd == hda && - !ior->asy.outp && ior->arm.outp == outp) - return nvkm_outp_acquire_ior(outp, user, ior); - } - /* Failing that, a completely unused OR is the next best thing. */ list_for_each_entry(ior, &outp->disp->ior, head) { if (!ior->identity && !!ior->func->hda.hpd == hda && @@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda) return nvkm_outp_acquire_ior(outp, user, ior); } + /* First preference is to reuse the OR that is currently armed + * on HW, if any, in order to prevent unnecessary switching. + */ + list_for_each_entry(ior, &outp->disp->ior, head) { + if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) { + /*XXX: For various complicated reasons, we can't outright switch + * the boot-time OR on the first modeset without some fairly + * invasive changes. + * + * The systems that were fixed by modifying the OR selection + * code to account for HDA support shouldn't regress here as + * the HDA-enabled ORs match the relevant output's pad macro + * index, and the firmware seems to select an OR this way. + * + * This warning is to make it obvious if that proves wrong. 
+ */ + WARN_ON(hda && !ior->func->hda.hpd); + return nvkm_outp_acquire_ior(outp, user, ior); + } + } + /* If we don't need HDA, first try to acquire an OR that doesn't * support it to leave free the ones that do. */ diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 46fe1805c588..2649469070aa 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = { static const struct drm_display_mode auo_kd101n80_45na_default_mode = { .clock = 157000, .hdisplay = 1200, - .hsync_start = 1200 + 80, - .hsync_end = 1200 + 80 + 24, - .htotal = 1200 + 80 + 24 + 36, + .hsync_start = 1200 + 60, + .hsync_end = 1200 + 60 + 24, + .htotal = 1200 + 60 + 24 + 56, .vdisplay = 1920, .vsync_start = 1920 + 16, .vsync_end = 1920 + 16 + 4, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 5178f87d6574..4aeb960ccf15 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = { .height = 165, }, .delay = { - .hpd_absent_delay = 200, + /* + * When power is first given to the panel there's a short + * spike on the HPD line. It was explained that this spike + * was until the TCON data download was complete. On + * one system this was measured at 8 ms. We'll put 15 ms + * in the prepare delay just to be safe and take it away + * from the hpd_absent_delay (which would otherwise be 200 ms) + * to handle this. That means: + * - If HPD isn't hooked up you still have 200 ms delay. + * - If HPD is hooked up we won't try to look at it for the + * first 15 ms. + */ + .prepare = 15, + .hpd_absent_delay = 185, + .unprepare = 500, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 5427f047faf0..1589179d5eb9 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. 
You might see address collisions\n", @@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 655795bfa0ee..513825e424bf 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq) INIT_WORK(&dim->work, ib_cq_rdma_dim_work); } +static void rdma_dim_destroy(struct ib_cq *cq) +{ + if (!cq->dim) + return; + + cancel_work_sync(&cq->dim->work); + kfree(cq->dim); +} + static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) { int rc; @@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, return cq; out_destroy_cq: + rdma_dim_destroy(cq); rdma_restrack_del(&cq->res); cq->device->ops.destroy_cq(cq, udata); out_free_wc: @@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata) WARN_ON_ONCE(1); } + rdma_dim_destroy(cq); trace_cq_free(cq); rdma_restrack_del(&cq->res); cq->device->ops.destroy_cq(cq, udata); - if (cq->dim) - cancel_work_sync(&cq->dim->work); - kfree(cq->dim); kfree(cq->wc); kfree(cq); } diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5b87eee8ccc8..d03dacaef788 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, size_t in_size; int ret; + if (in_len < offsetofend(typeof(cmd), reserved)) + return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; @@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, size_t in_size; int ret; + if (in_len < offsetofend(typeof(cmd), reserved)) + return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 1ab676b66894..77dca1e05bba 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd, work->frags[i].mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); if (!work->frags[i].mr) { - work->num_sge = i - 1; - if (i) - destroy_prefetch_work(work); + work->num_sge = i; return false; } @@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, srcu_key = srcu_read_lock(&dev->odp_srcu); if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) { srcu_read_unlock(&dev->odp_srcu, srcu_key); + destroy_prefetch_work(work); return -EINVAL; } queue_work(system_unbound_wq, &work->work); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e050eade97a1..1225b8d77510 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1766,15 +1766,14 @@ err: } static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, + struct mlx5_ib_qp *qp, struct ib_qp_init_attr *init_attr, - struct mlx5_ib_create_qp *ucmd, void *qpc) { int scqe_sz; bool allow_scat_cqe = false; - if (ucmd) - allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; + allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; if (!allow_scat_cqe && init_attr->sq_sig_type 
!= IB_SIGNAL_ALL_WR) return; @@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u32 *in; int err; - mutex_init(&qp->mutex); - if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; @@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 *in; int err; - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, } if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) - configure_requester_scat_cqe(dev, init_attr, ucmd, qpc); + configure_requester_scat_cqe(dev, qp, init_attr, qpc); if (qp->rq.wqe_cnt) { MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); @@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 *in; int err; - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, return; } - if (flag == MLX5_QP_FLAG_SCATTER_CQE) { + switch (flag) { + case MLX5_QP_FLAG_SCATTER_CQE: + case MLX5_QP_FLAG_ALLOW_SCATTER_CQE: /* - * We don't return error if this flag was provided, - * and mlx5 doesn't have right capability. - */ - *flags &= ~MLX5_QP_FLAG_SCATTER_CQE; + * We don't return error if these flags were provided, + * and mlx5 doesn't have right capability. + */ + *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE | + MLX5_QP_FLAG_ALLOW_SCATTER_CQE); return; + default: + break; } mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); } @@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE, MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); + process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE, + MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); if (qp->type == IB_QPT_RAW_PACKET) { cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || @@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, goto free_ucmd; } + mutex_init(&qp->mutex); qp->type = type; if (udata) { err = process_vendor_flags(dev, qp, params.ucmd, attr); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 7db35dd6ad74..332a8ba94b81 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_tail_ack_queue = 0; qp->s_acked_ack_queue = 0; qp->s_num_rd_atomic = 0; - if (qp->r_rq.kwq) - qp->r_rq.kwq->count = qp->r_rq.size; qp->r_sge.num_sge = 0; atomic_set(&qp->s_reserved_used, 0); } @@ -2367,31 +2365,6 @@ bad_lkey: } /** - * get_count - count numbers of request work queue entries - * in circular buffer - * @rq: data structure for request queue entry - * @tail: tail indices of the circular buffer - * @head: head indices of the circular buffer - * - * Return - total number of entries in the circular buffer - */ -static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head) -{ - u32 count; - - count = head; - - if (count >= rq->size) - count = 0; - if (count < tail) - count += rq->size - tail; - else - count -= tail; - - return count; -} - -/** * get_rvt_head - get head indices of the circular buffer * @rq: data 
structure for request queue entry * @ip: the QP @@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) { head = get_rvt_head(rq, ip); - kwq->count = get_count(rq, tail, head); + kwq->count = rvt_get_rq_count(rq, head, tail); } if (unlikely(kwq->count == 0)) { ret = 0; @@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) * the number of remaining WQEs. */ if (kwq->count < srq->limit) { - kwq->count = get_count(rq, tail, get_rvt_head(rq, ip)); + kwq->count = + rvt_get_rq_count(rq, + get_rvt_head(rq, ip), tail); if (kwq->count < srq->limit) { struct ib_event ev; diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c index 977906cc0d11..c58735f4c94a 100644 --- a/drivers/infiniband/sw/rdmavt/rc.c +++ b/drivers/infiniband/sw/rdmavt/rc.c @@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp) * not atomic, which is OK, since the fuzziness is * resolved as further ACKs go out. */ - credits = head - tail; - if ((int)credits < 0) - credits += qp->r_rq.size; + credits = rvt_get_rq_count(&qp->r_rq, head, tail); } /* * Binary search the credit table to find the code to diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index e5f998744501..9e1ab701785c 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node) { struct icc_provider *p = node->provider; struct icc_req *r; + u32 avg_bw, peak_bw; node->avg_bw = 0; node->peak_bw = 0; @@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node) p->pre_aggregate(node); hlist_for_each_entry(r, &node->req_list, req_node) { - if (!r->enabled) - continue; - p->aggregate(node, r->tag, r->avg_bw, r->peak_bw, + if (r->enabled) { + avg_bw = r->avg_bw; + peak_bw = r->peak_bw; + } else { + avg_bw = 0; + peak_bw = 0; + } + p->aggregate(node, r->tag, avg_bw, peak_bw, &node->avg_bw, &node->peak_bw); } diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c index e94f3c5228b7..42c6c5581662 100644 --- a/drivers/interconnect/qcom/msm8916.c +++ b/drivers/interconnect/qcom/msm8916.c @@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0); DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); -DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); -DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); -DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); -DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); -DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); -DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); -DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, 
MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); +DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); +DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); +DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); +DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); +DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); +DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); +DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV); DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1); DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC); diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index b0f62cbbdc87..f3a8f113865d 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev, struct asic_fixed_properties *asic = &hdev->asic_prop; struct hw_queue_properties *hw_queue_prop; + /* This must be checked here to prevent out-of-bounds access to + * hw_queues_props array + */ + if (chunk->queue_index >= HL_MAX_QUEUES) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return -EINVAL; + } + hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; - if ((chunk->queue_index >= HL_MAX_QUEUES) || - (hw_queue_prop->type == QUEUE_TYPE_NA)) { - dev_err(hdev->dev, "Queue index %d is invalid\n", + if (hw_queue_prop->type == QUEUE_TYPE_NA) { + dev_err(hdev->dev, "Queue index %d is not applicable\n", chunk->queue_index); return -EINVAL; } diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index fc4372c18ce2..0bc036e01ee8 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c @@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_reg = i2c_reg; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, (long *) val); + 0, (long *) val); if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); @@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.value = cpu_to_le64(val); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc); @@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) pkt.value = cpu_to_le64(state); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, NULL); + 0, NULL); if (rc) 
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc); @@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf, if (*ppos) return 0; - sprintf(tmp_buf, "%d\n", hdev->clock_gating); + sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask); rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, strlen(tmp_buf) + 1); @@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, { struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; - u32 value; + u64 value; ssize_t rc; if (atomic_read(&hdev->in_reset)) { @@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, return 0; } - rc = kstrtouint_from_user(buf, count, 10, &value); + rc = kstrtoull_from_user(buf, count, 16, &value); if (rc) return rc; - if (value) { - hdev->clock_gating = 1; - if (hdev->asic_funcs->enable_clock_gating) - hdev->asic_funcs->enable_clock_gating(hdev); - } else { - if (hdev->asic_funcs->disable_clock_gating) - hdev->asic_funcs->disable_clock_gating(hdev); - hdev->clock_gating = 0; - } + hdev->clock_gating_mask = value; + hdev->asic_funcs->set_clock_gating(hdev); return count; } diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 2b38a119704c..59608d1bac88 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) hdev->in_debug = 0; if (!hdev->hard_reset_pending) - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); goto out; } diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c index baf790cf4b78..d27841cb5bcb 100644 --- a/drivers/misc/habanalabs/firmware_if.c +++ b/drivers/misc/habanalabs/firmware_if.c @@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT); return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, - sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL); + sizeof(pkt), 0, NULL); } int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, @@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type) pkt.value = cpu_to_le64(event_type); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, &result); + 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); @@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, ARMCP_PKT_CTL_OPCODE_SHIFT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, - total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result); + total_pkt_size, 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask IRQ array\n"); @@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev) test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt, - sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + sizeof(test_pkt), 0, &result); if (!rc) { if (result != ARMCP_PACKET_FENCE_VAL) @@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev) hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, - sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + sizeof(hb_pkt), 0, &result); if ((rc) || (result != ARMCP_PACKET_FENCE_VAL)) rc = -EIO; diff --git 
a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 834470d10b46..637a9d608707 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -80,6 +80,7 @@ #define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ #define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9 @@ -98,6 +99,11 @@ #define GAUDI_ARB_WDT_TIMEOUT 0x1000000 +#define GAUDI_CLK_GATE_DEBUGFS_MASK (\ + BIT(GAUDI_ENGINE_ID_MME_0) |\ + BIT(GAUDI_ENGINE_ID_MME_2) |\ + GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) + static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", @@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { }; static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { - [GAUDI_PCI_DMA_1] = 0, - [GAUDI_PCI_DMA_2] = 1, - [GAUDI_PCI_DMA_3] = 5, - [GAUDI_HBM_DMA_1] = 2, - [GAUDI_HBM_DMA_2] = 3, - [GAUDI_HBM_DMA_3] = 4, - [GAUDI_HBM_DMA_4] = 6, - [GAUDI_HBM_DMA_5] = 7 + [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, + [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, + [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, + [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, + [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, + [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 }; static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { @@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev) gaudi_init_rate_limiter(hdev); - gaudi_disable_clock_gating(hdev); + hdev->asic_funcs->disable_clock_gating(hdev); for (tpc_id = 0, tpc_offset = 0; tpc_id < TPC_NUMBER_OF_ENGINES; @@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev) WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); } -static void gaudi_enable_clock_gating(struct hl_device *hdev) +static void gaudi_set_clock_gating(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; u32 qman_offset; int i; - if (!hdev->clock_gating) - return; - - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) - return; - /* In case we are during debug session, don't enable the clock gate * as it may interfere */ if (hdev->in_debug) return; - for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) { + for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i])))) + continue; + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, QMAN_UPPER_CP_CGM_PWR_GATE_EN); } - for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) { + for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i])))) + continue; + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, QMAN_COMMON_CP_CGM_PWR_GATE_EN); } - WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME0_QM_CGM_CFG, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); - WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - 
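GAUDI_CLK_GATE_DEBUGFS_MASK above is built from BIT() and GENMASK_ULL() over engine IDs, covering the two MME masters and all eight TPCs whose registers the debugfs accessors may need. The following self-contained sketch re-implements those macros only so it compiles on its own; the engine ID values are assumptions matching the 8 DMA / 4 MME / 8 TPC bit layout:

#include <inttypes.h>
#include <stdio.h>

/* BIT_ULL()/GENMASK_ULL() re-implemented for a standalone build. */
#define BIT_ULL(n)              (1ULL << (n))
#define GENMASK_ULL(h, l)       (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

enum engine_id {
        ENGINE_ID_DMA_0 = 0,            /* bits 0-7: DMA channels */
        ENGINE_ID_MME_0 = 8,            /* bits 8-11: MME engines */
        ENGINE_ID_MME_2 = 10,
        ENGINE_ID_TPC_0 = 12,           /* bits 12-19: TPC engines */
        ENGINE_ID_TPC_7 = 19,
};

/* Engines whose registers the debugfs read/write paths may touch. */
#define CLK_GATE_DEBUGFS_MASK                           \
        (BIT_ULL(ENGINE_ID_MME_0) |                     \
         BIT_ULL(ENGINE_ID_MME_2) |                     \
         GENMASK_ULL(ENGINE_ID_TPC_7, ENGINE_ID_TPC_0))

int main(void)
{
        printf("debugfs-sensitive engines: 0x%" PRIx64 "\n",
               (uint64_t)CLK_GATE_DEBUGFS_MASK);
        return 0;
}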
WREG32(mmMME2_QM_CGM_CFG, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) { + WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); + } + + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) { + WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); + } for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)))) + continue; + WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmTPC0_QM_CGM_CFG + qman_offset, @@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) gaudi_stop_hbm_dma_qmans(hdev); gaudi_stop_pci_dma_qmans(hdev); - gaudi_disable_clock_gating(hdev); + hdev->asic_funcs->disable_clock_gating(hdev); msleep(wait_timeout_ms); @@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev) gaudi_init_tpc_qmans(hdev); - gaudi_enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); gaudi_enable_timestamp(hdev); @@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) HW_CAP_HBM_DMA | HW_CAP_PLL | HW_CAP_MMU | HW_CAP_SRAM_SCRAMBLER | - HW_CAP_HBM_SCRAMBLER); + HW_CAP_HBM_SCRAMBLER | + HW_CAP_CLK_GATE); + memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); } @@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg, return 0; } + if (!timeout) + timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC; + return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len, timeout, result); } @@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev, rc = -EPERM; break; + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + case PACKET_LOAD_AND_EXE: rc = gaudi_validate_load_and_exe_pkt(hdev, parser, (struct packet_load_and_exe *) user_pkt); @@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev, break; case PACKET_WREG_32: - case PACKET_WREG_BULK: case PACKET_MSG_LONG: case PACKET_MSG_SHORT: case PACKET_REPEAT: @@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) int rc = 0; if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't read register - clock gating is enabled!\n"); rc = -EFAULT; } else { *val = RREG32(addr - CFG_BASE); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + @@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) int rc = 0; if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't write register - clock gating is enabled!\n"); rc = -EFAULT; } else { WREG32(addr - CFG_BASE, val); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { writel(val, hdev->pcie_bar[SRAM_BAR_ID] + @@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) 
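gaudi_send_cpu_message() above now treats a timeout of 0 as "use the ASIC default" (GAUDI_MSG_TO_CPU_TIMEOUT_USEC), which is why the generic callers can simply pass 0. A minimal sketch of that convention, with invented function names and a stub in place of hl_fw_send_cpu_message():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_MSG_TIMEOUT_USEC 4000000ULL     /* 4s, like the new defines */

/* Stand-in for the firmware send helper: just reports what would be sent. */
static int fw_send_message(const void *msg, uint32_t len, uint64_t timeout_usec)
{
        (void)msg;
        printf("sending %u bytes with a %" PRIu64 " us timeout\n",
               (unsigned int)len, timeout_usec);
        return 0;
}

static int asic_send_cpu_message(const void *msg, uint32_t len,
                                 uint64_t timeout_usec)
{
        /* A timeout of 0 means "use this ASIC's default". */
        if (!timeout_usec)
                timeout_usec = DEFAULT_MSG_TIMEOUT_USEC;

        return fw_send_message(msg, len, timeout_usec);
}

int main(void)
{
        uint32_t pkt = 0;

        asic_send_cpu_message(&pkt, sizeof(pkt), 0);       /* default */
        asic_send_cpu_message(&pkt, sizeof(pkt), 1000000); /* caller override */
        return 0;
}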
int rc = 0; if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't read register - clock gating is enabled!\n"); rc = -EFAULT; @@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) *val = (((u64) val_h) << 32) | val_l; } + } else if ((addr >= SRAM_BASE_ADDR) && (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + @@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) int rc = 0; if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't write register - clock gating is enabled!\n"); rc = -EFAULT; @@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val)); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + @@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid); gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); } @@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev, } if (disable_clock_gating) { - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); } } @@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id, /* Clear interrupts */ WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask, if (s) seq_puts(s, "\n"); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, dev_err(hdev->dev, "Timeout while waiting for TPC%d icache prefetch\n", tpc_id); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); return -EIO; } @@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, 1000, kernel_timeout); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); if (rc) { @@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .mmu_invalidate_cache = gaudi_mmu_invalidate_cache, .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range, .send_heartbeat = gaudi_send_heartbeat, - .enable_clock_gating = gaudi_enable_clock_gating, + .set_clock_gating = gaudi_set_clock_gating, .disable_clock_gating = gaudi_disable_clock_gating, .debug_coresight = gaudi_debug_coresight, .is_device_idle = 
gaudi_is_device_idle, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 0d2952bb58df..88460b2138d8 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -88,6 +88,7 @@ #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ #define GOYA_QMAN0_FENCE_VAL 0xD169B243 @@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, return 0; } + if (!timeout) + timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC; + return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len, timeout, result); } @@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << ARMCP_PKT_CTL_OPCODE_SHIFT); - rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size, - HL_DEVICE_TIMEOUT_USEC, &result); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, + total_pkt_size, 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask IRQ array\n"); @@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) ARMCP_PKT_CTL_OPCODE_SHIFT); pkt.value = cpu_to_le64(event_type); - rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, &result); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); @@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev) return 0; } -static void goya_enable_clock_gating(struct hl_device *hdev) +static void goya_set_clock_gating(struct hl_device *hdev) { - + /* clock gating not supported in Goya */ } static void goya_disable_clock_gating(struct hl_device *hdev) { - + /* clock gating not supported in Goya */ } static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, @@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = { .mmu_invalidate_cache = goya_mmu_invalidate_cache, .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, .send_heartbeat = goya_send_heartbeat, - .enable_clock_gating = goya_enable_clock_gating, + .set_clock_gating = goya_set_clock_gating, .disable_clock_gating = goya_disable_clock_gating, .debug_coresight = goya_debug_coresight, .is_device_idle = goya_is_device_idle, diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 1ecdcf8b763a..194d83352696 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -578,8 +578,9 @@ enum hl_pll_frequency { * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with * ASID-VA-size mask. * @send_heartbeat: send is-alive packet to ArmCP and verify response. - * @enable_clock_gating: enable clock gating for reducing power consumption. - * @disable_clock_gating: disable clock for accessing registers on HBW. + * @set_clock_gating: enable/disable clock gating per engine according to + * clock gating mask in hdev + * @disable_clock_gating: disable clock gating completely * @debug_coresight: perform certain actions on Coresight for debugging. * @is_device_idle: return true if device is idle, false otherwise. * @soft_reset_late_init: perform certain actions needed after soft reset. 
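Both ASICs now expose the operation through a .set_clock_gating pointer in their asic_funcs tables, and Goya installs an empty stub because it has no clock gating. A simplified sketch of that dispatch-table pattern, with stand-in types and names rather than the real hl_asic_funcs layout:

#include <stdint.h>
#include <stdio.h>

struct device_ctx {
        const char *name;
        uint64_t clock_gating_mask;
};

struct asic_funcs {
        void (*set_clock_gating)(struct device_ctx *dev);
        void (*disable_clock_gating)(struct device_ctx *dev);
};

static void gaudi_like_set_clock_gating(struct device_ctx *dev)
{
        printf("%s: applying clock gating mask 0x%llx\n", dev->name,
               (unsigned long long)dev->clock_gating_mask);
}

static void gaudi_like_disable_clock_gating(struct device_ctx *dev)
{
        printf("%s: clock gating disabled on all engines\n", dev->name);
}

static void goya_like_set_clock_gating(struct device_ctx *dev)
{
        (void)dev;      /* clock gating not supported on this ASIC */
}

static const struct asic_funcs gaudi_like_funcs = {
        .set_clock_gating = gaudi_like_set_clock_gating,
        .disable_clock_gating = gaudi_like_disable_clock_gating,
};

static const struct asic_funcs goya_like_funcs = {
        .set_clock_gating = goya_like_set_clock_gating,
        .disable_clock_gating = goya_like_set_clock_gating,     /* same no-op */
};

int main(void)
{
        struct device_ctx gaudi = { "gaudi-like", ~0ULL };
        struct device_ctx goya = { "goya-like", 0 };

        gaudi_like_funcs.set_clock_gating(&gaudi);
        goya_like_funcs.set_clock_gating(&goya);
        gaudi_like_funcs.disable_clock_gating(&gaudi);
        return 0;
}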
@@ -587,7 +588,11 @@ enum hl_pll_frequency { * @hw_queues_unlock: release H/W queues lock. * @get_pci_id: retrieve PCI ID. * @get_eeprom_data: retrieve EEPROM data from F/W. - * @send_cpu_message: send buffer to ArmCP. + * @send_cpu_message: send message to F/W. If the message is timedout, the + * driver will eventually reset the device. The timeout can + * be determined by the calling function or it can be 0 and + * then the timeout is the default timeout for the specific + * ASIC * @get_hw_state: retrieve the H/W state * @pci_bars_map: Map PCI BARs. * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns @@ -680,7 +685,7 @@ struct hl_asic_funcs { int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, u32 asid, u64 va, u64 size); int (*send_heartbeat)(struct hl_device *hdev); - void (*enable_clock_gating)(struct hl_device *hdev); + void (*set_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); int (*debug_coresight)(struct hl_device *hdev, void *data); bool (*is_device_idle)(struct hl_device *hdev, u32 *mask, @@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts { * @max_power: the max power of the device, as configured by the sysadmin. This * value is saved so in case of hard-reset, the driver will restore * this value and update the F/W after the re-initialization + * @clock_gating_mask: is clock gating enabled. bitmask that represents the + * different engines. See debugfs-driver-habanalabs for + * details. * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. * @cs_active_cnt: number of active command submissions on this device (active @@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts { * @init_done: is the initialization of the device done. * @mmu_enable: is MMU enabled. * @mmu_huge_page_opt: is MMU huge pages optimization enabled. - * @clock_gating: is clock gating enabled. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) * @dma_mask: the dma mask that was set for this device * @in_debug: is device under debug. 
This, together with fpriv_list, enforces @@ -1493,6 +1500,7 @@ struct hl_device { atomic64_t dram_used_mem; u64 timeout_jiffies; u64 max_power; + u64 clock_gating_mask; atomic_t in_reset; enum hl_pll_frequency curr_pll_profile; int cs_active_cnt; @@ -1514,7 +1522,6 @@ struct hl_device { u8 dram_default_page_mapping; u8 pmmu_huge_range; u8 init_done; - u8 clock_gating; u8 device_cpu_disabled; u8 dma_mask; u8 in_debug; diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 8652c7e5d7f1..22716da9f85f 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev) hdev->fw_loading = 1; hdev->cpu_queues_enable = 1; hdev->heartbeat = 1; - hdev->clock_gating = 1; + hdev->clock_gating_mask = ULONG_MAX; hdev->reset_pcilink = 0; hdev->axi_drain = 0; diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c index 8c6cd77e6af6..b997336fa75f 100644 --- a/drivers/misc/habanalabs/hwmon.c +++ b/drivers/misc/habanalabs/hwmon.c @@ -10,7 +10,6 @@ #include <linux/pci.h> #include <linux/hwmon.h> -#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */ #define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) int hl_build_hwmon_channel_info(struct hl_device *hdev, @@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, pkt.value = cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, diff --git 
a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c index 5d78d5e1c782..70b6b1863c2e 100644 --- a/drivers/misc/habanalabs/sysfs.c +++ b/drivers/misc/habanalabs/sysfs.c @@ -9,9 +9,6 @@ #include <linux/pci.h> -#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */ -#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */ - long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) { struct armcp_packet pkt; @@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) pkt.pll_index = cpu_to_le32(pll_index); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_CLK_PKT_TIMEOUT, &result); + 0, &result); if (rc) { dev_err(hdev->dev, @@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) pkt.value = cpu_to_le64(freq); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_CLK_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev) ARMCP_PKT_CTL_OPCODE_SHIFT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_PWR_PKT_TIMEOUT, &result); + 0, &result); if (rc) { dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); @@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value) pkt.value = cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_PWR_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to set max power, error %d\n", rc); diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 44eb2b1d0416..3b6664c7e73c 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -406,19 +406,34 @@ free_dst: return err; } +static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto) +{ + if (bareudp->ethertype == proto) + return true; + + if (!bareudp->multi_proto_mode) + return false; + + if (bareudp->ethertype == htons(ETH_P_MPLS_UC) && + proto == htons(ETH_P_MPLS_MC)) + return true; + + if (bareudp->ethertype == htons(ETH_P_IP) && + proto == htons(ETH_P_IPV6)) + return true; + + return false; +} + static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; - if (skb->protocol != bareudp->ethertype) { - if (!bareudp->multi_proto_mode || - (skb->protocol != htons(ETH_P_MPLS_MC) && - skb->protocol != htons(ETH_P_IPV6))) { - err = -EINVAL; - goto tx_error; - } + if (!bareudp_proto_valid(bareudp, skb->protocol)) { + err = -EINVAL; + goto tx_error; } info = skb_tunnel_info(skb); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 8d13ea370db1..66e67b24a887 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); + clk_disable_unprepare(port->pclk); return PTR_ERR(port->reset); } reset_control_reset(port->reset); @@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) IRQF_SHARED, port_names[port->id], port); - if (ret) + if (ret) { + clk_disable_unprepare(port->pclk); return ret; + } ret = register_netdev(netdev); if (!ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b1bea0309a7c..87776ce3539b 
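bareudp_proto_valid() above concentrates the protocol check: the configured ethertype always matches, and multi-proto mode additionally accepts MPLS multicast on an MPLS unicast device and IPv6 on an IPv4 device. A standalone restatement of that logic, using host-order EtherType constants rather than the kernel's htons() values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP        0x0800
#define ETH_P_IPV6      0x86DD
#define ETH_P_MPLS_UC   0x8847
#define ETH_P_MPLS_MC   0x8848

struct bareudp_cfg {
        uint16_t ethertype;
        bool multi_proto_mode;
};

static bool bareudp_proto_valid(const struct bareudp_cfg *b, uint16_t proto)
{
        if (b->ethertype == proto)
                return true;
        if (!b->multi_proto_mode)
                return false;
        if (b->ethertype == ETH_P_MPLS_UC && proto == ETH_P_MPLS_MC)
                return true;
        if (b->ethertype == ETH_P_IP && proto == ETH_P_IPV6)
                return true;
        return false;
}

int main(void)
{
        struct bareudp_cfg mpls = { ETH_P_MPLS_UC, true };
        struct bareudp_cfg v4_only = { ETH_P_IP, false };

        printf("%d %d %d\n",
               bareudp_proto_valid(&mpls, ETH_P_MPLS_MC),      /* 1 */
               bareudp_proto_valid(&v4_only, ETH_P_IPV6),      /* 0 */
               bareudp_proto_valid(&v4_only, ETH_P_IP));       /* 1 */
        return 0;
}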
100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, int k, sizeoflast; dma_addr_t dma; - if (type == DESC_TYPE_SKB) { - struct sk_buff *skb = (struct sk_buff *)priv; - int ret; - - ret = hns3_fill_skb_desc(ring, skb, desc); - if (unlikely(ret < 0)) - return ret; - - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - } else if (type == DESC_TYPE_FRAGLIST_SKB) { + if (type == DESC_TYPE_FRAGLIST_SKB || + type == DESC_TYPE_SKB) { struct sk_buff *skb = (struct sk_buff *)priv; dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); @@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); + if (unlikely(ret < 0)) + goto fill_err; + ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); if (unlikely(ret < 0)) goto fill_err; @@ -4153,8 +4149,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); if (netif_msg_link(handle)) netdev_info(netdev, "link up\n"); } else { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bb4a6327035d..36575e72a915 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, /* to avoid rule conflict, when user configure rule by ethtool, * we need to clear all arfs rules */ + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); - spin_lock_bh(&hdev->fd_rule_lock); ret = hclge_fd_config_rule(hdev, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, return ret; } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bool clear_list) { @@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; - spin_lock_bh(&hdev->fd_rule_lock); for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } - - spin_unlock_bh(&hdev->fd_rule_lock); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_fd_rule_tuples new_tuples; + struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; u16 tmp_queue_id; @@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - memset(&new_tuples, 0, sizeof(new_tuples)); - hclge_fd_get_flow_tuples(fkeys, &new_tuples); - - spin_lock_bh(&hdev->fd_rule_lock); - /* when there is already fd rule existed add by user, * arfs should not work */ + 
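The hclge_add_fd_entry() change above takes fd_rule_lock before clearing the aRFS rules, so clearing the automatic rules and inserting the user rule happen in one critical section. A rough sketch of that pattern, with a pthread mutex standing in for the kernel spinlock and invented rule counters:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fd_rule_lock = PTHREAD_MUTEX_INITIALIZER;
static int arfs_rule_cnt = 3;
static int user_rule_cnt;

static void clear_arfs_rules_locked(void)
{
        /* Caller must hold fd_rule_lock. */
        arfs_rule_cnt = 0;
}

static void add_user_fd_entry(void)
{
        pthread_mutex_lock(&fd_rule_lock);
        clear_arfs_rules_locked();      /* clear and ...                      */
        user_rule_cnt++;                /* ... insert atomically w.r.t. aRFS  */
        pthread_mutex_unlock(&fd_rule_lock);
}

int main(void)
{
        add_user_fd_entry();
        printf("arfs=%d user=%d\n", arfs_rule_cnt, user_rule_cnt);
        return 0;
}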
spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + /* check is there flow director filter existed for this flow, * if not, create a new filter for it; * if filter exist with different queue id, modify the filter; @@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) #endif } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) { #ifdef CONFIG_RFS_ACCEL @@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) hdev->fd_en = enable; clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) + + if (!enable) { + spin_lock_bh(&hdev->fd_rule_lock); hclge_del_all_fd_entries(handle, clear); - else + spin_unlock_bh(&hdev->fd_rule_lock); + } else { hclge_restore_fd_entries(handle); + } } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); - + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); + spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, * so it only need to stop phy here. @@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, bool writen_to_tbl = false; int ret = 0; - /* When device is resetting, firmware is unable to handle - * mailbox. Just record the vlan id, and remove it after + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after * reset finished. */ - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) { + if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, vport->vlan_del_fail_bmap); return -EBUSY; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a10b022d1951..9162856de1b1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - /* When device is resetting, firmware is unable to handle - * mailbox. Just record the vlan id, and remove it after + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after * reset finished. 
*/ - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) { + if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, hdev->vlan_del_fail_bmap); return -EBUSY; } @@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; + int ret; rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); - rtnl_unlock(); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "is resetting when updating port based vlan info\n"); + rtnl_unlock(); + return; + } + + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return; + } /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_PORT_BASE_VLAN_CFG); memcpy(send_msg.data, port_base_vlan_info, data_size); - hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); - - if (state == HNAE3_PORT_BASE_VLAN_DISABLE) - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; - else - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (!ret) { + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = state; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + } - rtnl_lock(); hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); rtnl_unlock(); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0fd7eae25fe9..5afb3c9c52d2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3206,7 +3206,7 @@ req_rx_irq_failed: req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 1); return rc; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ae0a6332fd30..b2f2fcfdf732 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; ret_val = e1000_disable_ulp_lpt_lp(hw, true); - if (ret_val) { + if (ret_val) e_warn("Failed to disable ULP\n"); - goto out; - } ret_val = hw->phy.ops.acquire(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b97916b91aba..4f05f6efe6af 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6215,9 +6215,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 64786568af0d..75a8c407e815 100644 --- 
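The VLAN filter changes above (PF and VF alike) defer deletions while a reset is running or has failed: the id is only recorded in a bitmap and -EBUSY is returned, and the recorded ids are replayed after the reset completes. A small sketch of that defer-and-record idea, with simplified state flags and a hand-rolled bitmap helper:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define VLAN_N_VID      4096
#define BITS_PER_LONG   (8 * sizeof(unsigned long))

struct nic_state {
        bool resetting;
        bool reset_failed;
        unsigned long vlan_del_fail_bmap[VLAN_N_VID / BITS_PER_LONG];
};

static void set_bit_in(unsigned long *bmap, unsigned int nr)
{
        bmap[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int vlan_filter_kill(struct nic_state *nic, unsigned int vlan_id)
{
        if (nic->resetting || nic->reset_failed) {
                /* Mailbox unusable: remember the id, clean it up later. */
                set_bit_in(nic->vlan_del_fail_bmap, vlan_id);
                return -EBUSY;
        }

        printf("removing VLAN %u via mailbox\n", vlan_id);
        return 0;
}

int main(void)
{
        struct nic_state nic;

        memset(&nic, 0, sizeof(nic));
        nic.resetting = true;
        printf("during reset: %d\n", vlan_filter_kill(&nic, 100));  /* -EBUSY */

        nic.resetting = false;
        printf("after reset:  %d\n", vlan_filter_kill(&nic, 100));  /* 0 */
        return 0;
}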
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work) if (!netif_running(pf->netdev)) return; + rtnl_lock(); otx2_stop(pf->netdev); pf->reset_count++; otx2_open(pf->netdev); netif_trans_update(pf->netdev); + rtnl_unlock(); } static const struct net_device_ops otx2_netdev_ops = { @@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev) pf = netdev_priv(netdev); + cancel_work_sync(&pf->reset_task); /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index f4227517dc8e..92a3db69a6cd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev) vf = netdev_priv(netdev); + cancel_work_sync(&vf->reset_task); + unregister_netdev(netdev); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 20db302d31ce..0870fe78ea38 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, return 0; } -static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) { u32 val; int ret; + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + val = (speed == SPEED_1000) ? 
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; mtk_w32(eth, val, INTF_MODE); @@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { - if (state->interface != - PHY_INTERFACE_MODE_TRGMII) - mtk_gmac0_rgmii_adjust(mac->hw, - state->speed); + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); /* mt7623_pad_clk_setup */ for (i = 0 ; i < NUM_TRGMII_CTRL; i++) @@ -2887,6 +2896,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 954c22c79f6b..258c7a96f269 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4356,12 +4356,14 @@ end: static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index bdb71332cbf2..3e44e4d820c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; /* A given netdev is not a representor or not a slave of LAG configuration */ if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev)) return false; + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + /* Egress acl forward to vport is supported only non-uplink representor */ return rpriv->rep->vport != MLX5_VPORT_UPLINK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index f8af109d34cc..79cc42d88eec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, } } - tun_dst = tun_rx_dst(enc_opts.key.len); + if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst, + key.enc_ip.tos, key.enc_ip.ttl, + key.enc_tp.dst, TUNNEL_KEY, + key32_to_tunnel_id(key.enc_key_id.keyid), + enc_opts.key.len); + } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst, + key.enc_ip.tos, key.enc_ip.ttl, + key.enc_tp.dst, 0, TUNNEL_KEY, + key32_to_tunnel_id(key.enc_key_id.keyid), + enc_opts.key.len); + } else { + netdev_dbg(priv->netdev, + "Couldn't restore tunnel, unsupported addr_type: %d\n", + key.enc_control.addr_type); + return false; + } + if (!tun_dst) { - WARN_ON_ONCE(true); + netdev_dbg(priv->netdev, "Couldn't restore 
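mlx5e_rep_is_lag_netdev() above is reordered so the netdev's private area is only interpreted after the device has been confirmed to be a representor slave. A generic sketch of that check-before-dereference ordering, with simplified stand-in types instead of the real mlx5 structures:

#include <stdbool.h>
#include <stdio.h>

struct netdev {
        bool is_representor;
        void *priv;             /* only a rep_priv when is_representor */
};

struct rep_priv {
        int vport;
};

#define VPORT_UPLINK 0xffff

static bool rep_is_lag_candidate(const struct netdev *dev)
{
        const struct rep_priv *rpriv;

        /* Check the device type first ... */
        if (!dev->is_representor)
                return false;

        /* ... and only then trust the private data layout. */
        rpriv = dev->priv;
        return rpriv->vport != VPORT_UPLINK;
}

int main(void)
{
        struct rep_priv vf_rep = { .vport = 1 };
        struct netdev rep_dev = { .is_representor = true, .priv = &vf_rep };
        struct netdev plain_dev = { .is_representor = false, .priv = NULL };

        printf("%d %d\n", rep_is_lag_candidate(&rep_dev),
               rep_is_lag_candidate(&plain_dev));
        return 0;
}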
tunnel, no tun_dst\n"); return false; } - ip_tunnel_key_init(&tun_dst->u.tun_info.key, - key.enc_ipv4.src, key.enc_ipv4.dst, - key.enc_ip.tos, key.enc_ip.ttl, - 0, /* label */ - key.enc_tp.src, key.enc_tp.dst, - key32_to_tunnel_id(key.enc_key_id.keyid), - TUNNEL_KEY); + tun_dst->u.tun_info.key.tp_src = key.enc_tp.src; if (enc_opts.key.len) ip_tunnel_info_opts_set(&tun_dst->u.tun_info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index 951ea26d96bc..e472ed0eacfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB); } + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c index 58b13192df23..2805416c32a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c @@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, gre_key.key, be32_to_cpu(enc_keyid.key->keyid)); } + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 37b176801bcc..038a0f1cecec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, be32_to_cpu(enc_keyid.key->keyid)); + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f374348fa810..72c91ff55465 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -417,7 +417,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -445,7 +445,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -3062,6 +3062,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; } +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, + enum mlx5_port_status state) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int vport_admin_state; + + mlx5_set_port_admin_status(mdev, state); + + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + return; + + if (state == MLX5_PORT_UP) + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; + else + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; + + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3094,7 +3113,7 @@ int mlx5e_open(struct net_device *netdev) 
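The mlx5e_alloc_rq() hunk above replaces direct returns with goto err_rq_wq_destroy so the already-created work queue is released when a later step fails. A standalone sketch of that goto-unwind pattern, with plain heap allocations standing in for the WQ objects:

#include <stdio.h>
#include <stdlib.h>

struct rq {
        void *wq_ctrl;          /* stands in for the WQ control structure */
        void *wqe_info;         /* stands in for per-WQE bookkeeping */
};

static int alloc_rq(struct rq *rq, int fail_second_step)
{
        int err;

        rq->wq_ctrl = malloc(64);
        if (!rq->wq_ctrl)
                return -1;              /* nothing allocated yet, plain return is fine */

        if (fail_second_step)
                rq->wqe_info = NULL;    /* simulate allocation failure */
        else
                rq->wqe_info = malloc(64);
        if (!rq->wqe_info) {
                err = -1;
                goto err_rq_wq_destroy; /* NOT "return err": wq_ctrl would leak */
        }

        return 0;

err_rq_wq_destroy:
        free(rq->wq_ctrl);
        rq->wq_ctrl = NULL;
        return err;
}

static void free_rq(struct rq *rq)
{
        free(rq->wqe_info);
        free(rq->wq_ctrl);
}

int main(void)
{
        struct rq rq;

        if (!alloc_rq(&rq, 0)) {
                printf("allocated\n");
                free_rq(&rq);
        }
        if (alloc_rq(&rq, 1))
                printf("second step failed, first resource released\n");
        return 0;
}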
mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); if (!err) - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); return err; @@ -3128,7 +3147,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -5177,7 +5196,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* Marking the link as currently not needed by the Driver */ if (!netif_running(netdev)) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); mlx5e_set_netdev_mtu_boundaries(priv); mlx5e_set_dev_port_mtu(priv); @@ -5384,6 +5403,8 @@ err_cleanup_tx: profile->cleanup_tx(priv); out: + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + cancel_work_sync(&priv->update_stats_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 111477086f66..48989541e2ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -936,6 +936,7 @@ err_close_drop_rq: static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { + mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); @@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5e_rep_tc_enable(priv); + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7a0c22d05575..fd53d101d8fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2247,6 +2247,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, match.key->vlan_priority); *match_level = MLX5_MATCH_L2; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index e8f900e9577e..6e6a9a563992 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1632,7 +1632,7 @@ abort: mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } - + esw_destroy_tsar(esw); return err; } @@ -1687,8 +1687,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) else if (esw->mode == MLX5_ESWITCH_OFFLOADS) esw_offloads_disable(esw); - esw_destroy_tsar(esw); - old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; @@ -1698,6 +1696,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } + esw_destroy_tsar(esw); + if (clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); } @@ -1949,6 +1949,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int 
opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; + int other_vport = 1; int err = 0; if (!ESW_ALLOWED(esw)) @@ -1956,15 +1958,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, if (IS_ERR(evport)) return PTR_ERR(evport); + if (vport == MLX5_VPORT_UPLINK) { + opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; + other_vport = 0; + vport = 0; + } mutex_lock(&esw->state_lock); - err = mlx5_modify_vport_admin_state(esw->dev, - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, 1, link_state); + err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { - mlx5_core_warn(esw->dev, - "Failed to set vport %d link state, err = %d", - vport, err); + mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", + vport, opmod, err); goto unlock; } @@ -2006,8 +2010,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; - if (!ESW_ALLOWED(esw)) - return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); if (vlan > 4095 || qos > 7) @@ -2035,6 +2037,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u8 set_flags = 0; int err; + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 1f52b329e915..867d8120b8a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -685,6 +685,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +static inline +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index be610d40749a..d2516922d867 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, return &esw->offloads.vport_reps[idx]; } +static void +mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && + attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; +} static void mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, @@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, mlx5_eswitch_get_vport_metadata_mask()); spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc))) - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } else { misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_port, 
attr->in_rep->vport); @@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } - - if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && - attr->in_rep->vport == MLX5_VPORT_UPLINK) - spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; } struct mlx5_flow_handle * @@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, goto err_esw_get; } + mlx5_eswitch_set_rule_flow_source(esw, spec, attr); + if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr, &flow_act, dest, i); @@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, i++; mlx5_eswitch_set_rule_source_port(esw, spec, attr); + mlx5_eswitch_set_rule_flow_source(esw, spec, attr); if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6904ad96af48..a10814814856 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -796,7 +796,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } -/* If reverse if false then return the first flow table in next priority of +/* If reverse is false then return the first flow table in next priority of * prio in the tree, else return the last flow table in the previous priority * of prio in the tree. */ @@ -828,34 +828,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) return find_closest_ft(prio, true); } -static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root, - struct mlx5_flow_namespace *ns) -{ - struct mlx5_flow_namespace *root_ns = &root->ns; - struct fs_prio *iter_prio; - struct fs_prio *prio; - - fs_get_obj(prio, ns->node.parent); - list_for_each_entry(iter_prio, &root_ns->node.children, node.list) { - if (iter_prio == prio && - !list_is_last(&prio->node.children, &iter_prio->node.list)) - return list_next_entry(iter_prio, node.list); - } - return NULL; -} - static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, struct mlx5_flow_act *flow_act) { - struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct fs_prio *prio; + bool next_ns; - if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) - prio = find_fwd_ns_prio(root, ft->ns); - else - fs_get_obj(prio, ft->node.parent); + next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; + fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent); - return (prio) ? 
find_next_chained_ft(prio) : NULL; + return find_next_chained_ft(prio); } static int connect_fts_in_prio(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index ef0706d15a5b..2d55b7c22c03 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, if (rq->extts.index >= clock->ptp_info.n_pins) return -EINVAL; + pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + if (on) { - pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); - if (pin < 0) - return -EBUSY; pin_mode = MLX5_PIN_MODE_IN; pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); field_select = MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | MLX5_MTPPS_FS_ENABLE; } else { - pin = rq->extts.index; field_select = MLX5_MTPPS_FS_ENABLE; } @@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; - if (on) { - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, - rq->perout.index); - if (pin < 0) - return -EBUSY; + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + if (on) { pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; @@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, MLX5_MTPPS_FS_ENABLE | MLX5_MTPPS_FS_TIME_STAMP; } else { - pin = rq->perout.index; field_select = MLX5_MTPPS_FS_ENABLE; } @@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? 
-EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { @@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .verify = NULL, }; +static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, + u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {}; + + MLX5_SET(mtpps_reg, in, pin, pin); + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) +{ + struct mlx5_core_dev *mdev = clock->mdev; + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {}; + u8 mode; + int err; + + err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)); + if (err || !MLX5_GET(mtpps_reg, out, enable)) + return PTP_PF_NONE; + + mode = MLX5_GET(mtpps_reg, out, pin_mode); + + if (mode == MLX5_PIN_MODE_IN) + return PTP_PF_EXTTS; + else if (mode == MLX5_PIN_MODE_OUT) + return PTP_PF_PEROUT; + + return PTP_PF_NONE; +} + static int mlx5_init_pin_config(struct mlx5_clock *clock) { int i; @@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock) sizeof(clock->ptp_info.pin_config[i].name), "mlx5_pps%d", i); clock->ptp_info.pin_config[i].index = i; - clock->ptp_info.pin_config[i].func = PTP_PF_NONE; - clock->ptp_info.pin_config[i].chan = i; + clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); + clock->ptp_info.pin_config[i].chan = 0; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index b01f8f2fab63..866381e72960 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1815,7 +1815,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -2053,11 +2053,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1d23ad70e83d..28a2576eb783 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5595,6 +5595,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST, MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 019ed503aadf..0521e9d48c45 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5001,15 +5001,6 @@ static void 
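mlx5_ptp_verify() above now answers per pin from the MTPPS capability bits: EXTTS is allowed only if the pin supports PPS-in, PEROUT only if it supports PPS-out, and anything else is rejected. A self-contained sketch of that capability check, with made-up capability values and -1 standing in for -EOPNOTSUPP:

#include <stdio.h>

#define CAP_PPS_IN      (1u << 0)
#define CAP_PPS_OUT     (1u << 1)

enum pin_func { PF_NONE, PF_EXTTS, PF_PEROUT, PF_PHYSYNC };

static const unsigned int pin_caps[] = {
        CAP_PPS_IN | CAP_PPS_OUT,       /* pin 0: both directions */
        CAP_PPS_IN,                     /* pin 1: input only */
        0,                              /* pin 2: no PPS support */
};

static int verify_pin(unsigned int pin, enum pin_func func)
{
        switch (func) {
        case PF_NONE:
                return 0;                               /* always allowed */
        case PF_EXTTS:
                return !(pin_caps[pin] & CAP_PPS_IN);   /* 0 if supported */
        case PF_PEROUT:
                return !(pin_caps[pin] & CAP_PPS_OUT);
        default:
                return -1;                              /* e.g. PHYSYNC */
        }
}

int main(void)
{
        printf("pin0 PEROUT:  %d\n", verify_pin(0, PF_PEROUT));  /* 0 */
        printf("pin1 PEROUT:  %d\n", verify_pin(1, PF_PEROUT));  /* 1 */
        printf("pin2 EXTTS:   %d\n", verify_pin(2, PF_EXTTS));   /* 1 */
        printf("pin0 PHYSYNC: %d\n", verify_pin(0, PF_PHYSYNC)); /* -1 */
        return 0;
}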
mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) { - /* Packets with link-local destination IP arriving to the router - * are trapped to the CPU, so no need to program specific routes - * for them. Only allow prefix routes (usually one fe80::/64) so - * that packets are trapped for the right reason. - */ - if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) && - (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))) - return true; - /* Multicast routes aren't supported, so ignore them. Neighbour * Discovery packets are specifically trapped. */ @@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; - router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; - err = register_inetaddr_notifier(&router->inetaddr_nb); - if (err) - goto err_register_inetaddr_notifier; - - router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; - err = register_inet6addr_notifier(&router->inet6addr_nb); - if (err) - goto err_register_inet6addr_notifier; - INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_neigh_init; - mlxsw_sp->router->netevent_nb.notifier_call = - mlxsw_sp_router_netevent_event; - err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); - if (err) - goto err_register_netevent_notifier; - err = mlxsw_sp_mp_hash_init(mlxsw_sp); if (err) goto err_mp_hash_init; @@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_dscp_init; + router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; + err = register_inet6addr_notifier(&router->inet6addr_nb); + if (err) + goto err_register_inet6addr_notifier; + + mlxsw_sp->router->netevent_nb.notifier_call = + mlxsw_sp_router_netevent_event; + err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); + if (err) + goto err_register_netevent_notifier; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb, @@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, return 0; err_register_fib_notifier: -err_dscp_init: -err_mp_hash_init: unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); err_register_netevent_notifier: + unregister_inet6addr_notifier(&router->inet6addr_nb); +err_register_inet6addr_notifier: + unregister_inetaddr_notifier(&router->inetaddr_nb); +err_register_inetaddr_notifier: + mlxsw_core_flush_owq(); +err_dscp_init: +err_mp_hash_init: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); @@ -8174,10 +8170,6 @@ err_ipips_init: err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: - unregister_inet6addr_notifier(&router->inet6addr_nb); -err_register_inet6addr_notifier: - unregister_inetaddr_notifier(&router->inetaddr_nb); -err_register_inetaddr_notifier: mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); return err; @@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb); unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); + 
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); + unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); + mlxsw_core_flush_owq(); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); mlxsw_sp_mr_fini(mlxsw_sp); @@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); - unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); - unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 157a42c63066..1e38dfe7cf64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = { { .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128), }, + { + .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512), + }, }; static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { @@ -422,6 +425,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { .priority = 2, }, { + .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE, + .priority = 1, + }, + { .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6, .priority = 2, @@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, }, { - .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY, + .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY, TRAP), .listeners_arr = { - MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU, - false), + MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE, + TRAP_TO_CPU, false), }, }, { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index f2d94b026d88..867c680f5917 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -497,21 +497,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) spin_unlock_irqrestore(&port->tx_skbs.lock, flags); - /* Next ts */ - ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); + /* Get the h/w timestamp */ + ocelot_get_hwtimestamp(ocelot, &ts); if (unlikely(!skb_match)) continue; - /* Get the h/w timestamp */ - ocelot_get_hwtimestamp(ocelot, &ts); - /* Set the timestamp into the skb */ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); skb_tstamp_tx(skb_match, &shhwtstamps); dev_kfree_skb_any(skb_match); + + /* Next ts */ + ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); } } EXPORT_SYMBOL(ocelot_get_txtstamp); diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index d2708a57f2ff..4075f5e59955 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); err = nixge_of_get_resources(pdev); if (err) - return err; + goto free_netdev; __nixge_hw_set_mac_address(ndev); priv->tx_irq = platform_get_irq_byname(pdev, "tx"); if (priv->tx_irq < 0) { netdev_err(ndev, "could not find 'tx' irq"); - return priv->tx_irq; + err = priv->tx_irq; + goto free_netdev; } priv->rx_irq = platform_get_irq_byname(pdev, "rx"); if (priv->rx_irq < 0) { 
netdev_err(ndev, "could not find 'rx' irq"); - return priv->rx_irq; + err = priv->rx_irq; + goto free_netdev; } priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 728dd6429d80..85e4cf229a96 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2002,7 +2002,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) netif_device_detach(lif->netdev); err = ionic_stop(lif->netdev); if (err) - return err; + goto reset_out; } if (cb) @@ -2012,6 +2012,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) err = ionic_open(lif->netdev); netif_device_attach(lif->netdev); } + +reset_out: mutex_unlock(&lif->queue_lock); return err; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 5f123a8cf68e..d2fdb5430d27 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2261,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, minor = get_free_serial_index(); if (minor < 0) - goto exit; + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) + goto exit2; /* fill in specific data for later use */ serial->minor = minor; @@ -2311,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index eccbf4cd7149..442507f25aad 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -377,10 +377,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch (e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = NULL; int 
ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); - dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { @@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a43c97b13924..77658425db8a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, NULL); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip_nh: *idx += 1; continue; @@ -1403,12 +1406,15 @@ skip_nh: cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if 
(!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f, true, true); + if (is_zero_ether_addr(f->eth_addr) && + f->vni == vxlan->cfg.vni) + continue; + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index add040168e67..4ee2330c603e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1102,6 +1102,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, int pos; int len; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; @@ -1115,18 +1118,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, if (status) { dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", status); - /* - * Don't treat non-retryable errors as fatal, as we potentially - * already have a NGUID or EUI-64. If we failed with DNR set, - * we want to silently ignore the error as we can still - * identify the device, but if the status has DNR set, we want - * to propagate the error back specifically for the disk - * revalidation flow to make sure we don't abandon the - * device just because of a temporal retry-able error (such - * as path of transport errors). - */ - if (status > 0 && (status & NVME_SC_DNR)) - status = 0; goto free_data; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1de3f9b827aa..09ffc3246f60 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -129,6 +129,13 @@ enum nvme_quirks { * Don't change the value of the temperature threshold feature */ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. 
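An aside on the NVME_QUIRK_NO_NS_DESC_LIST bit being introduced here: the quirk is wired up across three hunks in this diff (nvme.h, core.c and pci.c). Below is a condensed, illustrative sketch of how such a bit flows from the PCI ID table into the Identify path; the example_* names are hypothetical, and only the 0x126f:0x2263 entry and the (1 << 15) value come from the patch itself.

	/* Quirk bit as declared in the hunk above. */
	enum example_nvme_quirks {
		EXAMPLE_QUIRK_NO_NS_DESC_LIST = (1 << 15),
	};

	/* 1. The PCI ID table attaches the bit to the affected controller... */
	static const struct pci_device_id example_id_table[] = {
		{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion entry from this diff */
		  .driver_data = EXAMPLE_QUIRK_NO_NS_DESC_LIST, },
		{ }
	};

	/* 2. ...and the Identify path bails out before issuing the CNS 0x03
	 *    (Namespace Identification Descriptor list) command, relying on the
	 *    NGUID/EUI-64 fields from Identify Namespace instead.
	 */
	static int example_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned int nsid)
	{
		if (ctrl->quirks & EXAMPLE_QUIRK_NO_NS_DESC_LIST)
			return 0;
		/* ... build and submit the Identify command as core.c does ... */
		return 0;
	}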
+ */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b1d18f0633c7..d4b1ff747123 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3099,6 +3099,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ @@ -3122,6 +3124,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 79ef2b8e2b3c..f3a91818167b 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1382,6 +1382,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, if (nctrl->opts->tos >= 0) ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); + /* Set 10 seconds timeout for icresp recvmsg */ + queue->sock->sk->sk_rcvtimeo = 10 * HZ; + queue->sock->sk->sk_allocation = GFP_ATOMIC; nvme_tcp_set_queue_io_cpu(queue); queue->request = NULL; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 812bfc32ecb8..2ea61abd5830 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index ff1ee159dca2..f8ff30cdafa6 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -7,6 +7,8 @@ config PINCTRL_MSM select PINCONF select GENERIC_PINCONF select GPIOLIB_IRQCHIP + select IRQ_DOMAIN_HIERARCHY + select IRQ_FASTEOI_HIERARCHY_HANDLERS config PINCTRL_APQ8064 tristate "Qualcomm APQ8064 pin controller driver" diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 83b7d64bc4c1..c322f30a2064 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -832,6 +832,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d) msm_gpio_irq_clear_unmask(d, false); } +/** + * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent. 
+ * @d: The irq data. + * + * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are + * normally handled by the parent irqchip. The logic here is slightly + * different due to what's easy to do with our parent, but in principle it's + * the same. + */ +static void msm_gpio_update_dual_edge_parent(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq]; + int loop_limit = 100; + unsigned int val; + unsigned int type; + + /* Read the value and make a guess about what edge we need to catch */ + val = msm_readl_io(pctrl, g) & BIT(g->in_bit); + type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING; + + do { + /* Set the parent to catch the next edge */ + irq_chip_set_type_parent(d, type); + + /* + * Possibly the line changed between when we last read "val" + * (and decided what edge we needed) and when we set the edge. + * If the value didn't change (or changed and then changed + * back) then we're done. + */ + val = msm_readl_io(pctrl, g) & BIT(g->in_bit); + if (type == IRQ_TYPE_EDGE_RISING) { + if (!val) + return; + type = IRQ_TYPE_EDGE_FALLING; + } else if (type == IRQ_TYPE_EDGE_FALLING) { + if (val) + return; + type = IRQ_TYPE_EDGE_RISING; + } + } while (loop_limit-- > 0); + dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n"); +} + static void msm_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -840,8 +886,11 @@ static void msm_gpio_irq_ack(struct irq_data *d) unsigned long flags; u32 val; - if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) + if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { + if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) + msm_gpio_update_dual_edge_parent(d); return; + } g = &pctrl->soc->groups[d->hwirq]; @@ -860,6 +909,17 @@ static void msm_gpio_irq_ack(struct irq_data *d) raw_spin_unlock_irqrestore(&pctrl->lock, flags); } +static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, + unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + + return type == IRQ_TYPE_EDGE_BOTH && + pctrl->soc->wakeirq_dual_edge_errata && d->parent_data && + test_bit(d->hwirq, pctrl->skip_wake_irqs); +} + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -868,11 +928,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) unsigned long flags; u32 val; + if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) { + set_bit(d->hwirq, pctrl->dual_edge_irqs); + irq_set_handler_locked(d, handle_fasteoi_ack_irq); + msm_gpio_update_dual_edge_parent(d); + return 0; + } + if (d->parent_data) irq_chip_set_type_parent(d, type); - if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) + if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { + clear_bit(d->hwirq, pctrl->dual_edge_irqs); + irq_set_handler_locked(d, handle_fasteoi_irq); return 0; + } g = &pctrl->soc->groups[d->hwirq]; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 9452da18a78b..7486fe08eb9b 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map { * @pull_no_keeper: The SoC does not support keeper bias.
* @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM * @nwakeirq_map: The number of entries in @wakeirq_map + * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need + * to be aware that their parent can't handle dual + * edge interrupts. */ struct msm_pinctrl_soc_data { const struct pinctrl_pin_desc *pins; @@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data { const int *reserved_gpios; const struct msm_gpio_wakeirq_map *wakeirq_map; unsigned int nwakeirq_map; + bool wakeirq_dual_edge_errata; }; extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops; diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c index 1b6465a882f2..1d9acad3c1ce 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7180.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c @@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = { .ntiles = ARRAY_SIZE(sc7180_tiles), .wakeirq_map = sc7180_pdc_map, .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map), + .wakeirq_dual_edge_errata = true, }; static int sc7180_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d..06056e9ec333 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -547,6 +547,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) scsi_uninit_cmd(cmd); } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) + kblockd_schedule_work(&sdev->requeue_work); + else + blk_mq_run_hw_queues(sdev->request_queue, true); +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes) @@ -591,11 +600,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); percpu_ref_put(&q->q_usage_counter); return false; @@ -1702,6 +1707,7 @@ out_put_budget: */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 560649be9d13..e035c9f757a1 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1032_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | @@ 
-154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 689acd69a1b9..816dd25b9d0e 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; - unsigned int hi_mask = data[4] << shift; - unsigned int lo_mask = data[5] << shift; - unsigned int chan_mask = hi_mask | lo_mask; - unsigned int old_mask = (1 << shift) - 1; + unsigned int hi_mask; + unsigned int lo_mask; + unsigned int chan_mask; + unsigned int old_mask; unsigned int pm; unsigned int pt; unsigned int pp; + unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, @@ -466,7 +467,20 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } - if (chan_mask > 0xffff) { + if (shift <= 16) { + hi_mask = data[4] << shift; + lo_mask = data[5] << shift; + old_mask = (1U << shift) - 1; + invalid_chan = (data[4] | data[5]) >> (16 - shift); + } else { + hi_mask = 0; + lo_mask = 0; + old_mask = 0xffff; + invalid_chan = data[4] | data[5]; + } + chan_mask = hi_mask | lo_mask; + + if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index 10501fe6bb25..1268ba34be5f 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1564_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | @@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 4d1eccb5041d..4518c2680b7c 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, 
case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: /* check shift amount */ shift = data[3]; - if (shift >= s->n_chan) { + if (shift >= 32) { mask = 0; rising = 0; falling = 0; diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 4689b2170e4f..456603fd26c0 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,11 +61,25 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - + const struct usb_endpoint_descriptor *epd; + const struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; + if (iface_desc->desc.bNumEndpoints != 2) { + result = -ENODEV; + goto failed; + } + + result = -EINVAL; + epd = &iface_desc->endpoint[1].desc; + if (!usb_endpoint_is_bulk_in(epd)) + goto failed; + epd = &iface_desc->endpoint[2].desc; + if (!usb_endpoint_is_bulk_out(epd)) + goto failed; + dev = interface_to_usbdev(interface); wlandev = create_wlan(); if (!wlandev) { diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index fc118f649887..cae61d1ebec5 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -524,6 +524,7 @@ static void __init serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -547,7 +548,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index ddb6aeb76dc5..04b9af7ed941 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -326,7 +326,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) * devices will export them as GPIOs, so we pre-configure them safely * as inputs. */ - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00; + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } writeb(0x00, p + UART_EXAR_MPIOINT_7_0); writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index f839380c2f4c..98b8a3e30733 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -306,8 +306,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, } #endif + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. 
+ */ + baud = tty_termios_baud_rate(termios); + serial8250_do_set_termios(port, termios, old); + tty_termios_encode_baud_rate(termios, baud, baud); + /* * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) * @@ -339,6 +352,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ spin_lock_irqsave(&port->lock, flags); + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 8de8bac9c6c7..04d1b0807e66 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -635,7 +635,7 @@ static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup) } static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, - struct tty_port *tty) + struct tty_port *port) { do { char flag = TTY_NORMAL; @@ -653,16 +653,18 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, ch = (unsigned char) tegra_uart_read(tup, UART_RX); tup->uport.icount.rx++; - if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) - tty_insert_flip_char(tty, ch, flag); + if (uart_handle_sysrq_char(&tup->uport, ch)) + continue; if (tup->uport.ignore_status_mask & UART_LSR_DR) continue; + + tty_insert_flip_char(port, ch, flag); } while (1); } static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, - struct tty_port *tty, + struct tty_port *port, unsigned int count) { int copied; @@ -672,17 +674,13 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, return; tup->uport.icount.rx += count; - if (!tty) { - dev_err(tup->uport.dev, "No tty port\n"); - return; - } if (tup->uport.ignore_status_mask & UART_LSR_DR) return; dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys, count, DMA_FROM_DEVICE); - copied = tty_insert_flip_string(tty, + copied = tty_insert_flip_string(port, ((unsigned char *)(tup->rx_dma_buf_virt)), count); if (copied != count) { WARN_ON(1); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 672cfa075e28..2833f1418d6d 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1580,8 +1580,10 @@ static int cdns_uart_probe(struct platform_device *pdev) * If register_console() don't assign value, then console_port pointer * is cleanup. 
*/ - if (!console_port) + if (!console_port) { + cdns_uart_console.index = id; console_port = port; + } #endif rc = uart_add_one_port(&cdns_uart_uart_driver, port); @@ -1594,8 +1596,10 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { console_port = NULL; + cdns_uart_console.index = -1; + } #endif cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 48a8199f7845..42d8c67a481f 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1092,10 +1092,19 @@ static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -1125,6 +1134,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -1143,7 +1157,7 @@ err_free: visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -1158,14 +1172,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -1201,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? cols : vc->vc_cols); @@ -1212,7 +1218,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > KMALLOC_MAX_SIZE) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -3393,6 +3399,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. 
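For a sense of the numbers behind the checks added to vc_allocate() and vc_do_resize() above (figures are approximate and config-dependent; example_vc_size_ok() is purely illustrative, not kernel code):

	/*
	 * vc_screenbuf_size is cols * rows * 2 bytes of character/attribute
	 * cells, so even the 32767 x 32767 cap would allow roughly 2 GiB.
	 * KMALLOC_MAX_SIZE (typically no more than a few MiB with common
	 * configs) is therefore the bound that actually bites, while the
	 * !vc_screenbuf_size test rejects degenerate zero-cell consoles that
	 * would otherwise hand kzalloc() a zero length.
	 */
	static bool example_vc_size_ok(unsigned int cols, unsigned int rows,
				       unsigned int screenbuf_size)
	{
		return cols <= VC_MAXCOL && rows <= VC_MAXROW &&
		       screenbuf_size && screenbuf_size <= KMALLOC_MAX_SIZE;
	}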
*/ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index fea555570ad4..45c54d56ecbd 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -557,6 +557,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, if (is_fs_or_ls(speed) && !has_tt) return false; + /* skip endpoint with zero maxpkt */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return false; + return true; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ef513c2fb843..9234c82e70e4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -265,6 +265,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1142) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x2142) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 2eaf5c0af80c..ee6bf01775bb 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -856,7 +856,7 @@ static int tegra_xusb_init_context(struct tegra_xusb *tegra) if (!tegra->context.ipfs) return -ENOMEM; - tegra->context.fpci = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets, + tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets, sizeof(u32), GFP_KERNEL); if (!tegra->context.fpci) return -ENOMEM; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 6fb4d7ecfa19..b22adf03f584 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } - switch (v_req.type) { + switch (vhost32_to_cpu(vq, v_req.type)) { case VIRTIO_SCSI_T_TMF: vc.req = &v_req.tmf; vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261..35ebeeccde4d 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = info->var.xoffset + rs; region.dy = 0; region.width = rw; @@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset + bs; region.width = rs; diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509..78f3a5621478 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset; region.height = rw; @@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset + bs; region.dy = 0; 
region.height = info->var.yres_virtual; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index ce08251bfd38..fd098ff17574 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; @@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec..e165a3fad29a 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; @@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 5809e5f5b157..5c92e4a50882 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -85,7 +85,7 @@ config VIRTIO_MEM depends on VIRTIO depends on MEMORY_HOTPLUG_SPARSE depends on MEMORY_HOTREMOVE - select CONTIG_ALLOC + depends on CONTIG_ALLOC help This driver provides access to virtio-mem paravirtualized memory devices, allowing to hotplug and hotunplug memory. diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 1f157d2f4952..8be02f333b7a 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb) static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) { if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, - &vb->config_read_bitmap)) + &vb->config_read_bitmap)) { virtio_cread(vb->vdev, struct virtio_balloon_config, free_page_hint_cmd_id, &vb->cmd_id_received_cache); + /* Legacy balloon config space is LE, unlike all other devices. */ + if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) + vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache); + } return vb->cmd_id_received_cache; } @@ -974,6 +978,11 @@ static int virtballoon_probe(struct virtio_device *vdev) /* * Let the hypervisor know that we are expecting a * specific value to be written back in balloon pages. + * + * If the PAGE_POISON value was larger than a byte we would + * need to byte swap poison_val here to guarantee it is + * little-endian. However for now it is a single byte so we + * can pass it as-is. 
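A short aside on the endianness note just above (illustrative only, not driver code; example_poison_endianness() is a hypothetical name):

	static void example_poison_endianness(void)
	{
		u32 poison_val;

		/* A one-byte pattern is endian-neutral: memset() replicates the
		 * byte into every position, so the value is 0xaaaaaaaa either way.
		 */
		memset(&poison_val, 0xaa, sizeof(poison_val));

		/*
		 * A genuinely multi-byte pattern destined for a legacy
		 * (pre-VIRTIO_F_VERSION_1) config space would need an explicit
		 * conversion first, mirroring the le32_to_cpu() read added above:
		 *
		 *	poison_val = (__force u32)cpu_to_le32(poison_val);
		 */
	}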
*/ if (!want_init_on_free()) memset(&poison_val, PAGE_POISON, sizeof(poison_val)); diff --git a/fs/io_uring.c b/fs/io_uring.c index 32b0064f806e..493e5047e67c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4199,10 +4199,9 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) hash_del(&req->hash_node); io_poll_complete(req, req->result, 0); - req->flags |= REQ_F_COMP_LOCKED; - io_put_req_find_next(req, nxt); spin_unlock_irq(&ctx->completion_lock); + io_put_req_find_next(req, nxt); io_cqring_ev_posted(ctx); } @@ -4658,6 +4657,10 @@ static int io_poll_add(struct io_kiocb *req) struct io_poll_table ipt; __poll_t mask; + /* ->work is in union with hash_node and others */ + io_req_work_drop_env(req); + req->flags &= ~REQ_F_WORK_INITIALIZED; + INIT_HLIST_NODE(&req->hash_node); INIT_LIST_HEAD(&req->list); ipt.pt._qproc = io_poll_queue_proc; diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 8b1e020e9a03..30a3aab312e6 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -456,7 +456,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #if !defined(inb) && !defined(_inb) #define _inb _inb -static inline u16 _inb(unsigned long addr) +static inline u8 _inb(unsigned long addr) { u8 val; @@ -482,7 +482,7 @@ static inline u16 _inw(unsigned long addr) #if !defined(inl) && !defined(_inl) #define _inl _inl -static inline u16 _inl(unsigned long addr) +static inline u32 _inl(unsigned long addr) { u32 val; diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 6c3ef49b46b3..e73dea5c7333 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -866,6 +866,18 @@ struct drm_mode_config { bool prefer_shadow_fbdev; /** + * @fbdev_use_iomem: + * + * Set to true if framebuffer reside in iomem. + * When set to true memcpy_toio() is used when copying the framebuffer in + * drm_fb_helper.drm_fb_helper_dirty_blit_real(). + * + * FIXME: This should be replaced with a per-mapping is_iomem + * flag (like ttm does), and then used everywhere in fbdev code. + */ + bool fbdev_use_iomem; + + /** * @quirk_addfb_prefer_xbgr_30bpp: * * Special hack for legacy ADDFB to keep nouveau userspace happy. 
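An aside on the @fbdev_use_iomem flag documented a few lines up: its consumer is drm_fb_helper_dirty_blit_real(), which must avoid a plain memcpy() when the destination framebuffer sits in I/O memory. A hypothetical sketch of that choice follows; example_blit_line() and its arguments are illustrative, not the helper's real signature.

	static void example_blit_line(struct drm_device *dev, void __iomem *dst,
				      const void *src, size_t len)
	{
		if (dev->mode_config.fbdev_use_iomem)
			memcpy_toio(dst, src, len);		/* framebuffer behind ioremap() */
		else
			memcpy((void __force *)dst, src, len);	/* ordinary system memory */
	}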
Should diff --git a/include/linux/i2c.h b/include/linux/i2c.h index ee328cf80bd9..4e7714c88f95 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -1001,7 +1001,7 @@ static inline u32 i2c_acpi_find_bus_speed(struct device *dev) static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9e64710bc54f..35e4dc66b326 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4388,6 +4388,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_arm_monitor_counter_in_bits { diff --git a/include/linux/random.h b/include/linux/random.h index 45e1f8fa742b..9ab7443bd91b 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/once.h> +#include <asm/percpu.h> #include <uapi/linux/random.h> @@ -119,6 +120,8 @@ struct rnd_state { __u32 s1, s2, s3, s4; }; +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + u32 prandom_u32_state(struct rnd_state *state); void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index d3432ee65de7..68dab3e08aad 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -84,7 +84,7 @@ struct bucket_table { struct lockdep_map dep_map; - struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp; + struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /* @@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) -static inline struct rhash_lock_head *const *rht_bucket( +static inline struct rhash_lock_head __rcu *const *rht_bucket( const struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_var( +static inline struct rhash_lock_head __rcu **rht_bucket_var( struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? 
__rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_insert( +static inline struct rhash_lock_head __rcu **rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : @@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert( */ static inline void rht_lock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { local_bh_disable(); bit_spin_lock(0, (unsigned long *)bkt); @@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl, } static inline void rht_lock_nested(struct bucket_table *tbl, - struct rhash_lock_head **bucket, + struct rhash_lock_head __rcu **bucket, unsigned int subclass) { local_bh_disable(); @@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl, } static inline void rht_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { lock_map_release(&tbl->dep_map); bit_spin_unlock(0, (unsigned long *)bkt); local_bh_enable(); } -static inline struct rhash_head __rcu *__rht_ptr( - struct rhash_lock_head *const *bkt) +static inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { - return (struct rhash_head __rcu *) - ((unsigned long)*bkt & ~BIT(0) ?: + return (struct rhash_head *) + ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } @@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr( * access is guaranteed, such as when destroying the table. */ static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - return rcu_dereference(p); + return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( - struct rhash_lock_head *const *bkt, + struct rhash_lock_head __rcu *const *bkt, struct bucket_table *tbl, unsigned int hash) { - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - return rcu_dereference_protected(__rht_ptr(bkt), 1); + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } -static inline void rht_assign_locked(struct rhash_lock_head **bkt, +static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; - rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0))); + rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0))); } static inline void rht_assign_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; lock_map_release(&tbl->dep_map); - rcu_assign_pointer(*p, obj); + rcu_assign_pointer(*bkt, (void *)obj); preempt_enable(); __release(bitlock); local_bh_enable(); @@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - struct rhash_lock_head *const *bkt; + struct rhash_lock_head __rcu *const *bkt; struct bucket_table *tbl; struct rhash_head *he; unsigned 
int hash; @@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast( .ht = ht, .key = key, }; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; @@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one( struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; @@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast( struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index fdb07105384c..8418b7d38468 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -274,6 +274,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/devlink.h b/include/net/devlink.h index 19d990c8edcc..0606967cb501 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -732,6 +732,7 @@ enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_PIM, DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB, DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY, + DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY, DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL, @@ -929,6 +930,8 @@ enum devlink_trap_group_generic_id { "uc_loopback" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \ "local_delivery" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \ + "external_delivery" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \ "ipv6" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 5c09ba299cde..2737d24ec244 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -944,7 +944,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -956,7 +956,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } @@ -1636,13 +1636,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, + const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct 
net *net, u32 mark, u32 if_id, u8, - int dir, u32 id, int delete, int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, + const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index c4369a6c2951..2f1fc23602cb 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -305,6 +305,25 @@ struct rvt_rq { spinlock_t lock ____cacheline_aligned_in_smp; }; +/** + * rvt_get_rq_count - count numbers of request work queue entries + * in circular buffer + * @rq: data structure for request queue entry + * @head: head indices of the circular buffer + * @tail: tail indices of the circular buffer + * + * Return - total number of entries in the Receive Queue + */ + +static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail) +{ + u32 count = head - tail; + + if ((s32)count < 0) + count += rq->size; + return count; +} + /* * This structure holds the information that the send tasklet needs * to send a RDMA read response or atomic operation. diff --git a/kernel/audit.c b/kernel/audit.c index 8c201f414226..b2301bdc9773 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1851,7 +1851,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, } audit_get_stamp(ab->ctx, &t, &serial); - audit_clear_dummy(ab->ctx); audit_log_format(ab, "audit(%llu.%03lu:%u): ", (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial); diff --git a/kernel/audit.h b/kernel/audit.h index f0233dc40b17..ddc22878433d 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t); extern void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx); extern struct list_head *audit_killed_trees(void); - -static inline void audit_clear_dummy(struct audit_context *ctx) -{ - if (ctx) - ctx->dummy = 0; -} - #else /* CONFIG_AUDITSYSCALL */ #define auditsc_get_stamp(c, t, s) 0 #define audit_put_watch(w) {} @@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t) } #define audit_filter_inodes(t, c) AUDIT_DISABLED -#define audit_clear_dummy(c) {} #endif /* CONFIG_AUDITSYSCALL */ extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 468a23390457..fd840c40abf7 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1417,6 +1417,9 @@ static void audit_log_proctitle(void) struct audit_context *context = audit_context(); struct audit_buffer *ab; + if (!context || context->dummy) + return; + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ee36b7f60936..4ff0de860ab7 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4082,7 +4082,7 @@ int btf_resolve_helper_id(struct bpf_verifier_log *log, { int id; - if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID) + if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux) return -EINVAL; id = fn->btf_id[arg]; if (!id || id > btf_vmlinux->nr_types) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index d4378d7d442b..7b136953f214 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head) htab_elem_free(htab, l); } -static 
void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) { struct bpf_map *map = &htab->map; + void *ptr; if (map->ops->map_fd_put_ptr) { - void *ptr = fd_htab_map_get_ptr(map, l); - + ptr = fd_htab_map_get_ptr(map, l); map->ops->map_fd_put_ptr(ptr); } +} + +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +{ + htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { __pcpu_freelist_push(&htab->freelist, &l->fnode); @@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; diff --git a/kernel/signal.c b/kernel/signal.c index ee22ec78fd6d..6f16f7c5d375 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -719,7 +719,7 @@ static int dequeue_synchronous_signal(kernel_siginfo_t *info) * Return the first synchronous signal in the queue. */ list_for_each_entry(q, &pending->list, list) { - /* Synchronous signals have a postive si_code */ + /* Synchronous signals have a positive si_code */ if ((q->info.si_code > SI_USER) && (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { sync = q; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index df1ff803acc4..026ac01af9da 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -43,6 +43,7 @@ #include <linux/sched/debug.h> #include <linux/slab.h> #include <linux/compat.h> +#include <linux/random.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -1742,6 +1743,13 @@ void update_process_times(int user_tick) scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); + + /* The current CPU might make use of net randoms without receiving IRQs + * to renew them often enough. Let's update the net_rand_state from a + * non-constant value that's not affine to the number of calls to make + * sure it's updated when there's some activity (we don't care in idle). + */ + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); } /** diff --git a/lib/random32.c b/lib/random32.c index 763b920a6206..3d749abb9e80 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state); /** * prandom_u32_state - seeded pseudo-random number generator. 
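The timer.c and random32.c hunks above work together: random32.c drops the static and __latent_entropy qualifiers so the per-CPU net_rand_state becomes reachable from the timer code, and update_process_times() then folds a cheap, non-constant value into that state on every tick. What follows is a minimal userspace sketch of the mechanism, for illustration only; rol32() mirrors the kernel helper of the same name, while the state layout and the output step (a plain xorshift instead of the kernel's actual prandom_u32_state()) are simplified stand-ins, not the real implementation.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-in for the kernel's per-CPU struct rnd_state; only s1 is modelled,
 * because the hunk above perturbs only that word. */
struct rnd_state_model {
	uint32_t s1;
};

/* Same rotate-left operation as the kernel's rol32() used in the patch. */
static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

/* Toy output step: NOT the kernel's Tausworthe generator, just a
 * placeholder so the effect of the perturbation is visible. */
static uint32_t toy_prandom(struct rnd_state_model *st)
{
	st->s1 ^= st->s1 << 13;
	st->s1 ^= st->s1 >> 17;
	st->s1 ^= st->s1 << 5;
	return st->s1;
}

/* Models the line added to update_process_times(): fold a value derived
 * from jiffies and user_tick into the state on every tick, so the stream
 * keeps moving even when nothing else reseeds it. */
static void tick(struct rnd_state_model *st, uint32_t jiffies, int user_tick)
{
	st->s1 += rol32(jiffies, 24) + (uint32_t)user_tick;
}

int main(void)
{
	struct rnd_state_model st = { .s1 = 0x12345678u };
	uint32_t jiffies = 1000;
	int i;

	for (i = 0; i < 5; i++) {
		tick(&st, jiffies++, i & 1);
		printf("tick %d -> sample %08" PRIx32 "\n", i, toy_prandom(&st));
	}
	return 0;
}

Running the sketch shows the sample stream changing with jiffies even though nothing else touches the state, which is the property the comment in update_process_times() describes; in the kernel the equivalent add lands on net_rand_state.s1 via this_cpu_add(), keeping it lockless and per-CPU.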
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9f6890aedd1a..c949c1e3b87c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -31,7 +31,7 @@ union nested_table { union nested_table __rcu *table; - struct rhash_lock_head *bucket; + struct rhash_lock_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, @@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, } static int rhashtable_rehash_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); @@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash); + struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); int err; if (!bkt) @@ -485,7 +485,7 @@ fail: } static void *rhashtable_lookup_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { @@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, return ERR_PTR(-ENOENT); } -static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, - struct bucket_table *tbl, - unsigned int hash, - struct rhash_head *obj, - void *data) +static struct bucket_table *rhashtable_insert_one( + struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, + struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, + void *data) { struct bucket_table *new_tbl; struct rhash_head *head; @@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, { struct bucket_table *new_tbl; struct bucket_table *tbl; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; unsigned int hash; void *data; @@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_destroy); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); @@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(__rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { - static struct rhash_lock_head *rhnull; + static struct rhash_lock_head __rcu *rhnull; if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); @@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 13cd683a658a..12ecacf0c55f 100644 --- a/net/9p/trans_fd.c +++ 
b/net/9p/trans_fd.c @@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. */ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. */ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); @@ -803,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 33d8458fdd4a..4b7fc430793c 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1338,6 +1338,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -5379,7 +5382,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5401,6 +5405,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * @@ -5461,7 +5470,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5516,7 +5525,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. 
*/ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ @@ -5551,7 +5560,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5591,7 +5601,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5663,7 +5673,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length; @@ -5861,7 +5872,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); + ev->direct_addr_type, ev->rssi, NULL, 0, + false); ptr += sizeof(*ev); } diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index f580c3344cb3..51a941b56ec3 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -34,7 +34,7 @@ static void __stop_umh(void) static int bpfilter_send_req(struct mbox_request *req) { struct mbox_reply reply; - loff_t pos; + loff_t pos = 0; ssize_t n; if (!bpfilter_ops.info.tgid) diff --git a/net/compat.c b/net/compat.c index 091875bd6210..93ea07e93ae5 100644 --- a/net/compat.c +++ b/net/compat.c @@ -202,7 +202,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, /* Advance. 
*/ kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp); - ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); + ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len); } /* diff --git a/net/core/devlink.c b/net/core/devlink.c index 0ca89196a367..5fdebb7289e9 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1206,7 +1206,9 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -1415,7 +1417,9 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -1656,7 +1660,9 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -3470,7 +3476,9 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -3740,7 +3748,9 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -4697,7 +4707,9 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->extack); mutex_unlock(&devlink->lock); - if (err && err != -EOPNOTSUPP) + if (err == -EOPNOTSUPP) + err = 0; + else if (err) break; idx++; } @@ -8817,6 +8829,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = { DEVLINK_TRAP_GROUP(PIM), DEVLINK_TRAP_GROUP(UC_LB), DEVLINK_TRAP_GROUP(LOCAL_DELIVERY), + DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY), DEVLINK_TRAP_GROUP(IPV6), DEVLINK_TRAP_GROUP(PTP_EVENT), DEVLINK_TRAP_GROUP(PTP_GENERAL), diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index dcb0802a47d5..c89b46fec153 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1864,7 +1864,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 893261230ffc..dacdea7fcb62 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = 
next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index c43592771126..52c2f063529f 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -805,10 +805,17 @@ int esp6_input_done2(struct sk_buff *skb, int err) if (x->encap) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int offset = skb_network_offset(skb) + sizeof(*ip6h); struct xfrm_encap_tmpl *encap = x->encap; - struct udphdr *uh = (void *)(skb_network_header(skb) + hdr_len); - struct tcphdr *th = (void *)(skb_network_header(skb) + hdr_len); - __be16 source; + u8 nexthdr = ip6h->nexthdr; + __be16 frag_off, source; + struct udphdr *uh; + struct tcphdr *th; + + offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + uh = (void *)(skb->data + offset); + th = (void *)(skb->data + offset); + hdr_len += offset; switch (x->encap->encap_type) { case TCP_ENCAP_ESPINTCP: diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 20c740976334..43a894bf9a1b 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -458,6 +458,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 33f5efbad0a9..48d499d763fa 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3686,14 +3686,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_src.plen = cfg->fc_src_len; #endif if (nh) { - if (!nexthop_get(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); - goto out; - } if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); goto out; } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + goto out; + } rt->nh = nh; fib6_nh = nexthop_fib6_nh(rt->nh); } else { diff --git a/net/key/af_key.c b/net/key/af_key.c index f13626c1a985..c12dbc51ef5f 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + if ((xfilter->sadb_x_filter_splen >= + (sizeof(xfrm_address_t) << 3)) || + (xfilter->sadb_x_filter_dplen >= + (sizeof(xfrm_address_t) << 3))) { + mutex_unlock(&pfk->dump_lock); + return -EINVAL; + } filter = kmalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) { mutex_unlock(&pfk->dump_lock); @@ -2400,7 +2407,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2651,7 +2658,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 24e9e00decb8..87fddd84c621 100644 --- a/net/mac80211/cfg.c +++ 
b/net/mac80211/cfg.c @@ -2173,6 +2173,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d0db6af16427..7ecd801a943b 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -620,6 +620,19 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { + struct ieee80211_supported_band *sband; + const struct ieee80211_sband_iftype_data *iftd; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + iftd = ieee80211_get_sband_iftype_data(sband, + NL80211_IFTYPE_MESH_POINT); + /* The device doesn't support HE in mesh mode or at all */ + if (!iftd) + return 0; + ieee80211_ie_build_he_6ghz_cap(sdata, skb); return 0; } diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index fe4e853c61f4..48f31ac9233c 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -519,6 +519,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index bf9abfa25bc3..1dc747de30cc 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1923,9 +1923,7 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, if (sta) { tx_pending = atomic_sub_return(tx_airtime, &sta->airtime[ac].aql_tx_pending); - if (WARN_ONCE(tx_pending < 0, - "STA %pM AC %d txq pending airtime underflow: %u, %u", - sta->addr, ac, tx_pending, tx_airtime)) + if (tx_pending < 0) atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, tx_pending, 0); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ec35a92df8ed..dca01d7e6e3e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4227,11 +4227,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) goto out_free; + memset(info, 0, sizeof(*info)); + if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) - ieee80211_store_ack_skb(local, skb, &info->flags, NULL); - - memset(info, 0, sizeof(*info)); + info->ack_frame_id = ieee80211_store_ack_skb(local, skb, + &info->flags, NULL); if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) { if (sdata->control_port_no_encrypt) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index b768ebd7117f..c8504ffc71a1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2877,6 +2877,10 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!iftd)) return; + /* Check for device HE 6 GHz capability before adding element */ + if (!iftd->he_6ghz_capa.capa) + return; + cap = le16_to_cpu(iftd->he_6ghz_capa.capa); cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 687f0bea2b35..d3fe7296e1c9 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2180,7 +2180,7 @@ do_connect: /* on successful connect, the msk state will be moved to established by * subflow_finish_connect() */ - if (!err || err == EINPROGRESS) + if (!err || err == -EINPROGRESS) mptcp_copy_inaddrs(sock->sk, ssock->sk); else 
inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); diff --git a/net/rds/recv.c b/net/rds/recv.c index c8404971d5ab..aba4afe4dfed 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* put_cmsg copies to user space and thus may sleep. We can't do this * with rs_lock held, so first grab as many notifications as we can stuff diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index f07970207b54..38a46167523f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. 
+ */ +error_attached_to_socket: + trace_rxrpc_call(call->debug_id, rxrpc_call_error, + atomic_read(&call->usage), here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 19e141eeed17..8cbe0bf20ed5 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 490b1927215c..efecc5a8f67d 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -620,7 +620,7 @@ try_again: goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 03a30d014bb6..f3f6da6e4ad2 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -681,6 +681,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. */ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index f2f663e88610..97e27946897f 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1543,10 +1543,10 @@ static int __init ct_init_module(void) return 0; -err_tbl_init: - destroy_workqueue(act_ct_wq); err_register: tcf_ct_flow_tables_uninit(); +err_tbl_init: + destroy_workqueue(act_ct_wq); return err; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a096682ec0ad..5c57ac2ee75b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -13272,13 +13272,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 100e29682b48..827ccdf2db57 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -15,6 +15,7 @@ static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb, { if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, skb->truesize)) { + XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } @@ -49,23 +50,51 @@ static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb) struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx, strp); struct strp_msg *rxm = strp_msg(skb); + int len = rxm->full_len - 2; u32 nonesp_marker; int err; + /* keepalive packet? 
*/ + if (unlikely(len == 1)) { + u8 data; + + err = skb_copy_bits(skb, rxm->offset + 2, &data, 1); + if (err < 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); + kfree_skb(skb); + return; + } + + if (data == 0xff) { + kfree_skb(skb); + return; + } + } + + /* drop other short messages */ + if (unlikely(len <= sizeof(nonesp_marker))) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); + kfree_skb(skb); + return; + } + err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker, sizeof(nonesp_marker)); if (err < 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); kfree_skb(skb); return; } /* remove header, leave non-ESP marker/SPI */ if (!__pskb_pull(skb, rxm->offset + 2)) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } if (pskb_trim(skb, rxm->full_len - 2) != 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } @@ -91,7 +120,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb) return err; len = be16_to_cpu(blen); - if (len < 6) + if (len < 2) return -EINVAL; return len; @@ -109,8 +138,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, flags |= nonblock ? MSG_DONTWAIT : 0; skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err); - if (!skb) + if (!skb) { + if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) + return 0; return err; + } copied = len; if (copied > skb->len) @@ -213,7 +245,7 @@ retry: return 0; } -static int espintcp_push_msgs(struct sock *sk) +static int espintcp_push_msgs(struct sock *sk, int flags) { struct espintcp_ctx *ctx = espintcp_getctx(sk); struct espintcp_msg *emsg = &ctx->partial; @@ -227,12 +259,12 @@ static int espintcp_push_msgs(struct sock *sk) ctx->tx_running = 1; if (emsg->skb) - err = espintcp_sendskb_locked(sk, emsg, 0); + err = espintcp_sendskb_locked(sk, emsg, flags); else - err = espintcp_sendskmsg_locked(sk, emsg, 0); + err = espintcp_sendskmsg_locked(sk, emsg, flags); if (err == -EAGAIN) { ctx->tx_running = 0; - return 0; + return flags & MSG_DONTWAIT ? 
-EAGAIN : 0; } if (!err) memset(emsg, 0, sizeof(*emsg)); @@ -257,7 +289,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb) offset = skb_transport_offset(skb); len = skb->len - offset; - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); if (emsg->len) { kfree_skb(skb); @@ -270,7 +302,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb) emsg->len = len; emsg->skb = skb; - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); return 0; } @@ -287,7 +319,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) char buf[2] = {0}; int err, end; - if (msg->msg_flags) + if (msg->msg_flags & ~MSG_DONTWAIT) return -EOPNOTSUPP; if (size > MAX_ESPINTCP_MSG) @@ -298,9 +330,10 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) lock_sock(sk); - err = espintcp_push_msgs(sk); + err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT); if (err < 0) { - err = -ENOBUFS; + if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT)) + err = -ENOBUFS; goto unlock; } @@ -337,10 +370,9 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) tcp_rate_check_app_limited(sk); - err = espintcp_push_msgs(sk); + err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT); /* this message could be partially sent, keep it */ - if (err < 0) - goto unlock; + release_sock(sk); return size; @@ -374,7 +406,7 @@ static void espintcp_tx_work(struct work_struct *work) lock_sock(sk); if (!ctx->tx_running) - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); release_sock(sk); } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index be150475b28b..042ea9b40c7b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -39,7 +39,7 @@ #ifdef CONFIG_XFRM_STATISTICS #include <net/snmp.h> #endif -#ifdef CONFIG_INET_ESPINTCP +#ifdef CONFIG_XFRM_ESPINTCP #include <net/espintcp.h> #endif @@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_unlock_bh(&pq->hold_queue.lock); } -static bool xfrm_policy_mark_match(struct xfrm_policy *policy, - struct xfrm_policy *pol) +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark, + struct xfrm_policy *pol) { - if (policy->mark.v == pol->mark.v && - policy->priority == pol->priority) - return true; - - return false; + return mark->v == pol->mark.v && mark->m == pol->mark.m; } static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed) @@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { delpol = pol; @@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) @@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) EXPORT_SYMBOL(xfrm_policy_insert); static struct xfrm_policy * -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const 
struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx) { struct xfrm_policy *pol; @@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v && + xfrm_policy_mark_match(mark, pol) && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, pol->security)) return pol; @@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, return NULL; } -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, - struct xfrm_sec_ctx *ctx, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, struct xfrm_selector *sel, + struct xfrm_sec_ctx *ctx, int delete, int *err) { struct xfrm_pol_inexact_bin *bin = NULL; struct xfrm_policy *pol, *ret = NULL; @@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, u32 id, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && - pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v) { + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) { xfrm_pol_hold(pol); if (delete) { *err = security_xfrm_policy_delete( @@ -4167,7 +4160,7 @@ void __init xfrm_init(void) seqcount_init(&xfrm_policy_hash_generation); xfrm_input_init(); -#ifdef CONFIG_INET_ESPINTCP +#ifdef CONFIG_XFRM_ESPINTCP espintcp_init(); #endif diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e6cfaa680ef3..fbb7d9d06478 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; p = nlmsg_data(nlh); @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, + p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, - ctx, delete, &err); + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, + &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); 
@@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, + 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 6aea65c65745..45f2ab2ec2d4 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -138,11 +138,19 @@ char *read_text_file(const char *filename) char *get_line(char **stringp) { + char *orig = *stringp, *next; + /* do not return the unwanted extra line at EOF */ - if (*stringp && **stringp == '\0') + if (!orig || *orig == '\0') return NULL; - return strsep(stringp, "\n"); + next = strchr(orig, '\n'); + if (next) + *next++ = '\0'; + + *stringp = next; + + return orig; } /* A list of all modules we processed */ diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7e3ae4534df9..803978d69e3c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2935,6 +2935,10 @@ static int hda_codec_runtime_suspend(struct device *dev) struct hda_codec *codec = dev_to_hda_codec(dev); unsigned int state; + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + cancel_delayed_work_sync(&codec->jackpoll_work); state = hda_call_codec_suspend(codec); if (codec->link_down_at_suspend || @@ -2949,6 +2953,10 @@ static int hda_codec_runtime_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + codec_display_power(codec, true); snd_hdac_codec_link_up(&codec->core); hda_call_codec_resume(codec); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 82e26442724b..a356fb0e5773 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -41,7 +41,7 @@ /* 24 unused */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ -/* 27 unused */ +#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 3565e2ab0965..3fbba2e51e36 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -298,7 +298,8 @@ enum { /* PCH for HSW/BDW; with runtime PM */ /* no i915 binding for this as HSW/BDW has another controller for HDMI */ #define AZX_DCAPS_INTEL_PCH \ - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME) + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ + AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) /* HSW HDMI */ #define AZX_DCAPS_INTEL_HASWELL \ @@ -1028,7 +1029,14 @@ static int azx_suspend(struct 
device *dev) chip = card->private_data; bus = azx_bus(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); - pm_runtime_force_suspend(dev); + /* An ugly workaround: direct call of __azx_runtime_suspend() and + * __azx_runtime_resume() for old Intel platforms that suffer from + * spurious wakeups after S3 suspend + */ + if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + __azx_runtime_suspend(chip); + else + pm_runtime_force_suspend(dev); if (bus->irq >= 0) { free_irq(bus->irq, chip); bus->irq = -1; @@ -1057,7 +1065,10 @@ static int azx_resume(struct device *dev) if (azx_acquire_irq(chip, 1) < 0) return -EIO; - pm_runtime_force_resume(dev); + if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + __azx_runtime_resume(chip, false); + else + pm_runtime_force_resume(dev); snd_power_change_state(card, SNDRV_CTL_POWER_D0); trace_azx_resume(chip); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 41eaa89660c3..cd46247988e4 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2440,6 +2440,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp, mutex_lock(&spec->bind_lock); spec->use_acomp_notifier = use_acomp; spec->codec->relaxed_resume = use_acomp; + spec->codec->bus->keep_power = 0; /* reprogram each jack detection logic depending on the notifier */ for (i = 0; i < spec->num_pins; i++) reprogram_jack_detect(spec->codec, @@ -2534,7 +2535,6 @@ static void generic_acomp_init(struct hda_codec *codec, if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops, match_bound_vga, 0)) { spec->acomp_registered = true; - codec->bus->keep_power = 0; } } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1b2d8e56390a..29f5878f0c50 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5975,6 +5975,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec, snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); } +static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_INIT) + return; + + msleep(100); + alc_write_coef_idx(codec, 0x65, 0x0); +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -6152,8 +6162,10 @@ enum { ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, ALC269VC_FIXUP_ACER_HEADSET_MIC, ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, - ALC289_FIXUP_ASUS_G401, + ALC289_FIXUP_ASUS_GA401, + ALC289_FIXUP_ASUS_GA502, ALC256_FIXUP_ACER_MIC_NO_PRESENCE, + ALC285_FIXUP_HP_GPIO_AMP_INIT, }; static const struct hda_fixup alc269_fixups[] = { @@ -7363,7 +7375,14 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MIC }, - [ALC289_FIXUP_ASUS_G401] = { + [ALC289_FIXUP_ASUS_GA401] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC289_FIXUP_ASUS_GA502] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x19, 0x03a11020 }, /* headset mic with jack detect */ @@ -7379,6 +7398,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE }, + [ALC285_FIXUP_HP_GPIO_AMP_INIT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_amp_init, + .chained = true, + .chain_id = ALC285_FIXUP_HP_GPIO_LED + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7529,7 +7554,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { 
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -7561,7 +7586,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), - SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -7581,7 +7607,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), - SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK), + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 40b7cd13fed9..a69d9e75f66f 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ifnum = 0; goto add_sync_ep_from_ifnum; case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ ep = 0x81; diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 5b36c589a029..ba4f33804af1 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg, if (read_expected(TEP_EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile index 349bb81482ab..680d883efe05 100644 --- a/tools/lib/traceevent/plugins/Makefile +++ b/tools/lib/traceevent/plugins/Makefile @@ -197,7 +197,7 @@ define do_generate_dynamic_list_file xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ if [ "$$symbol_type" = "U W" ];then \ (echo '{'; \ - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ + $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\ echo '};'; \ ) > $2; \ else \ 
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index 0a6e75b8777a..28a5d0c18b1d 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -56,7 +56,7 @@ struct auxtrace_record struct perf_pmu *cs_etm_pmu; struct evsel *evsel; bool found_etm = false; - bool found_spe = false; + struct perf_pmu *found_spe = NULL; static struct perf_pmu **arm_spe_pmus = NULL; static int nr_spes = 0; int i = 0; @@ -74,12 +74,12 @@ struct auxtrace_record evsel->core.attr.type == cs_etm_pmu->type) found_etm = true; - if (!nr_spes) + if (!nr_spes || found_spe) continue; for (i = 0; i < nr_spes; i++) { if (evsel->core.attr.type == arm_spe_pmus[i]->type) { - found_spe = true; + found_spe = arm_spe_pmus[i]; break; } } @@ -96,7 +96,7 @@ struct auxtrace_record #if defined(__aarch64__) if (found_spe) - return arm_spe_recording_init(err, arm_spe_pmus[i]); + return arm_spe_recording_init(err, found_spe); #endif /* diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh index 63a91ec473bb..045723b3d992 100755 --- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh +++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh @@ -12,7 +12,8 @@ skip_if_no_z_record() { collect_z_record() { echo "Collecting compressed record file:" - $perf_tool record -o $trace_file -g -z -F 5000 -- \ + [[ "$(uname -m)" != s390x ]] && gflag='-g' + $perf_tool record -o $trace_file $gflag -z -F 5000 -- \ dd count=500 if=/dev/urandom of=/dev/null } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c index f7ee8fa377ad..6ccecbd39476 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c @@ -5,10 +5,60 @@ #include "test_btf_map_in_map.skel.h" +static int duration; + +static __u32 bpf_map_id(struct bpf_map *map) +{ + struct bpf_map_info info; + __u32 info_len = sizeof(info); + int err; + + memset(&info, 0, info_len); + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len); + if (err) + return 0; + return info.id; +} + +/* + * Trigger synchronize_rcu() in kernel. + * + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu() + * if looking up an existing non-NULL element or updating the map with a valid + * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map, + * create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then + * cleanup. At the end, at least one synchronize_rcu() would be called. 
+ */ +static int kern_sync_rcu(void) +{ + int inner_map_fd, outer_map_fd, err, zero = 0; + + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0); + if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno)) + return -1; + + outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, + sizeof(int), inner_map_fd, 1, 0); + if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) { + close(inner_map_fd); + return -1; + } + + err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0); + if (err) + err = -errno; + CHECK(err, "outer_map_update", "failed %d\n", err); + close(inner_map_fd); + close(outer_map_fd); + return err; +} + void test_btf_map_in_map(void) { - int duration = 0, err, key = 0, val; - struct test_btf_map_in_map* skel; + int err, key = 0, val, i; + struct test_btf_map_in_map *skel; + int outer_arr_fd, outer_hash_fd; + int fd, map1_fd, map2_fd, map1_id, map2_id; skel = test_btf_map_in_map__open_and_load(); if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n")) @@ -18,32 +68,78 @@ void test_btf_map_in_map(void) if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) goto cleanup; + map1_fd = bpf_map__fd(skel->maps.inner_map1); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + outer_arr_fd = bpf_map__fd(skel->maps.outer_arr); + outer_hash_fd = bpf_map__fd(skel->maps.outer_hash); + /* inner1 = input, inner2 = input + 1 */ - val = bpf_map__fd(skel->maps.inner_map1); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0); - val = bpf_map__fd(skel->maps.inner_map2); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0); + map1_fd = bpf_map__fd(skel->maps.inner_map1); + bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0); skel->bss->input = 1; usleep(1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val); + bpf_map_lookup_elem(map1_fd, &key, &val); CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val); + bpf_map_lookup_elem(map2_fd, &key, &val); CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2); /* inner1 = input + 1, inner2 = input */ - val = bpf_map__fd(skel->maps.inner_map2); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0); - val = bpf_map__fd(skel->maps.inner_map1); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0); + bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0); + bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0); skel->bss->input = 3; usleep(1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val); + bpf_map_lookup_elem(map1_fd, &key, &val); CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val); + bpf_map_lookup_elem(map2_fd, &key, &val); CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3); + for (i = 0; i < 5; i++) { + val = i % 2 ? 
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index 8294ae3ffb3c..43c9cda199b8 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -318,6 +318,9 @@ class DebugfsDir:
                 continue

             if os.path.isfile(p):
+                # We need to init trap_flow_action_cookie before read it
+                if f == "trap_flow_action_cookie":
+                    cmd('echo deadbeef > %s/%s' % (path, f))
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
             elif os.path.isdir(p):
diff --git a/tools/testing/selftests/bpf/verifier/event_output.c b/tools/testing/selftests/bpf/verifier/event_output.c
index 99f8f582c02b..c5e805980409 100644
--- a/tools/testing/selftests/bpf/verifier/event_output.c
+++ b/tools/testing/selftests/bpf/verifier/event_output.c
@@ -112,6 +112,7 @@
 	"perfevent for cgroup sockopt",
 	.insns = { __PERF_EVENT_INSNS__ },
 	.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
+	.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
 	.fixup_map_event_output = { 4 },
 	.result = ACCEPT,
 	.retval = 1,
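The one-line event_output.c fix above reflects how BPF_PROG_TYPE_CGROUP_SOCKOPT programs are loaded: the kernel only accepts them when an attach type (BPF_CGROUP_SETSOCKOPT or BPF_CGROUP_GETSOCKOPT) is supplied at load time, which is why the verifier test has to set expected_attach_type. A rough sketch of such a load via the raw bpf(2) syscall; the trivial "return 1" program and the function name are illustrative only, and a recent linux/bpf.h plus the usual privileges are assumed:

/* Rough sketch of loading a trivial "return 1" cgroup sockopt program with
 * the raw bpf(2) syscall. The function name is made up; error handling and
 * verifier logging are omitted, and CAP_SYS_ADMIN is assumed.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_setsockopt_prog(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 1 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT;
	attr.expected_attach_type = BPF_CGROUP_SETSOCKOPT;	/* required for this prog type */
	attr.insns = (unsigned long)insns;
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = (unsigned long)"GPL";

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}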
-	# Remove the first speed, h1 does not advertise this speed.
-	unset speeds_arr[0]

 	max_speed=${speeds_arr[0]}
 	for current in ${speeds_arr[@]}; do
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 8c8c7d79c38d..2c522f7a0aec 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
 	int fds[2], fds_udp[2][2], ret;

 	fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
-		typeflags, PORT_BASE, PORT_BASE + port_off);
+		typeflags, (uint16_t)PORT_BASE,
+		(uint16_t)(PORT_BASE + port_off));

 	fds[0] = sock_fanout_open(typeflags, 0);
 	fds[1] = sock_fanout_open(typeflags, 0);
diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
index 221fdece47d4..e4613ce4ed69 100644
--- a/tools/testing/selftests/net/rxtimestamp.c
+++ b/tools/testing/selftests/net/rxtimestamp.c
@@ -359,8 +359,7 @@ int main(int argc, char **argv)
 	bool strict = false;
 	int arg_index = 0;
 	int failures = 0;
-	int s, t;
-	char opt;
+	int s, t, opt;

 	while ((opt = getopt_long(argc, argv, "", long_options,
 				  &arg_index)) != -1) {
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index ceaad78e9667..3155fbbf644b 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
 	if (rbuf[0] != ts->data)
 		error(1, 0, "payload mismatch. expected %c", ts->data);

-	if (labs(tstop - texpect) > cfg_variance_us)
+	if (llabs(tstop - texpect) > cfg_variance_us)
 		error(1, 0, "exceeds variance (%d us)", cfg_variance_us);

 	return false;
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index 4555f88252ba..a61b7b3da549 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -344,7 +344,7 @@ int main(int argc, char *argv[])
 {
 	struct sockaddr_storage listenaddr, addr;
 	unsigned int max_pacing_rate = 0;
-	size_t total = 0;
+	uint64_t total = 0;
 	char *host = NULL;
 	int fd, c, on = 1;
 	char *buffer;
@@ -473,12 +473,12 @@ int main(int argc, char *argv[])
 		zflg = 0;
 	}
 	while (total < FILE_SZ) {
-		ssize_t wr = FILE_SZ - total;
+		int64_t wr = FILE_SZ - total;

 		if (wr > chunk_size)
 			wr = chunk_size;
 		/* Note : we just want to fill the pipe with 0 bytes */
-		wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0);
+		wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0);
 		if (wr <= 0)
 			break;
 		total += wr;
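The remaining net selftest fixes above all deal with integer width and printf argument types: so_txtime.c switches to llabs() because its timestamp delta is 64-bit even where long is 32-bit, psock_fanout.c casts its port expressions so they match the %hu conversions, and tcp_mmap.c moves its byte counters to fixed 64-bit types. A small standalone program (not taken from the selftests) showing both patterns:

/* Illustration only: llabs() for 64-bit differences and explicit casts for
 * narrow printf conversions. The values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int64_t tstop = 5000000000LL, texpect = 1000000;
	uint16_t port_base = 8000;
	int port_off = 2;

	/* llabs() keeps the full 64-bit magnitude; labs() may truncate on
	 * platforms where long is 32-bit.
	 */
	printf("delta: %lld us\n", llabs(tstop - texpect));

	/* %hu promises an unsigned short value, so cast the int-typed sum. */
	printf("ports %hu,%hu\n", port_base, (uint16_t)(port_base + port_off));
	return 0;
}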