Diffstat (limited to 'include')
157 files changed, 3083 insertions, 1124 deletions
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 517a5231cc1b..34fb3431a8f3 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -53,6 +53,24 @@ enum { GHES_SEV_PANIC = 0x3, }; +#ifdef CONFIG_ACPI_APEI_GHES +/** + * ghes_register_vendor_record_notifier - register a notifier for vendor + * records that the kernel would otherwise ignore. + * @nb: pointer to the notifier_block structure of the event handler. + * + * return 0 : SUCCESS, non-zero : FAIL + */ +int ghes_register_vendor_record_notifier(struct notifier_block *nb); + +/** + * ghes_unregister_vendor_record_notifier - unregister the previously + * registered vendor record notifier. + * @nb: pointer to the notifier_block structure of the vendor record handler. + */ +void ghes_unregister_vendor_record_notifier(struct notifier_block *nb); +#endif + int ghes_estatus_pool_init(int num_ghes); /* From drivers/edac/ghes_edac.c */ diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 62ebdc731ee2..e78bbb9a07e9 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -39,6 +39,7 @@ mandatory-y += mmiowb.h mandatory-y += mmu.h mandatory-y += mmu_context.h mandatory-y += module.h +mandatory-y += module.lds.h mandatory-y += msi.h mandatory-y += pci.h mandatory-y += percpu.h diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index dabf8cb7203b..9ea83d80eb6f 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -911,18 +911,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr, #include <linux/vmalloc.h> #define __io_virt(x) ((void __force *)(x)) -#ifndef CONFIG_GENERIC_IOMAP -struct pci_dev; -extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); - -#ifndef pci_iounmap -#define pci_iounmap pci_iounmap -static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) -{ -} -#endif -#endif /* CONFIG_GENERIC_IOMAP */ - /* * Change virtual addresses to physical addresses and vv. * These are pretty trivial @@ -1016,6 +1004,16 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) port &= IO_SPACE_LIMIT; return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; } +#define __pci_ioport_unmap __pci_ioport_unmap +static inline void __pci_ioport_unmap(void __iomem *p) +{ + uintptr_t start = (uintptr_t) PCI_IOBASE; + uintptr_t addr = (uintptr_t) p; + + if (addr >= start && addr < start + IO_SPACE_LIMIT) + return; + iounmap(p); +} #endif #ifndef ioport_unmap @@ -1030,6 +1028,23 @@ extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_GENERIC_IOMAP */ #endif /* CONFIG_HAS_IOPORT_MAP */ +#ifndef CONFIG_GENERIC_IOMAP +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); + +#ifndef __pci_ioport_unmap +static inline void __pci_ioport_unmap(void __iomem *p) {} +#endif + +#ifndef pci_iounmap +#define pci_iounmap pci_iounmap +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ + __pci_ioport_unmap(p); +} +#endif +#endif /* CONFIG_GENERIC_IOMAP */ + /* * Convert a virtual cached pointer to an uncached pointer */ diff --git a/include/asm-generic/module.lds.h b/include/asm-generic/module.lds.h new file mode 100644 index 000000000000..f210d5c1b78b --- /dev/null +++ b/include/asm-generic/module.lds.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GENERIC_MODULE_LDS_H +#define __ASM_GENERIC_MODULE_LDS_H + +/* + * <asm/module.lds.h> can specify arch-specific sections for linking modules. 
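Illustrative sketch (not taken from this diff): the ghes.h hunk above adds ghes_register_vendor_record_notifier()/ghes_unregister_vendor_record_notifier(), available under CONFIG_ACPI_APEI_GHES, so a driver can be told about vendor error records the kernel would otherwise ignore. A consumer might look roughly like the following; the names are hypothetical and the interpretation of the notifier data pointer is an assumption, since the payload is defined by the GHES driver and the reporting firmware.

#include <linux/module.h>
#include <linux/notifier.h>
#include <acpi/ghes.h>

/* Called through the GHES vendor-record notifier chain; what 'data'
 * points to is specific to the reported record (assumption for this
 * sketch - consult the GHES driver for the exact payload). */
static int example_vendor_record_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block example_vendor_record_nb = {
	.notifier_call = example_vendor_record_notify,
};

static int __init example_init(void)
{
	return ghes_register_vendor_record_notifier(&example_vendor_record_nb);
}

static void __exit example_exit(void)
{
	ghes_unregister_vendor_record_notifier(&example_vendor_record_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");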
+ * Empty for the asm-generic header. + */ + +#endif /* __ASM_GENERIC_MODULE_LDS_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index e1843976754a..cd14444bf600 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -734,7 +734,8 @@ THERMAL_TABLE(governor) \ EARLYCON_TABLE() \ LSM_TABLE() \ - EARLY_LSM_TABLE() + EARLY_LSM_TABLE() \ + KUNIT_TABLE() #define INIT_TEXT \ *(.init.text .init.text.*) \ @@ -932,6 +933,13 @@ KEEP(*(.con_initcall.init)) \ __con_initcall_end = .; +/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ +#define KUNIT_TABLE() \ + . = ALIGN(8); \ + __kunit_suites_start = .; \ + KEEP(*(.kunit_test_suites)) \ + __kunit_suites_end = .; + #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ . = ALIGN(4); \ diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h index 8cec5a1e1806..5ec4137231e3 100644 --- a/include/dt-bindings/clock/dra7.h +++ b/include/dt-bindings/clock/dra7.h @@ -332,6 +332,7 @@ #define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0) #define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0) #define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8) +#define DRA7_L4SEC_SHAM2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1f8) /* l4per2 clocks */ #define DRA7_L4PER2_CLKCTRL_OFFSET 0xc diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h index bc8a3c53a54b..e259cc01f22f 100644 --- a/include/dt-bindings/clock/exynos5250.h +++ b/include/dt-bindings/clock/exynos5250.h @@ -172,8 +172,10 @@ #define CLK_MOUT_GPLL 1025 #define CLK_MOUT_ACLK200_DISP1_SUB 1026 #define CLK_MOUT_ACLK300_DISP1_SUB 1027 +#define CLK_MOUT_APLL 1028 +#define CLK_MOUT_MPLL 1029 /* must be greater than maximal clock id */ -#define CLK_NR_CLKS 1028 +#define CLK_NR_CLKS 1030 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */ diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h index 02d5ac469a3d..9fffc6ceaadd 100644 --- a/include/dt-bindings/clock/exynos5420.h +++ b/include/dt-bindings/clock/exynos5420.h @@ -230,6 +230,12 @@ #define CLK_MOUT_USER_MAU_EPLL 659 #define CLK_MOUT_SCLK_SPLL 660 #define CLK_MOUT_MX_MSPLL_CCORE_PHY 661 +#define CLK_MOUT_SW_ACLK_G3D 662 +#define CLK_MOUT_APLL 663 +#define CLK_MOUT_MSPLL_CPU 664 +#define CLK_MOUT_KPLL 665 +#define CLK_MOUT_MSPLL_KFC 666 + /* divider clocks */ #define CLK_DOUT_PIXEL 768 diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 7a23f289b27f..e8d68fbb6e3f 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -180,7 +180,7 @@ #define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF 171 #define IMX8MP_CLK_MEDIA_DISP1_PIX 172 #define IMX8MP_CLK_MEDIA_CAM2_PIX 173 -#define IMX8MP_CLK_MEDIA_MIPI_PHY2_REF 174 +#define IMX8MP_CLK_MEDIA_LDB 174 #define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC 175 #define IMX8MP_CLK_PCIE2_CTRL 176 #define IMX8MP_CLK_PCIE2_PHY 177 diff --git a/include/dt-bindings/clock/mt8167-clk.h b/include/dt-bindings/clock/mt8167-clk.h new file mode 100644 index 000000000000..a96158edd817 --- /dev/null +++ b/include/dt-bindings/clock/mt8167-clk.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + * Copyright (c) 2020 BayLibre, SAS. 
+ * Author: James Liao <jamesjj.liao@mediatek.com> + * Fabien Parent <fparent@baylibre.com> + */ + +#ifndef _DT_BINDINGS_CLK_MT8167_H +#define _DT_BINDINGS_CLK_MT8167_H + +/* MT8167 is based on MT8516 */ +#include <dt-bindings/clock/mt8516-clk.h> + +/* APMIXEDSYS */ + +#define CLK_APMIXED_TVDPLL (CLK_APMIXED_NR_CLK + 0) +#define CLK_APMIXED_LVDSPLL (CLK_APMIXED_NR_CLK + 1) +#define CLK_APMIXED_HDMI_REF (CLK_APMIXED_NR_CLK + 2) +#define MT8167_CLK_APMIXED_NR_CLK (CLK_APMIXED_NR_CLK + 3) + +/* TOPCKGEN */ + +#define CLK_TOP_DSI0_LNTC_DSICK (CLK_TOP_NR_CLK + 0) +#define CLK_TOP_VPLL_DPIX (CLK_TOP_NR_CLK + 1) +#define CLK_TOP_LVDSTX_CLKDIG_CTS (CLK_TOP_NR_CLK + 2) +#define CLK_TOP_HDMTX_CLKDIG_CTS (CLK_TOP_NR_CLK + 3) +#define CLK_TOP_LVDSPLL (CLK_TOP_NR_CLK + 4) +#define CLK_TOP_LVDSPLL_D2 (CLK_TOP_NR_CLK + 5) +#define CLK_TOP_LVDSPLL_D4 (CLK_TOP_NR_CLK + 6) +#define CLK_TOP_LVDSPLL_D8 (CLK_TOP_NR_CLK + 7) +#define CLK_TOP_MIPI_26M (CLK_TOP_NR_CLK + 8) +#define CLK_TOP_TVDPLL (CLK_TOP_NR_CLK + 9) +#define CLK_TOP_TVDPLL_D2 (CLK_TOP_NR_CLK + 10) +#define CLK_TOP_TVDPLL_D4 (CLK_TOP_NR_CLK + 11) +#define CLK_TOP_TVDPLL_D8 (CLK_TOP_NR_CLK + 12) +#define CLK_TOP_TVDPLL_D16 (CLK_TOP_NR_CLK + 13) +#define CLK_TOP_PWM_MM (CLK_TOP_NR_CLK + 14) +#define CLK_TOP_CAM_MM (CLK_TOP_NR_CLK + 15) +#define CLK_TOP_MFG_MM (CLK_TOP_NR_CLK + 16) +#define CLK_TOP_SPM_52M (CLK_TOP_NR_CLK + 17) +#define CLK_TOP_MIPI_26M_DBG (CLK_TOP_NR_CLK + 18) +#define CLK_TOP_SCAM_MM (CLK_TOP_NR_CLK + 19) +#define CLK_TOP_SMI_MM (CLK_TOP_NR_CLK + 20) +#define CLK_TOP_26M_HDMI_SIFM (CLK_TOP_NR_CLK + 21) +#define CLK_TOP_26M_CEC (CLK_TOP_NR_CLK + 22) +#define CLK_TOP_32K_CEC (CLK_TOP_NR_CLK + 23) +#define CLK_TOP_GCPU_B (CLK_TOP_NR_CLK + 24) +#define CLK_TOP_RG_VDEC (CLK_TOP_NR_CLK + 25) +#define CLK_TOP_RG_FDPI0 (CLK_TOP_NR_CLK + 26) +#define CLK_TOP_RG_FDPI1 (CLK_TOP_NR_CLK + 27) +#define CLK_TOP_RG_AXI_MFG (CLK_TOP_NR_CLK + 28) +#define CLK_TOP_RG_SLOW_MFG (CLK_TOP_NR_CLK + 29) +#define CLK_TOP_GFMUX_EMI1X_SEL (CLK_TOP_NR_CLK + 30) +#define CLK_TOP_CSW_MUX_MFG_SEL (CLK_TOP_NR_CLK + 31) +#define CLK_TOP_CAMTG_MM_SEL (CLK_TOP_NR_CLK + 32) +#define CLK_TOP_PWM_MM_SEL (CLK_TOP_NR_CLK + 33) +#define CLK_TOP_SPM_52M_SEL (CLK_TOP_NR_CLK + 34) +#define CLK_TOP_MFG_MM_SEL (CLK_TOP_NR_CLK + 35) +#define CLK_TOP_SMI_MM_SEL (CLK_TOP_NR_CLK + 36) +#define CLK_TOP_SCAM_MM_SEL (CLK_TOP_NR_CLK + 37) +#define CLK_TOP_VDEC_MM_SEL (CLK_TOP_NR_CLK + 38) +#define CLK_TOP_DPI0_MM_SEL (CLK_TOP_NR_CLK + 39) +#define CLK_TOP_DPI1_MM_SEL (CLK_TOP_NR_CLK + 40) +#define CLK_TOP_AXI_MFG_IN_SEL (CLK_TOP_NR_CLK + 41) +#define CLK_TOP_SLOW_MFG_SEL (CLK_TOP_NR_CLK + 42) +#define MT8167_CLK_TOP_NR_CLK (CLK_TOP_NR_CLK + 43) + +/* MFGCFG */ + +#define CLK_MFG_BAXI 0 +#define CLK_MFG_BMEM 1 +#define CLK_MFG_BG3D 2 +#define CLK_MFG_B26M 3 +#define CLK_MFG_NR_CLK 4 + +/* MMSYS */ + +#define CLK_MM_SMI_COMMON 0 +#define CLK_MM_SMI_LARB0 1 +#define CLK_MM_CAM_MDP 2 +#define CLK_MM_MDP_RDMA 3 +#define CLK_MM_MDP_RSZ0 4 +#define CLK_MM_MDP_RSZ1 5 +#define CLK_MM_MDP_TDSHP 6 +#define CLK_MM_MDP_WDMA 7 +#define CLK_MM_MDP_WROT 8 +#define CLK_MM_FAKE_ENG 9 +#define CLK_MM_DISP_OVL0 10 +#define CLK_MM_DISP_RDMA0 11 +#define CLK_MM_DISP_RDMA1 12 +#define CLK_MM_DISP_WDMA 13 +#define CLK_MM_DISP_COLOR 14 +#define CLK_MM_DISP_CCORR 15 +#define CLK_MM_DISP_AAL 16 +#define CLK_MM_DISP_GAMMA 17 +#define CLK_MM_DISP_DITHER 18 +#define CLK_MM_DISP_UFOE 19 +#define CLK_MM_DISP_PWM_MM 20 +#define CLK_MM_DISP_PWM_26M 21 +#define CLK_MM_DSI_ENGINE 22 +#define CLK_MM_DSI_DIGITAL 23 
+#define CLK_MM_DPI0_ENGINE 24 +#define CLK_MM_DPI0_PXL 25 +#define CLK_MM_LVDS_PXL 26 +#define CLK_MM_LVDS_CTS 27 +#define CLK_MM_DPI1_ENGINE 28 +#define CLK_MM_DPI1_PXL 29 +#define CLK_MM_HDMI_PXL 30 +#define CLK_MM_HDMI_SPDIF 31 +#define CLK_MM_HDMI_ADSP_BCK 32 +#define CLK_MM_HDMI_PLL 33 +#define CLK_MM_NR_CLK 34 + +/* IMGSYS */ + +#define CLK_IMG_LARB1_SMI 0 +#define CLK_IMG_CAM_SMI 1 +#define CLK_IMG_CAM_CAM 2 +#define CLK_IMG_SEN_TG 3 +#define CLK_IMG_SEN_CAM 4 +#define CLK_IMG_VENC 5 +#define CLK_IMG_NR_CLK 6 + +/* VDECSYS */ + +#define CLK_VDEC_CKEN 0 +#define CLK_VDEC_LARB1_CKEN 1 +#define CLK_VDEC_NR_CLK 2 + +#endif /* _DT_BINDINGS_CLK_MT8167_H */ diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8150.h b/include/dt-bindings/clock/qcom,dispcc-sm8150.h new file mode 120000 index 000000000000..0312b4544acb --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sm8150.h @@ -0,0 +1 @@ +qcom,dispcc-sm8250.h
\ No newline at end of file diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8250.h b/include/dt-bindings/clock/qcom,dispcc-sm8250.h new file mode 100644 index 000000000000..fdaca6ad5c85 --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sm8250.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8250_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8250_H + +/* DISP_CC clock registers */ +#define DISP_CC_MDSS_AHB_CLK 0 +#define DISP_CC_MDSS_AHB_CLK_SRC 1 +#define DISP_CC_MDSS_BYTE0_CLK 2 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 3 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 5 +#define DISP_CC_MDSS_BYTE1_CLK 6 +#define DISP_CC_MDSS_BYTE1_CLK_SRC 7 +#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 8 +#define DISP_CC_MDSS_BYTE1_INTF_CLK 9 +#define DISP_CC_MDSS_DP_AUX1_CLK 10 +#define DISP_CC_MDSS_DP_AUX1_CLK_SRC 11 +#define DISP_CC_MDSS_DP_AUX_CLK 12 +#define DISP_CC_MDSS_DP_AUX_CLK_SRC 13 +#define DISP_CC_MDSS_DP_LINK1_CLK 14 +#define DISP_CC_MDSS_DP_LINK1_CLK_SRC 15 +#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC 16 +#define DISP_CC_MDSS_DP_LINK1_INTF_CLK 17 +#define DISP_CC_MDSS_DP_LINK_CLK 18 +#define DISP_CC_MDSS_DP_LINK_CLK_SRC 19 +#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 20 +#define DISP_CC_MDSS_DP_LINK_INTF_CLK 21 +#define DISP_CC_MDSS_DP_PIXEL1_CLK 22 +#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 23 +#define DISP_CC_MDSS_DP_PIXEL2_CLK 24 +#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC 25 +#define DISP_CC_MDSS_DP_PIXEL_CLK 26 +#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 27 +#define DISP_CC_MDSS_ESC0_CLK 28 +#define DISP_CC_MDSS_ESC0_CLK_SRC 29 +#define DISP_CC_MDSS_ESC1_CLK 30 +#define DISP_CC_MDSS_ESC1_CLK_SRC 31 +#define DISP_CC_MDSS_MDP_CLK 32 +#define DISP_CC_MDSS_MDP_CLK_SRC 33 +#define DISP_CC_MDSS_MDP_LUT_CLK 34 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 35 +#define DISP_CC_MDSS_PCLK0_CLK 36 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 37 +#define DISP_CC_MDSS_PCLK1_CLK 38 +#define DISP_CC_MDSS_PCLK1_CLK_SRC 39 +#define DISP_CC_MDSS_ROT_CLK 40 +#define DISP_CC_MDSS_ROT_CLK_SRC 41 +#define DISP_CC_MDSS_RSCC_AHB_CLK 42 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 43 +#define DISP_CC_MDSS_VSYNC_CLK 44 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 45 +#define DISP_CC_PLL0 46 +#define DISP_CC_PLL1 47 + +/* DISP_CC Reset */ +#define DISP_CC_MDSS_CORE_BCR 0 +#define DISP_CC_MDSS_RSCC_BCR 1 + +/* DISP_CC GDSCR */ +#define MDSS_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h index 938969309e00..507b8d6effd2 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8994.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h @@ -126,5 +126,41 @@ #define GCC_USB3_PHY_AUX_CLK 116 #define GCC_USB_HS_SYSTEM_CLK 117 #define GCC_SDCC1_AHB_CLK 118 +#define GCC_LPASS_Q6_AXI_CLK 119 +#define GCC_MSS_Q6_BIMC_AXI_CLK 120 +#define GCC_PCIE_0_CFG_AHB_CLK 121 +#define GCC_PCIE_0_MSTR_AXI_CLK 122 +#define GCC_PCIE_0_SLV_AXI_CLK 123 +#define GCC_PCIE_1_CFG_AHB_CLK 124 +#define GCC_PCIE_1_MSTR_AXI_CLK 125 +#define GCC_PCIE_1_SLV_AXI_CLK 126 +#define GCC_PDM_AHB_CLK 127 +#define GCC_SDCC2_AHB_CLK 128 +#define GCC_SDCC3_AHB_CLK 129 +#define GCC_SDCC4_AHB_CLK 130 +#define GCC_TSIF_AHB_CLK 131 +#define GCC_UFS_AHB_CLK 132 +#define GCC_UFS_RX_SYMBOL_0_CLK 133 +#define GCC_UFS_RX_SYMBOL_1_CLK 134 +#define GCC_UFS_TX_SYMBOL_0_CLK 135 +#define GCC_UFS_TX_SYMBOL_1_CLK 136 +#define GCC_USB2_HS_PHY_SLEEP_CLK 137 +#define GCC_USB30_SLEEP_CLK 138 +#define 
GCC_USB_HS_AHB_CLK 139 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 140 + +/* GDSCs */ +#define PCIE_GDSC 0 +#define PCIE_0_GDSC 1 +#define PCIE_1_GDSC 2 +#define USB30_GDSC 3 +#define UFS_GDSC 4 + +/* Resets */ +#define USB3_PHY_RESET 0 +#define USB3PHY_PHY_RESET 1 +#define PCIE_PHY_0_RESET 2 +#define PCIE_PHY_1_RESET 3 +#define QUSB2_PHY_RESET 4 #endif diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h new file mode 100644 index 000000000000..e24ee840cfdb --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8150_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8150_H + +/* VIDEO_CC clocks */ +#define VIDEO_CC_IRIS_AHB_CLK 0 +#define VIDEO_CC_IRIS_CLK_SRC 1 +#define VIDEO_CC_MVS0_CORE_CLK 2 +#define VIDEO_CC_MVS1_CORE_CLK 3 +#define VIDEO_CC_MVSC_CORE_CLK 4 +#define VIDEO_CC_PLL0 5 + +/* VIDEO_CC Resets */ +#define VIDEO_CC_MVSC_CORE_CLK_BCR 0 + +/* VIDEO_CC GDSCRs */ +#define VENUS_GDSC 0 +#define VCODEC0_GDSC 1 +#define VCODEC1_GDSC 2 + +#endif diff --git a/include/dt-bindings/clock/qcom,videocc-sm8250.h b/include/dt-bindings/clock/qcom,videocc-sm8250.h new file mode 100644 index 000000000000..2b2b3867af25 --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-sm8250.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8250_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8250_H + +/* VIDEO_CC clocks */ +#define VIDEO_CC_MVS0_CLK_SRC 0 +#define VIDEO_CC_MVS0C_CLK 1 +#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 2 +#define VIDEO_CC_MVS1_CLK_SRC 3 +#define VIDEO_CC_MVS1_DIV2_CLK 4 +#define VIDEO_CC_MVS1C_CLK 5 +#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 6 +#define VIDEO_CC_PLL0 7 +#define VIDEO_CC_PLL1 8 + +/* VIDEO_CC resets */ +#define VIDEO_CC_CVP_INTERFACE_BCR 0 +#define VIDEO_CC_CVP_MVS0_BCR 1 +#define VIDEO_CC_MVS0C_CLK_ARES 2 +#define VIDEO_CC_CVP_MVS0C_BCR 3 +#define VIDEO_CC_CVP_MVS1_BCR 4 +#define VIDEO_CC_MVS1C_CLK_ARES 5 +#define VIDEO_CC_CVP_MVS1C_BCR 6 + +#define MVS0C_GDSC 0 +#define MVS1C_GDSC 1 +#define MVS0_GDSC 2 +#define MVS1_GDSC 3 + +#endif diff --git a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h new file mode 100644 index 000000000000..f1d737ca7ca1 --- /dev/null +++ b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a779A0 CPG Core Clocks */ +#define R8A779A0_CLK_Z0 0 +#define R8A779A0_CLK_ZX 1 +#define R8A779A0_CLK_Z1 2 +#define R8A779A0_CLK_ZR 3 +#define R8A779A0_CLK_ZS 4 +#define R8A779A0_CLK_ZT 5 +#define R8A779A0_CLK_ZTR 6 +#define R8A779A0_CLK_S1D1 7 +#define R8A779A0_CLK_S1D2 8 +#define R8A779A0_CLK_S1D4 9 +#define R8A779A0_CLK_S1D8 10 +#define R8A779A0_CLK_S1D12 11 +#define R8A779A0_CLK_S3D1 12 +#define R8A779A0_CLK_S3D2 13 +#define R8A779A0_CLK_S3D4 14 +#define R8A779A0_CLK_LB 15 +#define R8A779A0_CLK_CP 16 +#define R8A779A0_CLK_CL 17 +#define R8A779A0_CLK_CL16MCK 18 +#define R8A779A0_CLK_ZB30 19 +#define R8A779A0_CLK_ZB30D2 20 +#define R8A779A0_CLK_ZB30D4 21 +#define R8A779A0_CLK_ZB31 22 +#define R8A779A0_CLK_ZB31D2 23 +#define R8A779A0_CLK_ZB31D4 24 +#define R8A779A0_CLK_SD0H 25 +#define R8A779A0_CLK_SD0 26 +#define R8A779A0_CLK_RPC 27 +#define R8A779A0_CLK_RPCD2 28 +#define R8A779A0_CLK_MSO 29 +#define R8A779A0_CLK_CANFD 30 +#define R8A779A0_CLK_CSI0 31 +#define R8A779A0_CLK_FRAY 32 +#define R8A779A0_CLK_DSI 33 +#define R8A779A0_CLK_VIP 34 +#define R8A779A0_CLK_ADGH 35 +#define R8A779A0_CLK_CNNDSP 36 +#define R8A779A0_CLK_ICU 37 +#define R8A779A0_CLK_ICUD2 38 +#define R8A779A0_CLK_VCBUS 39 +#define R8A779A0_CLK_CBFUSA 40 +#define R8A779A0_CLK_R 41 +#define R8A779A0_CLK_OSC 42 + +#endif /* __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/sun50i-a100-ccu.h b/include/dt-bindings/clock/sun50i-a100-ccu.h new file mode 100644 index 000000000000..28dc36e1a232 --- /dev/null +++ b/include/dt-bindings/clock/sun50i-a100-ccu.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com> + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_A100_H_ +#define _DT_BINDINGS_CLK_SUN50I_A100_H_ + +#define CLK_PLL_PERIPH0 3 + +#define CLK_CPUX 24 + +#define CLK_APB1 29 + +#define CLK_MBUS 31 +#define CLK_DE 32 +#define CLK_BUS_DE 33 +#define CLK_G2D 34 +#define CLK_BUS_G2D 35 +#define CLK_GPU 36 +#define CLK_BUS_GPU 37 +#define CLK_CE 38 +#define CLK_BUS_CE 39 +#define CLK_VE 40 +#define CLK_BUS_VE 41 +#define CLK_BUS_DMA 42 +#define CLK_BUS_MSGBOX 43 +#define CLK_BUS_SPINLOCK 44 +#define CLK_BUS_HSTIMER 45 +#define CLK_AVS 46 +#define CLK_BUS_DBG 47 +#define CLK_BUS_PSI 48 +#define CLK_BUS_PWM 49 +#define CLK_BUS_IOMMU 50 +#define CLK_MBUS_DMA 51 +#define CLK_MBUS_VE 52 +#define CLK_MBUS_CE 53 +#define CLK_MBUS_NAND 54 +#define CLK_MBUS_CSI 55 +#define CLK_MBUS_ISP 56 +#define CLK_MBUS_G2D 57 + +#define CLK_NAND0 59 +#define CLK_NAND1 60 +#define CLK_BUS_NAND 61 +#define CLK_MMC0 62 +#define CLK_MMC1 63 +#define CLK_MMC2 64 +#define CLK_MMC3 65 +#define CLK_BUS_MMC0 66 +#define CLK_BUS_MMC1 67 +#define CLK_BUS_MMC2 68 +#define CLK_BUS_UART0 69 +#define CLK_BUS_UART1 70 +#define CLK_BUS_UART2 71 +#define CLK_BUS_UART3 72 +#define CLK_BUS_UART4 73 +#define CLK_BUS_I2C0 74 +#define CLK_BUS_I2C1 75 +#define CLK_BUS_I2C2 76 +#define CLK_BUS_I2C3 77 +#define CLK_SPI0 78 +#define CLK_SPI1 79 +#define CLK_SPI2 80 +#define CLK_BUS_SPI0 81 +#define CLK_BUS_SPI1 82 +#define CLK_BUS_SPI2 83 +#define CLK_EMAC_25M 84 +#define CLK_BUS_EMAC 85 +#define CLK_IR_RX 86 +#define CLK_BUS_IR_RX 87 +#define CLK_IR_TX 88 +#define CLK_BUS_IR_TX 89 +#define CLK_BUS_GPADC 90 +#define CLK_BUS_THS 91 +#define CLK_I2S0 92 +#define CLK_I2S1 93 +#define CLK_I2S2 94 +#define CLK_I2S3 95 
+#define CLK_BUS_I2S0 96 +#define CLK_BUS_I2S1 97 +#define CLK_BUS_I2S2 98 +#define CLK_BUS_I2S3 99 +#define CLK_SPDIF 100 +#define CLK_BUS_SPDIF 101 +#define CLK_DMIC 102 +#define CLK_BUS_DMIC 103 +#define CLK_AUDIO_DAC 104 +#define CLK_AUDIO_ADC 105 +#define CLK_AUDIO_4X 106 +#define CLK_BUS_AUDIO_CODEC 107 +#define CLK_USB_OHCI0 108 +#define CLK_USB_PHY0 109 +#define CLK_USB_OHCI1 110 +#define CLK_USB_PHY1 111 +#define CLK_BUS_OHCI0 112 +#define CLK_BUS_OHCI1 113 +#define CLK_BUS_EHCI0 114 +#define CLK_BUS_EHCI1 115 +#define CLK_BUS_OTG 116 +#define CLK_BUS_LRADC 117 +#define CLK_BUS_DPSS_TOP0 118 +#define CLK_BUS_DPSS_TOP1 119 +#define CLK_MIPI_DSI 120 +#define CLK_BUS_MIPI_DSI 121 +#define CLK_TCON_LCD 122 +#define CLK_BUS_TCON_LCD 123 +#define CLK_LEDC 124 +#define CLK_BUS_LEDC 125 +#define CLK_CSI_TOP 126 +#define CLK_CSI0_MCLK 127 +#define CLK_CSI1_MCLK 128 +#define CLK_BUS_CSI 129 +#define CLK_CSI_ISP 130 + +#endif /* _DT_BINDINGS_CLK_SUN50I_A100_H_ */ diff --git a/include/dt-bindings/clock/sun50i-a100-r-ccu.h b/include/dt-bindings/clock/sun50i-a100-r-ccu.h new file mode 100644 index 000000000000..07312e7264fb --- /dev/null +++ b/include/dt-bindings/clock/sun50i-a100-r-ccu.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com> + */ + +#ifndef _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_ +#define _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_ + +#define CLK_R_APB1 2 + +#define CLK_R_APB1_TIMER 4 +#define CLK_R_APB1_TWD 5 +#define CLK_R_APB1_PWM 6 +#define CLK_R_APB1_BUS_PWM 7 +#define CLK_R_APB1_PPU 8 +#define CLK_R_APB2_UART 9 +#define CLK_R_APB2_I2C0 10 +#define CLK_R_APB2_I2C1 11 +#define CLK_R_APB1_IR 12 +#define CLK_R_APB1_BUS_IR 13 +#define CLK_R_AHB_BUS_RTC 14 + +#endif /* _DT_BINDINGS_CLK_SUN50I_A100_R_CCU_H_ */ diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 0f2d60e884dc..373644e46747 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -196,6 +196,7 @@ #define VF610_CLK_TCON0 187 #define VF610_CLK_TCON1 188 #define VF610_CLK_CAAM 189 -#define VF610_CLK_END 190 +#define VF610_CLK_CRC 190 +#define VF610_CLK_END 191 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/power/r8a779a0-sysc.h b/include/dt-bindings/power/r8a779a0-sysc.h new file mode 100644 index 000000000000..57929e459a67 --- /dev/null +++ b/include/dt-bindings/power/r8a779a0-sysc.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_POWER_R8A779A0_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A779A0_SYSC_H__ + +/* + * These power domain indices match the Power Domain Register Numbers (PDR) + */ + +#define R8A779A0_PD_A1E0D0C0 0 +#define R8A779A0_PD_A1E0D0C1 1 +#define R8A779A0_PD_A1E0D1C0 2 +#define R8A779A0_PD_A1E0D1C1 3 +#define R8A779A0_PD_A1E1D0C0 4 +#define R8A779A0_PD_A1E1D0C1 5 +#define R8A779A0_PD_A1E1D1C0 6 +#define R8A779A0_PD_A1E1D1C1 7 +#define R8A779A0_PD_A2E0D0 16 +#define R8A779A0_PD_A2E0D1 17 +#define R8A779A0_PD_A2E1D0 18 +#define R8A779A0_PD_A2E1D1 19 +#define R8A779A0_PD_A3E0 20 +#define R8A779A0_PD_A3E1 21 +#define R8A779A0_PD_3DG_A 24 +#define R8A779A0_PD_3DG_B 25 +#define R8A779A0_PD_A1CNN2 32 +#define R8A779A0_PD_A1DSP0 33 +#define R8A779A0_PD_A2IMP01 34 +#define R8A779A0_PD_A2DP0 35 +#define R8A779A0_PD_A2CV0 36 +#define R8A779A0_PD_A2CV1 37 +#define R8A779A0_PD_A2CV4 38 +#define R8A779A0_PD_A2CV6 39 +#define R8A779A0_PD_A2CN2 40 +#define R8A779A0_PD_A1CNN0 41 +#define R8A779A0_PD_A2CN0 42 +#define R8A779A0_PD_A3IR 43 +#define R8A779A0_PD_A1CNN1 44 +#define R8A779A0_PD_A1DSP1 45 +#define R8A779A0_PD_A2IMP23 46 +#define R8A779A0_PD_A2DP1 47 +#define R8A779A0_PD_A2CV2 48 +#define R8A779A0_PD_A2CV3 49 +#define R8A779A0_PD_A2CV5 50 +#define R8A779A0_PD_A2CV7 51 +#define R8A779A0_PD_A2CN1 52 +#define R8A779A0_PD_A3VIP0 56 +#define R8A779A0_PD_A3VIP1 57 +#define R8A779A0_PD_A3VIP2 58 +#define R8A779A0_PD_A3VIP3 59 +#define R8A779A0_PD_A3ISP01 60 +#define R8A779A0_PD_A3ISP23 61 + +/* Always-on power area */ +#define R8A779A0_PD_ALWAYS_ON 64 + +#endif /* __DT_BINDINGS_POWER_R8A779A0_SYSC_H__ */ diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h new file mode 100644 index 000000000000..d918bf321a71 --- /dev/null +++ b/include/dt-bindings/power/summit,smb347-charger.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later or MIT) */ +/* + * Author: David Heidelberg <david@ixit.cz> + */ + +#ifndef _DT_BINDINGS_SMB347_CHARGER_H +#define _DT_BINDINGS_SMB347_CHARGER_H + +/* Charging compensation method */ +#define SMB3XX_SOFT_TEMP_COMPENSATE_NONE 0 +#define SMB3XX_SOFT_TEMP_COMPENSATE_CURRENT 1 +#define SMB3XX_SOFT_TEMP_COMPENSATE_VOLTAGE 2 + +/* Charging enable control */ +#define SMB3XX_CHG_ENABLE_SW 0 +#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW 1 +#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH 2 + +#endif diff --git a/include/dt-bindings/reset/sun50i-a100-ccu.h b/include/dt-bindings/reset/sun50i-a100-ccu.h new file mode 100644 index 000000000000..55c0ada99885 --- /dev/null +++ b/include/dt-bindings/reset/sun50i-a100-ccu.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com> + */ + +#ifndef _DT_BINDINGS_RESET_SUN50I_A100_H_ +#define _DT_BINDINGS_RESET_SUN50I_A100_H_ + +#define RST_MBUS 0 +#define RST_BUS_DE 1 +#define RST_BUS_G2D 2 +#define RST_BUS_GPU 3 +#define RST_BUS_CE 4 +#define RST_BUS_VE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MSGBOX 7 +#define RST_BUS_SPINLOCK 8 +#define RST_BUS_HSTIMER 9 +#define RST_BUS_DBG 10 +#define RST_BUS_PSI 11 +#define RST_BUS_PWM 12 +#define RST_BUS_DRAM 13 +#define RST_BUS_NAND 14 +#define RST_BUS_MMC0 15 +#define RST_BUS_MMC1 16 +#define RST_BUS_MMC2 17 +#define RST_BUS_UART0 18 +#define RST_BUS_UART1 19 +#define RST_BUS_UART2 20 +#define RST_BUS_UART3 21 +#define RST_BUS_UART4 22 +#define RST_BUS_I2C0 23 +#define RST_BUS_I2C1 24 +#define RST_BUS_I2C2 25 +#define RST_BUS_I2C3 26 +#define RST_BUS_SPI0 27 
+#define RST_BUS_SPI1 28 +#define RST_BUS_SPI2 29 +#define RST_BUS_EMAC 30 +#define RST_BUS_IR_RX 31 +#define RST_BUS_IR_TX 32 +#define RST_BUS_GPADC 33 +#define RST_BUS_THS 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_I2S2 37 +#define RST_BUS_I2S3 38 +#define RST_BUS_SPDIF 39 +#define RST_BUS_DMIC 40 +#define RST_BUS_AUDIO_CODEC 41 +#define RST_USB_PHY0 42 +#define RST_USB_PHY1 43 +#define RST_BUS_OHCI0 44 +#define RST_BUS_OHCI1 45 +#define RST_BUS_EHCI0 46 +#define RST_BUS_EHCI1 47 +#define RST_BUS_OTG 48 +#define RST_BUS_LRADC 49 +#define RST_BUS_DPSS_TOP0 50 +#define RST_BUS_DPSS_TOP1 51 +#define RST_BUS_MIPI_DSI 52 +#define RST_BUS_TCON_LCD 53 +#define RST_BUS_LVDS 54 +#define RST_BUS_LEDC 55 +#define RST_BUS_CSI 56 +#define RST_BUS_CSI_ISP 57 + +#endif /* _DT_BINDINGS_RESET_SUN50I_A100_H_ */ diff --git a/include/dt-bindings/reset/sun50i-a100-r-ccu.h b/include/dt-bindings/reset/sun50i-a100-r-ccu.h new file mode 100644 index 000000000000..737bf6f66626 --- /dev/null +++ b/include/dt-bindings/reset/sun50i-a100-r-ccu.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com> + */ + +#ifndef _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_ +#define _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_ + +#define RST_R_APB1_TIMER 0 +#define RST_R_APB1_BUS_PWM 1 +#define RST_R_APB1_PPU 2 +#define RST_R_APB2_UART 3 +#define RST_R_APB2_I2C0 4 +#define RST_R_APB2_I2C1 5 +#define RST_R_APB1_BUS_IR 6 +#define RST_R_AHB_BUS_RTC 7 + +#endif /* _DT_BINDINGS_RST_SUN50I_A100_R_CCU_H_ */ diff --git a/include/kunit/test.h b/include/kunit/test.h index 3391f38389f8..a423fffefea0 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -25,6 +25,7 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); /** * struct kunit_resource - represents a *test managed resource* * @data: for the user to store arbitrary data. + * @name: optional name * @free: a user supplied function to free the resource. Populated by * kunit_resource_alloc(). * @@ -80,10 +81,10 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); */ struct kunit_resource { void *data; - const char *name; /* optional name */ + const char *name; + kunit_resource_free_t free; /* private: internal use only. */ - kunit_resource_free_t free; struct kref refcount; struct list_head node; }; @@ -238,10 +239,19 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite); unsigned int kunit_test_case_num(struct kunit_suite *suite, struct kunit_case *test_case); -int __kunit_test_suites_init(struct kunit_suite **suites); +int __kunit_test_suites_init(struct kunit_suite * const * const suites); void __kunit_test_suites_exit(struct kunit_suite **suites); +#if IS_BUILTIN(CONFIG_KUNIT) +int kunit_run_all_tests(void); +#else +static inline int kunit_run_all_tests(void) +{ + return 0; +} +#endif /* IS_BUILTIN(CONFIG_KUNIT) */ + /** * kunit_test_suites() - used to register one or more &struct kunit_suite * with KUnit. @@ -251,34 +261,57 @@ void __kunit_test_suites_exit(struct kunit_suite **suites); * Registers @suites_list with the test framework. See &struct kunit_suite for * more information. * - * When builtin, KUnit tests are all run as late_initcalls; this means - * that they cannot test anything where tests must run at a different init - * phase. 
One significant restriction resulting from this is that KUnit - * cannot reliably test anything that is initialize in the late_init phase; - * another is that KUnit is useless to test things that need to be run in - * an earlier init phase. - * - * An alternative is to build the tests as a module. Because modules - * do not support multiple late_initcall()s, we need to initialize an - * array of suites for a module. - * - * TODO(brendanhiggins@google.com): Don't run all KUnit tests as - * late_initcalls. I have some future work planned to dispatch all KUnit - * tests from the same place, and at the very least to do so after - * everything else is definitely initialized. + * If a test suite is built-in, module_init() gets translated into + * an initcall which we don't want as the idea is that for builtins + * the executor will manage execution. So ensure we do not define + * module_{init|exit} functions for the builtin case when registering + * suites via kunit_test_suites() below. */ -#define kunit_test_suites(suites_list...) \ - static struct kunit_suite *suites[] = {suites_list, NULL}; \ - static int kunit_test_suites_init(void) \ +#ifdef MODULE +#define kunit_test_suites_for_module(__suites) \ + static int __init kunit_test_suites_init(void) \ { \ - return __kunit_test_suites_init(suites); \ + return __kunit_test_suites_init(__suites); \ } \ - late_initcall(kunit_test_suites_init); \ + module_init(kunit_test_suites_init); \ + \ static void __exit kunit_test_suites_exit(void) \ { \ - return __kunit_test_suites_exit(suites); \ + return __kunit_test_suites_exit(__suites); \ } \ module_exit(kunit_test_suites_exit) +#else +#define kunit_test_suites_for_module(__suites) +#endif /* MODULE */ + +#define __kunit_test_suites(unique_array, unique_suites, ...) \ + static struct kunit_suite *unique_array[] = { __VA_ARGS__, NULL }; \ + kunit_test_suites_for_module(unique_array); \ + static struct kunit_suite **unique_suites \ + __used __section(.kunit_test_suites) = unique_array + +/** + * kunit_test_suites() - used to register one or more &struct kunit_suite + * with KUnit. + * + * @suites: a statically allocated list of &struct kunit_suite. + * + * Registers @suites with the test framework. See &struct kunit_suite for + * more information. + * + * When builtin, KUnit tests are all run via executor; this is done + * by placing the array of struct kunit_suite * in the .kunit_test_suites + * ELF section. + * + * An alternative is to build the tests as a module. Because modules do not + * support multiple initcall()s, we need to initialize an array of suites for a + * module. + * + */ +#define kunit_test_suites(...) \ + __kunit_test_suites(__UNIQUE_ID(array), \ + __UNIQUE_ID(suites), \ + __VA_ARGS__) #define kunit_test_suite(suite) kunit_test_suites(&suite) @@ -348,6 +381,7 @@ static inline void kunit_put_resource(struct kunit_resource *res) * none is supplied, the resource data value is simply set to @data. * If an init function is supplied, @data is passed to it instead. * @free: a user-supplied function to free the resource (if needed). + * @res: The resource. * @data: value to pass to init function or set in resource data field. */ int kunit_add_resource(struct kunit *test, @@ -361,7 +395,9 @@ int kunit_add_resource(struct kunit *test, * @test: The test context object. * @init: a user-supplied function to initialize the resource data, if needed. * @free: a user-supplied function to free the resource data, if needed. - * @name_data: name and data to be set for resource. + * @res: The resource. 
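Illustrative sketch (not taken from this diff) of how a test author uses the kunit_test_suites() macro described above: when built in, the suite pointer is placed in the .kunit_test_suites ELF section (between __kunit_suites_start and __kunit_suites_end, see the KUNIT_TABLE() linker macro earlier in this diff) and run by the executor; when built as a module, the generated module_init()/module_exit() register and unregister it instead. All names below are hypothetical.

#include <kunit/test.h>

static void example_add_test(struct kunit *test)
{
	/* Basic expectation macro provided by KUnit. */
	KUNIT_EXPECT_EQ(test, 3, 1 + 2);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_add_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

/* Expands to section placement when builtin, module_init/exit when modular. */
kunit_test_suites(&example_test_suite);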
+ * @name: name to be set for resource. + * @data: value to pass to init function or set in resource data field. */ int kunit_add_named_resource(struct kunit *test, kunit_resource_init_t init, @@ -499,8 +535,8 @@ static inline int kunit_destroy_named_resource(struct kunit *test, } /** - * kunit_remove_resource: remove resource from resource list associated with - * test. + * kunit_remove_resource() - remove resource from resource list associated with + * test. * @test: The test context object. * @res: The resource to be removed. * diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index dbf4f08d42e5..1d94acd0bc85 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -35,6 +35,7 @@ struct kvm_pmu { u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1); void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -109,6 +110,10 @@ static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) { return 0; } +static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) +{ + return 0; +} #endif #endif diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 15c706fb0a37..885c9ffc835c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -49,6 +49,7 @@ #define ARM_SMCCC_OWNER_OEM 3 #define ARM_SMCCC_OWNER_STANDARD 4 #define ARM_SMCCC_OWNER_STANDARD_HYP 5 +#define ARM_SMCCC_OWNER_VENDOR_HYP 6 #define ARM_SMCCC_OWNER_TRUSTED_APP 48 #define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 #define ARM_SMCCC_OWNER_TRUSTED_OS 50 @@ -227,87 +228,67 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __count_args(...) 
\ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) -#define __constraint_write_0 \ - "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) -#define __constraint_write_1 \ - "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) -#define __constraint_write_2 \ - "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) -#define __constraint_write_3 \ - "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) -#define __constraint_write_4 __constraint_write_3 -#define __constraint_write_5 __constraint_write_4 -#define __constraint_write_6 __constraint_write_5 -#define __constraint_write_7 __constraint_write_6 - -#define __constraint_read_0 -#define __constraint_read_1 -#define __constraint_read_2 -#define __constraint_read_3 -#define __constraint_read_4 "r" (r4) -#define __constraint_read_5 __constraint_read_4, "r" (r5) -#define __constraint_read_6 __constraint_read_5, "r" (r6) -#define __constraint_read_7 __constraint_read_6, "r" (r7) +#define __constraint_read_0 "r" (arg0) +#define __constraint_read_1 __constraint_read_0, "r" (arg1) +#define __constraint_read_2 __constraint_read_1, "r" (arg2) +#define __constraint_read_3 __constraint_read_2, "r" (arg3) +#define __constraint_read_4 __constraint_read_3, "r" (arg4) +#define __constraint_read_5 __constraint_read_4, "r" (arg5) +#define __constraint_read_6 __constraint_read_5, "r" (arg6) +#define __constraint_read_7 __constraint_read_6, "r" (arg7) #define __declare_arg_0(a0, res) \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1"); \ - register unsigned long r2 asm("r2"); \ - register unsigned long r3 asm("r3") + register unsigned long arg0 asm("r0") = (u32)a0 #define __declare_arg_1(a0, a1, res) \ typeof(a1) __a1 = a1; \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = __a1; \ - register unsigned long r2 asm("r2"); \ - register unsigned long r3 asm("r3") + register unsigned long arg0 asm("r0") = (u32)a0; \ + register typeof(a1) arg1 asm("r1") = __a1 #define __declare_arg_2(a0, a1, a2, res) \ typeof(a1) __a1 = a1; \ typeof(a2) __a2 = a2; \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = __a1; \ - register unsigned long r2 asm("r2") = __a2; \ - register unsigned long r3 asm("r3") + register unsigned long arg0 asm("r0") = (u32)a0; \ + register typeof(a1) arg1 asm("r1") = __a1; \ + register typeof(a2) arg2 asm("r2") = __a2 #define __declare_arg_3(a0, a1, a2, a3, res) \ typeof(a1) __a1 = a1; \ typeof(a2) __a2 = a2; \ typeof(a3) __a3 = a3; \ struct arm_smccc_res *___res = res; \ - register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = __a1; \ - register unsigned long r2 asm("r2") = __a2; \ - register unsigned long r3 asm("r3") = __a3 + register unsigned long arg0 asm("r0") = (u32)a0; \ + register typeof(a1) arg1 asm("r1") = __a1; \ + register typeof(a2) arg2 asm("r2") = __a2; \ + register typeof(a3) arg3 asm("r3") = __a3 #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ typeof(a4) __a4 = a4; \ __declare_arg_3(a0, a1, a2, a3, res); \ - register unsigned long r4 asm("r4") = __a4 + register typeof(a4) arg4 asm("r4") = __a4 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ typeof(a5) __a5 = a5; \ __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register unsigned long r5 asm("r5") = __a5 + register typeof(a5) arg5 asm("r5") = __a5 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ typeof(a6) __a6 = a6; \ 
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register unsigned long r6 asm("r6") = __a6 + register typeof(a6) arg6 asm("r6") = __a6 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ typeof(a7) __a7 = a7; \ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register unsigned long r7 asm("r7") = __a7 + register typeof(a7) arg7 asm("r7") = __a7 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) #define ___constraints(count) \ - : __constraint_write_ ## count \ : __constraint_read_ ## count \ : "memory" #define __constraints(count) ___constraints(count) @@ -319,8 +300,13 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, */ #define __arm_smccc_1_1(inst, ...) \ do { \ + register unsigned long r0 asm("r0"); \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3"); \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile(inst "\n" \ + asm volatile(inst "\n" : \ + "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ *___res = (typeof(*___res)){r0, r1, r2, r3}; \ @@ -366,7 +352,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __fail_smccc_1_1(...) \ do { \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm ("" __constraints(__count_args(__VA_ARGS__))); \ + asm ("" : __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \ } while (0) diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h index b0f4424f34fc..f8254fd53e15 100644 --- a/include/linux/bcm47xx_sprom.h +++ b/include/linux/bcm47xx_sprom.h @@ -9,9 +9,19 @@ #include <linux/kernel.h> #include <linux/vmalloc.h> +struct ssb_sprom; + #ifdef CONFIG_BCM47XX_SPROM +void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix, + bool fallback); int bcm47xx_sprom_register_fallbacks(void); #else +static inline void bcm47xx_fill_sprom(struct ssb_sprom *sprom, + const char *prefix, + bool fallback) +{ +} + static inline int bcm47xx_sprom_register_fallbacks(void) { return -ENOTSUPP; diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h index b87945cb6946..7edb809a2586 100644 --- a/include/linux/bcm963xx_tag.h +++ b/include/linux/bcm963xx_tag.h @@ -84,7 +84,7 @@ struct bcm_tag { char flash_layout_ver[FLASHLAYOUTVER_LEN]; /* 196-199: kernel+rootfs CRC32 */ __u32 fskernel_crc; - /* 200-215: Unused except on Alice Gate where is is information */ + /* 200-215: Unused except on Alice Gate where it is information */ char information2[TAGINFO2_LEN]; /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */ __u32 image_crc; diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 99f2ac30b1d9..5b74bdf159d6 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -188,12 +188,10 @@ static inline unsigned fls_long(unsigned long l) static inline int get_count_order(unsigned int count) { - int order; + if (count == 0) + return -1; - order = fls(count) - 1; - if (count & (count - 1)) - order++; - return order; + return fls(--count); } /** @@ -206,10 +204,7 @@ static inline int get_count_order_long(unsigned long l) { if (l == 0UL) return -1; - else if (l & (l - 1UL)) - return (int)fls_long(l); - else - return (int)fls_long(l) - 1; + return (int)fls_long(--l); } /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 
c09375e0a0eb..639cae2c158b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -8,6 +8,7 @@ #include <linux/genhd.h> #include <linux/list.h> #include <linux/llist.h> +#include <linux/minmax.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/pagemap.h> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 2f98d2fce62e..ed71bd1a0825 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -136,7 +136,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, - void **buf, size_t *pcount, loff_t *ppos, + char **buf, size_t *pcount, loff_t *ppos, enum bpf_attach_type type); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, diff --git a/include/linux/bvec.h b/include/linux/bvec.h index dd74503f7e5e..2efec10bf792 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -7,10 +7,14 @@ #ifndef __LINUX_BVEC_ITER_H #define __LINUX_BVEC_ITER_H -#include <linux/kernel.h> #include <linux/bug.h> #include <linux/errno.h> +#include <linux/limits.h> +#include <linux/minmax.h> #include <linux/mm.h> +#include <linux/types.h> + +struct page; /** * struct bio_vec - a contiguous range of physical memory addresses diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 76371aaae2d1..60b324efd1c4 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -54,7 +54,7 @@ struct ceph_connection_operations { int (*check_message_signature) (struct ceph_msg *msg); }; -/* use format string %s%d */ +/* use format string %s%lld */ #define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) struct ceph_messenger { diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index ce4ffeb384d7..b658961156a0 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -142,7 +142,7 @@ int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, ceph_monc_callback_t cb, u64 private_data); -int ceph_monc_blacklist_add(struct ceph_mon_client *monc, +int ceph_monc_blocklist_add(struct ceph_mon_client *monc, struct ceph_entity_addr *client_addr); extern int ceph_monc_open_session(struct ceph_mon_client *monc); diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 3f4498fef6ad..cad9acfbc320 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -137,6 +137,17 @@ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, const char *fmt, ...); void ceph_oid_destroy(struct ceph_object_id *oid); +struct workspace_manager { + struct list_head idle_ws; + spinlock_t ws_lock; + /* Number of free workspaces */ + int free_ws; + /* Total number of allocated workspaces */ + atomic_t total_ws; + /* Waiters for a free workspace */ + wait_queue_head_t ws_wait; +}; + struct ceph_pg_mapping { struct rb_node node; struct ceph_pg pgid; @@ -184,8 +195,7 @@ struct ceph_osdmap { * the list of osds that store+replicate them. 
*/ struct crush_map *crush; - struct mutex crush_workspace_mutex; - void *crush_workspace; + struct workspace_manager crush_wsm; }; static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 3a518fd0eaad..43a7a1573b51 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -424,7 +424,7 @@ enum { }; #define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ -#define EBLACKLISTED ESHUTDOWN /* blacklisted */ +#define EBLOCKLISTED ESHUTDOWN /* blocklisted */ /* xattr comparison */ enum { diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 7a899e83835d..e58e8c207782 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -7,6 +7,12 @@ #include <linux/fs.h> #include <asm/siginfo.h> +struct core_vma_metadata { + unsigned long start, end; + unsigned long flags; + unsigned long dump_size; +}; + /* * These are the only things you should do on a core-file: use only these * functions to write out all the necessary info. @@ -16,6 +22,11 @@ extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); extern void dump_truncate(struct coredump_params *cprm); +int dump_user_range(struct coredump_params *cprm, unsigned long start, + unsigned long len); +int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, + struct core_vma_metadata **vma_meta, + size_t *vma_data_size_ptr); #ifdef CONFIG_COREDUMP extern void do_coredump(const kernel_siginfo_t *siginfo); #else diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6f524bbf71a2..bc56287a1ed1 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -183,6 +183,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 2f811baf78d2..30dba392b730 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -346,6 +346,9 @@ struct crush_work_bucket { struct crush_work { struct crush_work_bucket **work; /* Per-bucket working store */ +#ifdef __KERNEL__ + struct list_head item; +#endif }; #ifdef __KERNEL__ diff --git a/include/linux/dax.h b/include/linux/dax.h index e15357223565..b52f084aa643 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -149,6 +149,7 @@ int dax_writeback_mapping_range(struct address_space *mapping, struct dax_device *dax_dev, struct writeback_control *wbc); struct page *dax_layout_busy_page(struct address_space *mapping); +struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); dax_entry_t dax_lock_page(struct page *page); void dax_unlock_page(struct page *page, dax_entry_t cookie); #else @@ -179,6 +180,11 @@ static inline struct page *dax_layout_busy_page(struct address_space *mapping) return NULL; } +static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages) +{ + return NULL; +} + static inline int dax_writeback_mapping_range(struct address_space *mapping, struct dax_device *dax_dev, struct writeback_control *wbc) { diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 
2f4a74efa6be..121a2430d7f7 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -228,12 +228,7 @@ int devfreq_resume_device(struct devfreq *devfreq); void devfreq_suspend(void); void devfreq_resume(void); -/** - * update_devfreq() - Reevaluate the device and configure frequency - * @devfreq: the devfreq device - * - * Note: devfreq->lock must be held - */ +/* update_devfreq() - Reevaluate the device and configure frequency */ int update_devfreq(struct devfreq *devfreq); /* Helper functions for devfreq user device driver with OPP. */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 3c383ddd92dd..a5dbb57a687f 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -38,9 +38,6 @@ #define F2FS_MAX_QUOTAS 3 #define F2FS_ENC_UTF8_12_1 1 -#define F2FS_ENC_STRICT_MODE_FL (1 << 0) -#define f2fs_has_strict_mode(sbi) \ - (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL) #define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ #define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ diff --git a/include/linux/fault-inject-usercopy.h b/include/linux/fault-inject-usercopy.h new file mode 100644 index 000000000000..56c3a693fdd9 --- /dev/null +++ b/include/linux/fault-inject-usercopy.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_FAULT_INJECT_USERCOPY_H__ +#define __LINUX_FAULT_INJECT_USERCOPY_H__ + +/* + * This header provides a wrapper for injecting failures to user space memory + * access functions. + */ + +#include <linux/types.h> + +#ifdef CONFIG_FAULT_INJECTION_USERCOPY + +bool should_fail_usercopy(void); + +#else + +static inline bool should_fail_usercopy(void) { return false; } + +#endif /* CONFIG_FAULT_INJECTION_USERCOPY */ + +#endif /* __LINUX_FAULT_INJECT_USERCOPY_H__ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index ae97d87a00d2..16e3789634d3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1366,6 +1366,12 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_ACTIVE (1<<30) #define SB_NOUSER (1<<31) +/* These flags relate to encoding and casefolding */ +#define SB_ENC_STRICT_MODE_FL (1 << 0) + +#define sb_has_strict_encoding(sb) \ + (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL) + /* * Umount options */ @@ -1436,6 +1442,10 @@ struct super_block { #ifdef CONFIG_FS_VERITY const struct fsverity_operations *s_vop; #endif +#ifdef CONFIG_UNICODE + struct unicode_map *s_encoding; + __u16 s_encoding_flags; +#endif struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; @@ -1884,8 +1894,6 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma) extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); -extern ssize_t vfs_readv(struct file *, const struct iovec __user *, - unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, @@ -2209,6 +2217,7 @@ struct file_system_type { #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ +#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() 
during rename() internally. */ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; @@ -2696,33 +2705,6 @@ static inline errseq_t file_sample_sb_err(struct file *file) return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); } -static inline int filemap_nr_thps(struct address_space *mapping) -{ -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - return atomic_read(&mapping->nr_thps); -#else - return 0; -#endif -} - -static inline void filemap_nr_thps_inc(struct address_space *mapping) -{ -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - atomic_inc(&mapping->nr_thps); -#else - WARN_ON_ONCE(1); -#endif -} - -static inline void filemap_nr_thps_dec(struct address_space *mapping) -{ -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - atomic_dec(&mapping->nr_thps); -#else - WARN_ON_ONCE(1); -#endif -} - extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); @@ -2964,13 +2946,9 @@ extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); -extern int generic_remap_checks(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *count, unsigned int remap_flags); +extern int generic_write_check_limits(struct file *file, loff_t pos, + loff_t *count); extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); -extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - size_t *count, unsigned int flags); extern ssize_t generic_file_buffered_read(struct kiocb *iocb, struct iov_iter *to, ssize_t already_read); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); @@ -3215,6 +3193,12 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); +#ifdef CONFIG_UNICODE +extern int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str); +extern int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, + const char *str, const struct qstr *name); +#endif + #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, struct page *, struct page *, diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 1e4e0de4ef8b..1ef421818d3a 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -38,6 +38,18 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) return 0; } #endif +#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE) +struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter); +void i2c_free_slave_host_notify_device(struct i2c_client *client); +#else +static inline struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter) +{ + return ERR_PTR(-ENOSYS); +} +static inline void i2c_free_slave_host_notify_device(struct i2c_client *client) +{ +} +#endif #if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI) void i2c_register_spd(struct i2c_adapter *adap); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index fc55ea41d323..56622658b215 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -344,7 +344,7 @@ const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) { - struct 
device * const dev = container_of(kobj, struct device, kobj); + struct device * const dev = kobj_to_dev(kobj); return to_i2c_client(dev); } diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index 91a8612b8bf9..fb88e23a99d3 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -28,6 +28,6 @@ void idle_inject_get_duration(struct idle_inject_device *ii_dev, unsigned int *idle_duration_us); void idle_inject_set_latency(struct idle_inject_device *ii_dev, - unsigned int latency_ns); + unsigned int latency_us); #endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 3ade03e5c7af..a0dce14090a9 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -263,7 +263,8 @@ void ida_destroy(struct ida *ida); * * Allocate an ID between 0 and %INT_MAX, inclusive. * - * Context: Any context. + * Context: Any context. It is safe to call this function without + * locking in your code. * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, * or %-ENOSPC if there are no free IDs. */ @@ -280,7 +281,8 @@ static inline int ida_alloc(struct ida *ida, gfp_t gfp) * * Allocate an ID between @min and %INT_MAX, inclusive. * - * Context: Any context. + * Context: Any context. It is safe to call this function without + * locking in your code. * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, * or %-ENOSPC if there are no free IDs. */ @@ -297,7 +299,8 @@ static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) * * Allocate an ID between 0 and @max, inclusive. * - * Context: Any context. + * Context: Any context. It is safe to call this function without + * locking in your code. * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, * or %-ENOSPC if there are no free IDs. */ @@ -311,6 +314,10 @@ static inline void ida_init(struct ida *ida) xa_init_flags(&ida->xa, IDA_INIT_FLAGS); } +/* + * ida_simple_get() and ida_simple_remove() are deprecated. Use + * ida_alloc() and ida_free() instead respectively. + */ #define ida_simple_get(ida, start, end, gfp) \ ida_alloc_range(ida, start, (end) - 1, gfp) #define ida_simple_remove(ida, id) ida_free(ida, id) diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h index d25d1452dc6e..d0dddc14ebc8 100644 --- a/include/linux/input/sparse-keymap.h +++ b/include/linux/input/sparse-keymap.h @@ -20,6 +20,7 @@ * private definitions. * @code: Device-specific data identifying the button/switch * @keycode: KEY_* code assigned to a key/button + * @sw: struct with code/value used by KE_SW and KE_VSW * @sw.code: SW_* code assigned to a switch * @sw.value: Value that should be sent in an input even when KE_SW * switch is toggled. 
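The ida_simple_*() wrappers above are now documented as deprecated in favour of the plain allocators. A small before/after sketch for the common "any free id" case (names invented):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* Previously: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL) */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_id(int id)
{
	/* Previously: ida_simple_remove(&example_ida, id) */
	ida_free(&example_ida, id);
}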
KE_VSW switches ignore this field and diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 96315cfaf6d1..868364cea3b7 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -4,18 +4,33 @@ #include <linux/sched.h> #include <linux/xarray.h> -#include <linux/percpu-refcount.h> + +struct io_identity { + struct files_struct *files; + struct mm_struct *mm; +#ifdef CONFIG_BLK_CGROUP + struct cgroup_subsys_state *blkcg_css; +#endif + const struct cred *creds; + struct nsproxy *nsproxy; + struct fs_struct *fs; + unsigned long fsize; +#ifdef CONFIG_AUDIT + kuid_t loginuid; + unsigned int sessionid; +#endif + refcount_t count; +}; struct io_uring_task { /* submission side */ struct xarray xa; struct wait_queue_head wait; struct file *last; - atomic_long_t req_issue; - - /* completion side */ - bool in_idle ____cacheline_aligned_in_smp; - atomic_long_t req_complete; + struct percpu_counter inflight; + struct io_identity __identity; + struct io_identity *identity; + bool in_idle; }; #if defined(CONFIG_IO_URING) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 6c2b06fe8beb..5135d4b86cd6 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -58,6 +58,10 @@ struct resource { #define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */ #define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */ +/* IORESOURCE_SYSRAM specific bits. */ +#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */ +#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. */ + #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_DISABLED 0x10000000 @@ -103,7 +107,6 @@ struct resource { #define IORESOURCE_MEM_32BIT (3<<3) #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) -#define IORESOURCE_MEM_DRIVER_MANAGED (1<<7) /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) @@ -248,8 +251,10 @@ extern struct resource * __request_region(struct resource *, extern void __release_region(struct resource *, resource_size_t, resource_size_t); #ifdef CONFIG_MEMORY_HOTREMOVE -extern int release_mem_region_adjustable(struct resource *, resource_size_t, - resource_size_t); +extern void release_mem_region_adjustable(resource_size_t, resource_size_t); +#endif +#ifdef CONFIG_MEMORY_HOTPLUG +extern void merge_system_ram_resource(struct resource *res); #endif /* Wrappers for managed devices */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 08f904943ab2..fb3d71ad6eea 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -289,6 +289,7 @@ typedef struct journal_superblock_s #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 +#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020 /* See "journal feature predicate functions" below */ @@ -299,7 +300,8 @@ typedef struct journal_superblock_s JBD2_FEATURE_INCOMPAT_64BIT | \ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ - JBD2_FEATURE_INCOMPAT_CSUM_V3) + JBD2_FEATURE_INCOMPAT_CSUM_V3 | \ + JBD2_FEATURE_INCOMPAT_FAST_COMMIT) #ifdef __KERNEL__ @@ -452,8 +454,8 @@ struct jbd2_inode { struct jbd2_revoke_table_s; /** - * struct handle_s - The handle_s type is the concrete type associated with - * handle_t. 
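The two new IORESOURCE_SYSRAM modifier bits distinguish RAM that a driver added (dax/kmem style) from RAM the core is allowed to merge. A purely illustrative check, helper name made up:

#include <linux/ioport.h>

static bool example_ram_is_auto_mergeable(const struct resource *res)
{
	if (!(res->flags & IORESOURCE_SYSRAM))
		return false;
	/* Driver-managed System RAM must never be merged behind its back. */
	if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
		return false;
	return !!(res->flags & IORESOURCE_SYSRAM_MERGEABLE);
}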
+ * struct jbd2_journal_handle - The jbd2_journal_handle type is the concrete + * type associated with handle_t. * @h_transaction: Which compound transaction is this update a part of? * @h_journal: Which journal handle belongs to - used iff h_reserved set. * @h_rsv_handle: Handle reserved for finishing the logical operation. @@ -629,7 +631,9 @@ struct transaction_s struct journal_head *t_shadow_list; /* - * List of inodes whose data we've modified in data=ordered mode. + * List of inodes associated with the transaction; e.g., ext4 uses + * this to track inodes in data=ordered and data=journal mode that + * need special handling on transaction commit; also used by ocfs2. * [j_list_lock] */ struct list_head t_inode_list; @@ -747,6 +751,11 @@ jbd2_time_diff(unsigned long start, unsigned long end) #define JBD2_NR_BATCH 64 +enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; + +#define JBD2_FC_REPLAY_STOP 0 +#define JBD2_FC_REPLAY_CONTINUE 1 + /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. @@ -858,6 +867,13 @@ struct journal_s wait_queue_head_t j_wait_reserved; /** + * @j_fc_wait: + * + * Wait queue to wait for completion of async fast commits. + */ + wait_queue_head_t j_fc_wait; + + /** * @j_checkpoint_mutex: * * Semaphore for locking against concurrent checkpoints. @@ -915,6 +931,30 @@ struct journal_s unsigned long j_last; /** + * @j_fc_first: + * + * The block number of the first fast commit block in the journal + * [j_state_lock]. + */ + unsigned long j_fc_first; + + /** + * @j_fc_off: + * + * Number of fast commit blocks currently allocated. + * [j_state_lock]. + */ + unsigned long j_fc_off; + + /** + * @j_fc_last: + * + * The block number one beyond the last fast commit block in the journal + * [j_state_lock]. + */ + unsigned long j_fc_last; + + /** * @j_dev: Device where we store the journal. */ struct block_device *j_dev; @@ -1065,6 +1105,12 @@ struct journal_s struct buffer_head **j_wbuf; /** + * @j_fc_wbuf: Array of fast commit bhs for + * jbd2_journal_commit_transaction. + */ + struct buffer_head **j_fc_wbuf; + + /** * @j_wbufsize: * * Size of @j_wbuf array. @@ -1072,6 +1118,13 @@ struct journal_s int j_wbufsize; /** + * @j_fc_wbufsize: + * + * Size of @j_fc_wbuf array. + */ + int j_fc_wbufsize; + + /** * @j_last_sync_writer: * * The pid of the last person to run a synchronous operation @@ -1111,6 +1164,27 @@ struct journal_s void (*j_commit_callback)(journal_t *, transaction_t *); + /** + * @j_submit_inode_data_buffers: + * + * This function is called for all inodes associated with the + * committing transaction marked with JI_WRITE_DATA flag + * before we start to write out the transaction to the journal. + */ + int (*j_submit_inode_data_buffers) + (struct jbd2_inode *); + + /** + * @j_finish_inode_data_buffers: + * + * This function is called for all inodes associated with the + * committing transaction marked with JI_WAIT_DATA flag + * after we have written the transaction to the journal + * but before we write out the commit block. + */ + int (*j_finish_inode_data_buffers) + (struct jbd2_inode *); + /* * Journal statistics */ @@ -1170,6 +1244,30 @@ struct journal_s */ struct lockdep_map j_trans_commit_map; #endif + + /** + * @j_fc_cleanup_callback: + * + * Clean-up after fast commit or full commit. JBD2 calls this function + * after every commit operation. 
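A filesystem that only needs the stock ordered-data behaviour can point the two new per-journal hooks at the generic helpers declared further down in this header; a plausible wiring at journal-load time, sketch only:

#include <linux/jbd2.h>

static void example_setup_data_callbacks(journal_t *journal)
{
	/* Fall back to the generic submission/wait helpers. */
	journal->j_submit_inode_data_buffers =
		jbd2_journal_submit_inode_data_buffers;
	journal->j_finish_inode_data_buffers =
		jbd2_journal_finish_inode_data_buffers;
}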
+ */ + void (*j_fc_cleanup_callback)(struct journal_s *journal, int); + + /* + * @j_fc_replay_callback: + * + * File-system specific function that performs replay of a fast + * commit. JBD2 calls this function for each fast commit block found in + * the journal. This function should return JBD2_FC_REPLAY_CONTINUE + * to indicate that the block was processed correctly and more fast + * commit replay should continue. Return value of JBD2_FC_REPLAY_STOP + * indicates the end of replay (no more blocks remaining). A negative + * return value indicates error. + */ + int (*j_fc_replay_callback)(struct journal_s *journal, + struct buffer_head *bh, + enum passtype pass, int off, + tid_t expected_commit_id); }; #define jbd2_might_wait_for_commit(j) \ @@ -1240,6 +1338,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT) JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) +JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT) /* * Journal flag definitions @@ -1253,6 +1352,8 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ +#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */ +#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */ /* * Function declarations for the journaling transaction and buffer @@ -1421,6 +1522,10 @@ extern int jbd2_journal_inode_ranged_write(handle_t *handle, extern int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *inode, loff_t start_byte, loff_t length); +extern int jbd2_journal_submit_inode_data_buffers( + struct jbd2_inode *jinode); +extern int jbd2_journal_finish_inode_data_buffers( + struct jbd2_inode *jinode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); @@ -1505,6 +1610,17 @@ void __jbd2_log_wait_for_space(journal_t *journal); extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); extern int jbd2_cleanup_journal_tail(journal_t *); +/* Fast commit related APIs */ +int jbd2_fc_init(journal_t *journal, int num_fc_blks); +int jbd2_fc_begin_commit(journal_t *journal, tid_t tid); +int jbd2_fc_end_commit(journal_t *journal); +int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid); +int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out); +int jbd2_submit_inode_data(struct jbd2_inode *jinode); +int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); +int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); +int jbd2_fc_release_bufs(journal_t *journal); + /* * is_journal_abort * diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index fed6ba96c527..5e13f801c902 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -3,8 +3,9 @@ #define _LINUX_JIFFIES_H #include <linux/cache.h> +#include <linux/limits.h> #include <linux/math64.h> -#include <linux/kernel.h> +#include <linux/minmax.h> #include <linux/types.h> #include <linux/time.h> #include <linux/timex.h> diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e4aa29b1ad62..c629215fdad9 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -11,6 +11,7 @@ #include <linux/compiler.h> #include <linux/bitops.h> #include <linux/log2.h> +#include <linux/minmax.h> #include <linux/typecheck.h> #include <linux/printk.h> #include 
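Putting the new fast-commit entry points together, a heavily simplified sketch of a filesystem driving one fast commit of a single block. Real users do far more bookkeeping, and the actual buffer submission step is only hinted at here:

#include <linux/jbd2.h>
#include <linux/string.h>

static int example_fast_commit_one_block(journal_t *journal, tid_t tid,
					 const void *payload, size_t len)
{
	struct buffer_head *bh;
	int ret;

	ret = jbd2_fc_begin_commit(journal, tid);
	if (ret)
		return ret;

	ret = jbd2_fc_get_buf(journal, &bh);
	if (ret)
		goto fallback;

	memcpy(bh->b_data, payload, len);
	/* ... mark the buffer uptodate and submit it for write here ... */

	ret = jbd2_fc_wait_bufs(journal, 1);
	if (ret)
		goto fallback;

	return jbd2_fc_end_commit(journal);

fallback:
	jbd2_fc_end_commit_fallback(journal, tid);
	return ret;
}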
<linux/build_bug.h> @@ -833,155 +834,6 @@ ftrace_vprintk(const char *fmt, va_list ap) static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #endif /* CONFIG_TRACING */ -/* - * min()/max()/clamp() macros must accomplish three things: - * - * - avoid multiple evaluations of the arguments (so side-effects like - * "x++" happen only once) when non-constant. - * - perform strict type-checking (to generate warnings instead of - * nasty runtime surprises). See the "unnecessary" pointer comparison - * in __typecheck(). - * - retain result as a constant expressions when called with only - * constant expressions (to avoid tripping VLA warnings in stack - * allocation usage). - */ -#define __typecheck(x, y) \ - (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) - -/* - * This returns a constant expression while determining if an argument is - * a constant expression, most importantly without evaluating the argument. - * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> - */ -#define __is_constexpr(x) \ - (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) - -#define __no_side_effects(x, y) \ - (__is_constexpr(x) && __is_constexpr(y)) - -#define __safe_cmp(x, y) \ - (__typecheck(x, y) && __no_side_effects(x, y)) - -#define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) - -#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ - typeof(x) unique_x = (x); \ - typeof(y) unique_y = (y); \ - __cmp(unique_x, unique_y, op); }) - -#define __careful_cmp(x, y, op) \ - __builtin_choose_expr(__safe_cmp(x, y), \ - __cmp(x, y, op), \ - __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) - -/** - * min - return minimum of two values of the same or compatible types - * @x: first value - * @y: second value - */ -#define min(x, y) __careful_cmp(x, y, <) - -/** - * max - return maximum of two values of the same or compatible types - * @x: first value - * @y: second value - */ -#define max(x, y) __careful_cmp(x, y, >) - -/** - * min3 - return minimum of three values - * @x: first value - * @y: second value - * @z: third value - */ -#define min3(x, y, z) min((typeof(x))min(x, y), z) - -/** - * max3 - return maximum of three values - * @x: first value - * @y: second value - * @z: third value - */ -#define max3(x, y, z) max((typeof(x))max(x, y), z) - -/** - * min_not_zero - return the minimum that is _not_ zero, unless both are zero - * @x: value1 - * @y: value2 - */ -#define min_not_zero(x, y) ({ \ - typeof(x) __x = (x); \ - typeof(y) __y = (y); \ - __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) - -/** - * clamp - return a value clamped to a given range with strict typechecking - * @val: current value - * @lo: lowest allowable value - * @hi: highest allowable value - * - * This macro does strict typechecking of @lo/@hi to make sure they are of the - * same type as @val. See the unnecessary pointer comparisons. - */ -#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) - -/* - * ..and if you can't take the strict - * types, you can specify one yourself. - * - * Or not use min/max/clamp at all, of course. 
- */ - -/** - * min_t - return minimum of two values, using the specified type - * @type: data type to use - * @x: first value - * @y: second value - */ -#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) - -/** - * max_t - return maximum of two values, using the specified type - * @type: data type to use - * @x: first value - * @y: second value - */ -#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) - -/** - * clamp_t - return a value clamped to a given range using a given type - * @type: the type of variable to use - * @val: current value - * @lo: minimum allowable value - * @hi: maximum allowable value - * - * This macro does no typechecking and uses temporary variables of type - * @type to make all the comparisons. - */ -#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) - -/** - * clamp_val - return a value clamped to a given range using val's type - * @val: current value - * @lo: minimum allowable value - * @hi: maximum allowable value - * - * This macro does no typechecking and uses temporary variables of whatever - * type the input argument @val is. This is useful when @val is an unsigned - * type and @lo and @hi are literals that will otherwise be assigned a signed - * integer type. - */ -#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) - - -/** - * swap - swap values of @a and @b - * @a: first value - * @b: second value - */ -#define swap(a, b) \ - do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) - /* This counts to 12. Any more, it will return 13th argument. */ #define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n #define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 477b8b7c908f..0d6cf64c8bb1 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -16,6 +16,7 @@ #include <linux/linkage.h> #include <linux/init.h> #include <linux/atomic.h> +#include <linux/kprobes.h> #ifdef CONFIG_HAVE_ARCH_KGDB #include <asm/kgdb.h> #endif @@ -335,6 +336,23 @@ extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code, atomic_t *snd_rdy); extern void gdbstub_exit(int status); +/* + * kgdb and kprobes both use the same (kprobe) blocklist (which makes sense + * given they are both typically hooked up to the same trap meaning on most + * architectures one cannot be used to debug the other) + * + * However on architectures where kprobes is not (yet) implemented we permit + * breakpoints everywhere rather than blocking everything by default. 
+ */ +static inline bool kgdb_within_blocklist(unsigned long addr) +{ +#ifdef CONFIG_KGDB_HONOUR_BLOCKLIST + return within_kprobe_blacklist(addr); +#else + return false; +#endif +} + extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 05e3c2fb3ef7..7f2e2a09ebbd 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -346,6 +346,7 @@ struct kvm_memory_slot { unsigned long userspace_addr; u32 flags; short id; + u16 as_id; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) @@ -797,6 +798,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); +void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); diff --git a/include/linux/list.h b/include/linux/list.h index 0d0d17a10d25..a18c87b63376 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -610,6 +610,15 @@ static inline void list_splice_tail_init(struct list_head *list, pos = n, n = pos->prev) /** + * list_entry_is_head - test if the entry points to the head of the list + * @pos: the type * to cursor + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_entry_is_head(pos, head, member) \ + (&pos->member == (head)) + +/** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. @@ -617,7 +626,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** @@ -628,7 +637,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** @@ -653,7 +662,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** @@ -667,7 +676,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_prev_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** @@ -679,7 +688,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ - for (; &pos->member != (head); \ + for (; !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** @@ -692,7 +701,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate backwards over list of given type, continuing from current position. 
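The new list_entry_is_head() helper names the "did the loop fall off the end?" test that used to be open-coded as &pos->member != head. Typical use after a search loop; the item type is invented for illustration:

#include <linux/list.h>

struct example_item {
	int id;
	struct list_head node;
};

static struct example_item *example_find(struct list_head *head, int id)
{
	struct example_item *it;

	list_for_each_entry(it, head, node)
		if (it->id == id)
			break;

	/* 'it' aliases the (bogus) head entry if nothing matched. */
	if (list_entry_is_head(it, head, node))
		return NULL;
	return it;
}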
*/ #define list_for_each_entry_from_reverse(pos, head, member) \ - for (; &pos->member != (head); \ + for (; !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** @@ -705,7 +714,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ n = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** @@ -721,7 +730,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe_continue(pos, n, head, member) \ for (pos = list_next_entry(pos, member), \ n = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** @@ -736,7 +745,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_next_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** @@ -752,7 +761,7 @@ static inline void list_splice_tail_init(struct list_head *list, #define list_for_each_entry_safe_reverse(pos, n, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member), \ n = list_prev_entry(pos, member); \ - &pos->member != (head); \ + !list_entry_is_head(pos, head, member); \ pos = n, n = list_prev_entry(n, member)) /** diff --git a/include/linux/math64.h b/include/linux/math64.h index 3381d9e33c4e..66deb1fdc2ef 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -28,7 +28,7 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) return dividend / divisor; } -/** +/* * div_s64_rem - signed 64bit divide with 32bit divisor with remainder * @dividend: signed 64bit dividend * @divisor: signed 32bit divisor @@ -42,7 +42,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) return dividend / divisor; } -/** +/* * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder * @dividend: unsigned 64bit dividend * @divisor: unsigned 64bit divisor @@ -56,7 +56,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) return dividend / divisor; } -/** +/* * div64_u64 - unsigned 64bit divide with 64bit divisor * @dividend: unsigned 64bit dividend * @divisor: unsigned 64bit divisor @@ -68,7 +68,7 @@ static inline u64 div64_u64(u64 dividend, u64 divisor) return dividend / divisor; } -/** +/* * div64_s64 - signed 64bit divide with 64bit divisor * @dividend: signed 64bit dividend * @divisor: signed 64bit divisor diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6ef4a552e09d..e391e3c56de5 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1531,18 +1531,6 @@ static inline bool memcg_kmem_enabled(void) return static_branch_likely(&memcg_kmem_enabled_key); } -static inline bool memcg_kmem_bypass(void) -{ - if (in_interrupt()) - return true; - - /* Allow remote memcg charging in kthread contexts. 
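The math64.h hunk above only demotes comment markers; the division helpers themselves are unchanged. For reference, a trivial usage sketch of the remainder-returning variant (names and units invented):

#include <linux/math64.h>
#include <linux/types.h>

static u64 example_split_period(u64 total_ns, u64 period_ns, u64 *rem_ns)
{
	/* Whole periods contained in total_ns, leftover in *rem_ns. */
	return div64_u64_rem(total_ns, period_ns, rem_ns);
}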
*/ - if ((!current->mm || (current->flags & PF_KTHREAD)) && - !current->active_memcg) - return true; - return false; -} - static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index c0faa7a30c46..d65c6fdc5cfc 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -57,6 +57,19 @@ enum { MMOP_ONLINE_MOVABLE, }; +/* Flags for add_memory() and friends to specify memory hotplug details. */ +typedef int __bitwise mhp_t; + +/* No special request */ +#define MHP_NONE ((__force mhp_t)0) +/* + * Allow merging of the added System RAM resource with adjacent, + * mergeable resources. After a successful call to add_memory_resource() + * with this flag set, the resource pointer must no longer be used as it + * might be stale, or the resource might have changed. + */ +#define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) + /* * Extended parameters for memory hotplug: * altmap: alternative allocator for memmap array (optional) @@ -103,8 +116,8 @@ extern int online_pages(unsigned long pfn, unsigned long nr_pages, int online_type, int nid); extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn); -extern unsigned long __offline_isolated_pages(unsigned long start_pfn, - unsigned long end_pfn); +extern void __offline_isolated_pages(unsigned long start_pfn, + unsigned long end_pfn); typedef void (*online_page_callback_t)(struct page *page, unsigned int order); @@ -247,13 +260,6 @@ static inline void zone_span_writelock(struct zone *zone) {} static inline void zone_span_writeunlock(struct zone *zone) {} static inline void zone_seqlock_init(struct zone *zone) {} -static inline int mhp_notimplemented(const char *func) -{ - printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); - dump_stack(); - return -ENOSYS; -} - static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { } @@ -344,14 +350,18 @@ static inline void __remove_memory(int nid, u64 start, u64 size) {} extern void set_zone_contiguous(struct zone *zone); extern void clear_zone_contiguous(struct zone *zone); +#ifdef CONFIG_MEMORY_HOTPLUG extern void __ref free_area_init_core_hotplug(int nid); -extern int __add_memory(int nid, u64 start, u64 size); -extern int add_memory(int nid, u64 start, u64 size); -extern int add_memory_resource(int nid, struct resource *resource); +extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); +extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); +extern int add_memory_resource(int nid, struct resource *resource, + mhp_t mhp_flags); extern int add_memory_driver_managed(int nid, u64 start, u64 size, - const char *resource_name); + const char *resource_name, + mhp_t mhp_flags); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages, struct vmem_altmap *altmap); + unsigned long nr_pages, + struct vmem_altmap *altmap, int migratetype); extern void remove_pfn_range_from_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); @@ -363,8 +373,8 @@ extern void sparse_remove_section(struct mem_section *ms, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); -extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, - int online_type); extern struct zone *zone_for_pfn_range(int online_type, int nid, 
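With the new mhp_t flags, a hotplug driver that adds many adjacent blocks (virtio-mem style) can ask the core to merge the resulting resources. Illustrative only; locking and error handling omitted:

#include <linux/memory_hotplug.h>

static int example_add_ram_block(int nid, u64 start, u64 size)
{
	/*
	 * MHP_NONE keeps the old behaviour; MEMHP_MERGE_RESOURCE opts in to
	 * merging with adjacent mergeable System RAM resources.
	 */
	return add_memory(nid, start, size, MEMHP_MERGE_RESOURCE);
}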
unsigned start_pfn, unsigned long nr_pages); +#endif /* CONFIG_MEMORY_HOTPLUG */ + #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index 66989a16221a..c3748b53bf7d 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ b/include/linux/mfd/mt6397/rtc.h @@ -72,7 +72,6 @@ struct mtk_rtc_data { }; struct mt6397_rtc { - struct device *dev; struct rtc_device *rtc_dev; /* Protect register access from multiple tasks */ diff --git a/include/linux/minmax.h b/include/linux/minmax.h new file mode 100644 index 000000000000..c0f57b0c64d9 --- /dev/null +++ b/include/linux/minmax.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MINMAX_H +#define _LINUX_MINMAX_H + +/* + * min()/max()/clamp() macros must accomplish three things: + * + * - avoid multiple evaluations of the arguments (so side-effects like + * "x++" happen only once) when non-constant. + * - perform strict type-checking (to generate warnings instead of + * nasty runtime surprises). See the "unnecessary" pointer comparison + * in __typecheck(). + * - retain result as a constant expressions when called with only + * constant expressions (to avoid tripping VLA warnings in stack + * allocation usage). + */ +#define __typecheck(x, y) \ + (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) + +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + +#define __no_side_effects(x, y) \ + (__is_constexpr(x) && __is_constexpr(y)) + +#define __safe_cmp(x, y) \ + (__typecheck(x, y) && __no_side_effects(x, y)) + +#define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) + +#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + __cmp(unique_x, unique_y, op); }) + +#define __careful_cmp(x, y, op) \ + __builtin_choose_expr(__safe_cmp(x, y), \ + __cmp(x, y, op), \ + __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) + +/** + * min - return minimum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define min(x, y) __careful_cmp(x, y, <) + +/** + * max - return maximum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define max(x, y) __careful_cmp(x, y, >) + +/** + * min3 - return minimum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define min3(x, y, z) min((typeof(x))min(x, y), z) + +/** + * max3 - return maximum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define max3(x, y, z) max((typeof(x))max(x, y), z) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of @lo/@hi to make sure they are of the + * same type as @val. See the unnecessary pointer comparisons. 
+ */ +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ + +/** + * min_t - return minimum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) + +/** + * max_t - return maximum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * @type to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument @val is. This is useful when @val is an unsigned + * type and @lo and @hi are literals that will otherwise be assigned a signed + * integer type. + */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + +/** + * swap - swap values of @a and @b + * @a: first value + * @b: second value + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +#endif /* _LINUX_MINMAX_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index de1ffb4804d6..651591a2965d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -420,7 +420,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_1a[0x2]; u8 ipsec_encrypt[0x1]; u8 ipsec_decrypt[0x1]; - u8 reserved_at_1e[0x2]; + u8 sw_owner_v2[0x1]; + u8 reserved_at_1f[0x1]; u8 termination_table_raw_traffic[0x1]; u8 reserved_at_21[0x1]; @@ -1430,7 +1431,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_bf_reg_size[0x5]; - u8 reserved_at_270[0x8]; + u8 reserved_at_270[0x6]; + u8 lag_dct[0x2]; u8 lag_tx_port_affinity[0x1]; u8 reserved_at_279[0x2]; u8 lag_master[0x1]; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 2d45a6af52a4..23edd2db4803 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -125,6 +125,14 @@ enum mlx5e_connector_type { MLX5E_CONNECTOR_TYPE_NUMBER, }; +enum mlx5_ptys_width { + MLX5_PTYS_WIDTH_1X = 1 << 0, + MLX5_PTYS_WIDTH_2X = 1 << 1, + MLX5_PTYS_WIDTH_4X = 1 << 2, + MLX5_PTYS_WIDTH_8X = 1 << 3, + MLX5_PTYS_WIDTH_12X = 1 << 4, +}; + #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ (ext ? 
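Nothing in these macros changes by the move to <linux/minmax.h>; code that only needs min()/max()/clamp() can now include the much lighter header. Two quick illustrations of the type rules described above (functions invented):

#include <linux/minmax.h>
#include <linux/types.h>

static unsigned int example_clamp_len(unsigned int len)
{
	/* clamp() type-checks: value and both bounds are unsigned int. */
	return clamp(len, 16U, 4096U);
}

static u8 example_clamp_level(u8 level)
{
	/*
	 * clamp_val() compares in the type of 'level', so the plain integer
	 * literals do not drag the comparison to signed int.
	 */
	return clamp_val(level, 10, 200);
}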
MLX5_GET(reg, out, ext_##field) : \ @@ -133,10 +141,9 @@ enum mlx5e_connector_type { int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); -int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, - u8 *link_width_oper, u8 local_port); -int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, u8 local_port); + +int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper, + u16 *proto_oper, u8 local_port); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); diff --git a/include/linux/mm.h b/include/linux/mm.h index 620961e4f32b..ef360fe70aaf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2440,7 +2440,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum meminit_context, struct vmem_altmap *); + enum meminit_context, struct vmem_altmap *, int migratetype); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2579,7 +2579,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf, bool downgrade); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); -extern int do_madvise(unsigned long start, size_t len_in, int behavior); +extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); #ifdef CONFIG_MMU extern int __mm_populate(unsigned long addr, unsigned long len, @@ -3025,8 +3025,6 @@ extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); -extern int get_hwpoison_page(struct page *page); -#define put_hwpoison_page(page) put_page(page) extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); @@ -3066,6 +3064,7 @@ enum mf_action_page_type { MF_MSG_BUDDY, MF_MSG_BUDDY_2ND, MF_MSG_DAX, + MF_MSG_UNSPLIT_THP, MF_MSG_UNKNOWN, }; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c27fb1faffe5..fb3bf696c05e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -266,6 +266,8 @@ static inline bool is_active_lru(enum lru_list lru) return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } +#define ANON_AND_FILE 2 + enum lruvec_flags { LRUVEC_CONGESTED, /* lruvec has many dirty pages * backed by a congested BDI @@ -283,8 +285,8 @@ struct lruvec { unsigned long file_cost; /* Non-resident age, driven by LRU movement */ atomic_long_t nonresident_age; - /* Refaults at the time of last reclaim cycle, anon=0, file=1 */ - unsigned long refaults[2]; + /* Refaults at the time of last reclaim cycle */ + unsigned long refaults[ANON_AND_FILE]; /* Various lruvec state flags (enum lruvec_flags) */ unsigned long flags; #ifdef CONFIG_MEMCG @@ -441,6 +443,8 @@ enum zone_type { #ifndef __GENERATING_BOUNDS_H +#define ASYNC_AND_SYNC 2 + struct zone { /* Read-mostly fields */ @@ -560,8 +564,8 @@ struct zone { #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* pfn where compaction free scanner should start */ unsigned long compact_cached_free_pfn; - /* 
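do_madvise() now names the target mm explicitly instead of assuming current->mm, which is what allows a remote caller to act on another process. The equivalent of the old call for the current task, as a sketch:

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>

static int example_madvise_current(unsigned long start, size_t len)
{
	return do_madvise(current->mm, start, len, MADV_COLD);
}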
pfn where async and sync compaction migration scanner should start */ - unsigned long compact_cached_migrate_pfn[2]; + /* pfn where compaction migration scanner should start */ + unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC]; unsigned long compact_init_migrate_pfn; unsigned long compact_init_free_pfn; #endif @@ -1416,7 +1420,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr) #define pfn_to_nid(pfn) (0) #endif -#define early_pfn_valid(pfn) pfn_valid(pfn) void sparse_init(void); #else #define sparse_init() do {} while (0) @@ -1436,10 +1439,6 @@ struct mminit_pfnnid_cache { int last_nid; }; -#ifndef early_pfn_valid -#define early_pfn_valid(pfn) (1) -#endif - /* * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we * need to check pfn validity within that MAX_ORDER_NR_PAGES block. diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h index 2129f7d3b6eb..0ce612428aea 100644 --- a/include/linux/mtd/hyperbus.h +++ b/include/linux/mtd/hyperbus.h @@ -8,6 +8,17 @@ #include <linux/mtd/map.h> +/* HyperBus command bits */ +#define HYPERBUS_RW 0x80 /* R/W# */ +#define HYPERBUS_RW_WRITE 0 +#define HYPERBUS_RW_READ 0x80 +#define HYPERBUS_AS 0x40 /* Address Space */ +#define HYPERBUS_AS_MEM 0 +#define HYPERBUS_AS_REG 0x40 +#define HYPERBUS_BT 0x20 /* Burst Type */ +#define HYPERBUS_BT_WRAPPED 0 +#define HYPERBUS_BT_LINEAR 0x20 + enum hyperbus_memtype { HYPERFLASH, HYPERRAM, @@ -20,6 +31,7 @@ enum hyperbus_memtype { * @mtd: pointer to MTD struct * @ctlr: pointer to HyperBus controller struct * @memtype: type of memory device: HyperFlash or HyperRAM + * @priv: pointer to controller specific per device private data */ struct hyperbus_device { @@ -28,6 +40,7 @@ struct hyperbus_device { struct mtd_info *mtd; struct hyperbus_ctlr *ctlr; enum hyperbus_memtype memtype; + void *priv; }; /** diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index af99041ceaa9..697ea2474a7c 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -83,7 +83,18 @@ struct nand_pos { }; /** + * enum nand_page_io_req_type - Direction of an I/O request + * @NAND_PAGE_READ: from the chip, to the controller + * @NAND_PAGE_WRITE: from the controller, to the chip + */ +enum nand_page_io_req_type { + NAND_PAGE_READ = 0, + NAND_PAGE_WRITE, +}; + +/** * struct nand_page_io_req - NAND I/O request object + * @type: the type of page I/O: read or write * @pos: the position this I/O request is targeting * @dataoffs: the offset within the page * @datalen: number of data bytes to read from/write to this page @@ -99,6 +110,7 @@ struct nand_pos { * specific commands/operations. 
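The new HyperBus command bits let controller drivers compose the top command/address bits themselves; a tiny illustration of the register-space linear read encoding, function name invented:

#include <linux/mtd/hyperbus.h>
#include <linux/types.h>

static u8 example_hyperbus_reg_read_cmd(void)
{
	/* R/W# = read, AS = register space, BT = linear burst. */
	return HYPERBUS_RW_READ | HYPERBUS_AS_REG | HYPERBUS_BT_LINEAR;
}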
*/ struct nand_page_io_req { + enum nand_page_io_req_type type; struct nand_pos pos; unsigned int dataoffs; unsigned int datalen; @@ -115,18 +127,77 @@ struct nand_page_io_req { int mode; }; +const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void); +const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void); +const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void); + +/** + * enum nand_ecc_engine_type - NAND ECC engine type + * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value + * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction + * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction + * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction + * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction + */ +enum nand_ecc_engine_type { + NAND_ECC_ENGINE_TYPE_INVALID, + NAND_ECC_ENGINE_TYPE_NONE, + NAND_ECC_ENGINE_TYPE_SOFT, + NAND_ECC_ENGINE_TYPE_ON_HOST, + NAND_ECC_ENGINE_TYPE_ON_DIE, +}; + +/** + * enum nand_ecc_placement - NAND ECC bytes placement + * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown + * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area + * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes + * interleaved with regular data in the main + * area + */ +enum nand_ecc_placement { + NAND_ECC_PLACEMENT_UNKNOWN, + NAND_ECC_PLACEMENT_OOB, + NAND_ECC_PLACEMENT_INTERLEAVED, +}; + +/** + * enum nand_ecc_algo - NAND ECC algorithm + * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm + * @NAND_ECC_ALGO_HAMMING: Hamming algorithm + * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm + * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm + */ +enum nand_ecc_algo { + NAND_ECC_ALGO_UNKNOWN, + NAND_ECC_ALGO_HAMMING, + NAND_ECC_ALGO_BCH, + NAND_ECC_ALGO_RS, +}; + /** * struct nand_ecc_props - NAND ECC properties + * @engine_type: ECC engine type + * @placement: OOB placement (if relevant) + * @algo: ECC algorithm (if relevant) * @strength: ECC strength * @step_size: Number of bytes per step + * @flags: Misc properties */ struct nand_ecc_props { + enum nand_ecc_engine_type engine_type; + enum nand_ecc_placement placement; + enum nand_ecc_algo algo; unsigned int strength; unsigned int step_size; + unsigned int flags; }; #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } +/* NAND ECC misc flags */ +#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0) + /** * struct nand_bbt - bad block table object * @cache: in memory BBT cache @@ -158,10 +229,79 @@ struct nand_ops { }; /** + * struct nand_ecc_context - Context for the ECC engine + * @conf: basic ECC engine parameters + * @total: total number of bytes used for storing ECC codes, this is used by + * generic OOB layouts + * @priv: ECC engine driver private data + */ +struct nand_ecc_context { + struct nand_ecc_props conf; + unsigned int total; + void *priv; +}; + +/** + * struct nand_ecc_engine_ops - ECC engine operations + * @init_ctx: given a desired user configuration for the pointed NAND device, + * requests the ECC engine driver to setup a configuration with + * values it supports. + * @cleanup_ctx: clean the context initialized by @init_ctx. + * @prepare_io_req: is called before reading/writing a page to prepare the I/O + * request to be performed with ECC correction. + * @finish_io_req: is called after reading/writing a page to terminate the I/O + * request and ensure proper ECC correction. 
+ */ +struct nand_ecc_engine_ops { + int (*init_ctx)(struct nand_device *nand); + void (*cleanup_ctx)(struct nand_device *nand); + int (*prepare_io_req)(struct nand_device *nand, + struct nand_page_io_req *req); + int (*finish_io_req)(struct nand_device *nand, + struct nand_page_io_req *req); +}; + +/** + * struct nand_ecc_engine - ECC engine abstraction for NAND devices + * @ops: ECC engine operations + */ +struct nand_ecc_engine { + struct nand_ecc_engine_ops *ops; +}; + +void of_get_nand_ecc_user_config(struct nand_device *nand); +int nand_ecc_init_ctx(struct nand_device *nand); +void nand_ecc_cleanup_ctx(struct nand_device *nand); +int nand_ecc_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req); +int nand_ecc_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req); +bool nand_ecc_is_strong_enough(struct nand_device *nand); + +/** + * struct nand_ecc - Information relative to the ECC + * @defaults: Default values, depend on the underlying subsystem + * @requirements: ECC requirements from the NAND chip perspective + * @user_conf: User desires in terms of ECC parameters + * @ctx: ECC context for the ECC engine, derived from the device @requirements + * the @user_conf and the @defaults + * @ondie_engine: On-die ECC engine reference, if any + * @engine: ECC engine actually bound + */ +struct nand_ecc { + struct nand_ecc_props defaults; + struct nand_ecc_props requirements; + struct nand_ecc_props user_conf; + struct nand_ecc_context ctx; + struct nand_ecc_engine *ondie_engine; + struct nand_ecc_engine *engine; +}; + +/** * struct nand_device - NAND device * @mtd: MTD instance attached to the NAND device * @memorg: memory layout - * @eccreq: ECC requirements + * @ecc: NAND ECC object attached to the NAND device * @rowconv: position to row address converter * @bbt: bad block table info * @ops: NAND operations attached to the NAND device @@ -169,8 +309,8 @@ struct nand_ops { * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) * should declare their own NAND object embedding a nand_device struct (that's * how inheritance is done). - * struct_nand_device->memorg and struct_nand_device->eccreq should be filled - * at device detection time to reflect the NAND device + * struct_nand_device->memorg and struct_nand_device->ecc.requirements should + * be filled at device detection time to reflect the NAND device * capabilities/requirements. Once this is done nanddev_init() can be called. 
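A bare-bones sketch of what an ECC engine driver built on the new nand_ecc_engine_ops might look like; all four hooks are no-op placeholders and the names are invented:

#include <linux/mtd/nand.h>

static int example_ecc_init_ctx(struct nand_device *nand)
{
	return 0;	/* negotiate a config and store it in nand->ecc.ctx */
}

static void example_ecc_cleanup_ctx(struct nand_device *nand)
{
}

static int example_ecc_prepare_io_req(struct nand_device *nand,
				      struct nand_page_io_req *req)
{
	return 0;	/* e.g. encode data before a NAND_PAGE_WRITE */
}

static int example_ecc_finish_io_req(struct nand_device *nand,
				     struct nand_page_io_req *req)
{
	return 0;	/* e.g. correct data after a NAND_PAGE_READ */
}

static struct nand_ecc_engine_ops example_ecc_engine_ops = {
	.init_ctx = example_ecc_init_ctx,
	.cleanup_ctx = example_ecc_cleanup_ctx,
	.prepare_io_req = example_ecc_prepare_io_req,
	.finish_io_req = example_ecc_finish_io_req,
};

static struct nand_ecc_engine example_ecc_engine = {
	.ops = &example_ecc_engine_ops,
};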
* It will take care of converting NAND information into MTD ones, which means * the specialized NAND layers should never manually tweak @@ -179,7 +319,7 @@ struct nand_ops { struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; - struct nand_ecc_props eccreq; + struct nand_ecc ecc; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; @@ -383,6 +523,40 @@ nanddev_get_memorg(struct nand_device *nand) return &nand->memorg; } +/** + * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device + * @nand: NAND device + */ +static inline const struct nand_ecc_props * +nanddev_get_ecc_conf(struct nand_device *nand) +{ + return &nand->ecc.ctx.conf; +} + +/** + * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND + * device + * @nand: NAND device + */ +static inline const struct nand_ecc_props * +nanddev_get_ecc_requirements(struct nand_device *nand) +{ + return &nand->ecc.requirements; +} + +/** + * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND + * device + * @nand: NAND device + * @reqs: Requirements + */ +static inline void +nanddev_set_ecc_requirements(struct nand_device *nand, + const struct nand_ecc_props *reqs) +{ + nand->ecc.requirements = *reqs; +} + int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, struct module *owner); void nanddev_cleanup(struct nand_device *nand); @@ -624,11 +798,13 @@ static inline void nanddev_pos_next_page(struct nand_device *nand, * layer. */ static inline void nanddev_io_iter_init(struct nand_device *nand, + enum nand_page_io_req_type reqtype, loff_t offs, struct mtd_oob_ops *req, struct nand_io_iter *iter) { struct mtd_info *mtd = nanddev_to_mtd(nand); + iter->req.type = reqtype; iter->req.mode = req->mode; iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); iter->req.ooboffs = req->ooboffs; @@ -698,8 +874,8 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand, * * Should be used for iterate over pages that are contained in an MTD request. 
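Detection code is now expected to go through the accessors instead of writing the old ->eccreq field directly. A minimal sketch recording a typical 8-bit-per-512-byte requirement (values illustrative):

#include <linux/mtd/nand.h>

static void example_record_ecc_requirements(struct nand_device *nand)
{
	struct nand_ecc_props reqs = {
		.strength = 8,		/* correctable bits ... */
		.step_size = 512,	/* ... per 512-byte step */
	};

	nanddev_set_ecc_requirements(nand, &reqs);
}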
*/ -#define nanddev_io_for_each_page(nand, start, req, iter) \ - for (nanddev_io_iter_init(nand, start, req, iter); \ +#define nanddev_io_for_each_page(nand, type, start, req, iter) \ + for (nanddev_io_iter_init(nand, type, start, req, iter); \ !nanddev_io_iter_end(nand, iter); \ nanddev_io_iter_next_page(nand, iter)) diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 6166e7c60869..146413d4bdb7 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -121,37 +121,4 @@ static inline void send_pfow_command(struct map_info *map, map_write(map, CMD(LPDDR_START_EXECUTION), map->pfow_base + PFOW_COMMAND_EXECUTE); } - -static inline void print_drs_error(unsigned dsr) -{ - int prog_status = (dsr & DSR_RPS) >> 8; - - if (!(dsr & DSR_AVAILABLE)) - printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); - if (prog_status & 0x03) - printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " - "half with 41h command\n"); - else if (prog_status & 0x02) - printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " - "in region with Control Mode data\n"); - else if (prog_status & 0x01) - printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " - "with Object Mode data\n"); - if (!(dsr & DSR_READY_STATUS)) - printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); - if (dsr & DSR_ESS) - printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); - if (dsr & DSR_ERASE_STATUS) - printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); - if (dsr & DSR_PROGRAM_STATUS) - printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); - if (dsr & DSR_VPPS) - printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " - "aborted\n"); - if (dsr & DSR_PSS) - printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); - if (dsr & DSR_DPS) - printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " - "on locked block\n"); -} #endif /* __LINUX_MTD_PFOW_H */ diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a725b620aca2..aac07940de09 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -14,6 +14,7 @@ #define __LINUX_MTD_RAWNAND_H #include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> #include <linux/mtd/jedec.h> @@ -81,25 +82,6 @@ struct nand_chip; #define NAND_DATA_IFACE_CHECK_ONLY -1 /* - * Constants for ECC_MODES - */ -enum nand_ecc_mode { - NAND_ECC_INVALID, - NAND_ECC_NONE, - NAND_ECC_SOFT, - NAND_ECC_HW, - NAND_ECC_HW_SYNDROME, - NAND_ECC_ON_DIE, -}; - -enum nand_ecc_algo { - NAND_ECC_UNKNOWN, - NAND_ECC_HAMMING, - NAND_ECC_BCH, - NAND_ECC_RS, -}; - -/* * Constants for Hardware ECC */ /* Reset Hardware ECC for read */ @@ -116,7 +98,6 @@ enum nand_ecc_algo { * pages and you want to rely on the default implementation. */ #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) -#define NAND_ECC_MAXIMIZE BIT(1) /* * Option constants for bizarre disfunctionality and real @@ -310,7 +291,8 @@ static const struct nand_ecc_caps __name = { \ /** * struct nand_ecc_ctrl - Control structure for ECC - * @mode: ECC mode + * @engine_type: ECC engine type + * @placement: OOB bytes placement * @algo: ECC algorithm * @steps: number of ECC steps per page * @size: data bytes per ECC step @@ -338,7 +320,7 @@ static const struct nand_ecc_caps __name = { \ * controller and always return contiguous in-band and * out-of-band data even if they're not stored * contiguously on the NAND chip (e.g. - * NAND_ECC_HW_SYNDROME interleaves in-band and + * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and * out-of-band data). 
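Callers of the page iterator now state up front whether the request is a read or a write, so the ECC engine can be prepared accordingly. A sketch of the read side, with an invented caller:

#include <linux/mtd/nand.h>

static int example_walk_read_request(struct nand_device *nand, loff_t offs,
				     struct mtd_oob_ops *req)
{
	struct nand_io_iter iter;

	nanddev_io_for_each_page(nand, NAND_PAGE_READ, offs, req, &iter) {
		/* issue the per-page transfer described by iter.req here */
	}

	return 0;
}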
* @write_page_raw: function to write a raw page without ECC. This function * should hide the specific layout used by the ECC @@ -346,7 +328,7 @@ static const struct nand_ecc_caps __name = { \ * in-band and out-of-band data. ECC controller is * responsible for doing the appropriate transformations * to adapt to its specific layout (e.g. - * NAND_ECC_HW_SYNDROME interleaves in-band and + * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and * out-of-band data). * @read_page: function to read a page according to the ECC generator * requirements; returns maximum number of bitflips corrected in @@ -362,7 +344,8 @@ static const struct nand_ecc_caps __name = { \ * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { - enum nand_ecc_mode mode; + enum nand_ecc_engine_type engine_type; + enum nand_ecc_placement placement; enum nand_ecc_algo algo; int steps; int size; @@ -1161,9 +1144,6 @@ struct nand_chip { void *priv; }; -extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; - static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd) { return container_of(mtd, struct nand_chip, base.mtd); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index b8360be141da..9dc7eeac924f 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -551,13 +551,13 @@ enum { NFSPROC4_CLNT_LOOKUPP, NFSPROC4_CLNT_LAYOUTERROR, - NFSPROC4_CLNT_COPY_NOTIFY, NFSPROC4_CLNT_GETXATTR, NFSPROC4_CLNT_SETXATTR, NFSPROC4_CLNT_LISTXATTRS, NFSPROC4_CLNT_REMOVEXATTR, + NFSPROC4_CLNT_READ_PLUS, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 7eae72a8762e..38e60ec742df 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -287,5 +287,6 @@ struct nfs_server { #define NFS_CAP_LAYOUTERROR (1U << 26) #define NFS_CAP_COPY_NOTIFY (1U << 27) #define NFS_CAP_XATTR (1U << 28) +#define NFS_CAP_READ_PLUS (1U << 29) #endif diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h new file mode 100644 index 000000000000..f5ba0fbff72f --- /dev/null +++ b/include/linux/nfs_ssc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/nfs_ssc.h + * + * Author: Dai Ngo <dai.ngo@oracle.com> + * + * Copyright (c) 2020, Oracle and/or its affiliates. 
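For raw NAND controller drivers the old ecc.mode splits into engine_type and placement. A hedged sketch of describing an on-host syndrome-style engine with the new fields (numbers illustrative):

#include <linux/mtd/rawnand.h>

static void example_describe_ecc(struct nand_chip *chip)
{
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
	chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
	chip->ecc.size = 512;
	chip->ecc.strength = 1;
}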
+ */ + +#include <linux/nfs_fs.h> + +extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl; + +/* + * NFS_V4 + */ +struct nfs4_ssc_client_ops { + struct file *(*sco_open)(struct vfsmount *ss_mnt, + struct nfs_fh *src_fh, nfs4_stateid *stateid); + void (*sco_close)(struct file *filep); +}; + +/* + * NFS_FS + */ +struct nfs_ssc_client_ops { + void (*sco_sb_deactive)(struct super_block *sb); +}; + +struct nfs_ssc_client_ops_tbl { + const struct nfs4_ssc_client_ops *ssc_nfs4_ops; + const struct nfs_ssc_client_ops *ssc_nfs_ops; +}; + +extern void nfs42_ssc_register_ops(void); +extern void nfs42_ssc_unregister_ops(void); + +extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops); +extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops); + +#ifdef CONFIG_NFSD_V4_2_INTER_SSC +static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt, + struct nfs_fh *src_fh, nfs4_stateid *stateid) +{ + if (nfs_ssc_client_tbl.ssc_nfs4_ops) + return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid); + return ERR_PTR(-EIO); +} + +static inline void nfs42_ssc_close(struct file *filep) +{ + if (nfs_ssc_client_tbl.ssc_nfs4_ops) + (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep); +} +#endif + +/* + * NFS_FS + */ +extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops); +extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops); + +static inline void nfs_do_sb_deactive(struct super_block *sb) +{ + if (nfs_ssc_client_tbl.ssc_nfs_ops) + (*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb); +} diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 69cb46f7b8d2..d63cb862d58e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -525,7 +525,7 @@ struct nfs_closeargs { struct nfs_seqid * seqid; fmode_t fmode; u32 share_access; - const u32 * bitmask; + u32 * bitmask; struct nfs4_layoutreturn_args *lr_args; }; @@ -608,7 +608,7 @@ struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; const nfs4_stateid *stateid; - const u32 * bitmask; + u32 * bitmask; struct nfs4_layoutreturn_args *lr_args; }; @@ -648,7 +648,7 @@ struct nfs_pgio_args { union { unsigned int replen; /* used by read */ struct { - const u32 * bitmask; /* used by write */ + u32 * bitmask; /* used by write */ enum nfs3_stable_how stable; /* used by write */ }; }; @@ -657,7 +657,7 @@ struct nfs_pgio_args { struct nfs_pgio_res { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; - __u32 count; + __u64 count; __u32 op_status; union { struct { diff --git a/include/linux/node.h b/include/linux/node.h index 014ba3ab2efd..8e5a29897936 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -99,15 +99,14 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context); +void link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else -static inline int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context) +static inline void link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context) { - return 0; } #endif @@ -130,8 +129,7 @@ static inline int register_one_node(int nid) if (error) return error; /* link memory sections under this node */ - 
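The shared table in <linux/nfs_ssc.h> decouples nfsd from the NFSv4 client: the client registers open/close helpers and nfsd reaches them through the inline wrappers below. A hypothetical registration with placeholder helpers; nothing here mirrors the real client code:

#include <linux/err.h>
#include <linux/nfs_ssc.h>

static struct file *example_ssc_open(struct vfsmount *ss_mnt,
				     struct nfs_fh *src_fh,
				     nfs4_stateid *stateid)
{
	return ERR_PTR(-EOPNOTSUPP);	/* placeholder */
}

static void example_ssc_close(struct file *filep)
{
}

static const struct nfs4_ssc_client_ops example_nfs4_ssc_ops = {
	.sco_open = example_ssc_open,
	.sco_close = example_ssc_close,
};

static void example_register_ssc(void)
{
	nfs42_ssc_register(&example_nfs4_ssc_ops);
}

static void example_unregister_ssc(void)
{
	nfs42_ssc_unregister(&example_nfs4_ssc_ops);
}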
error = link_mem_sections(nid, start_pfn, end_pfn, - MEMINIT_EARLY); + link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY); } return error; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 3334ce056335..ac398e143c9a 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -90,9 +90,9 @@ * for such situations. See below and CPUMASK_ALLOC also. */ -#include <linux/kernel.h> #include <linux/threads.h> #include <linux/bitmap.h> +#include <linux/minmax.h> #include <linux/numa.h> typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; diff --git a/include/linux/overflow.h b/include/linux/overflow.h index f1c4e7b56bd9..ef74051d5cfe 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -3,6 +3,7 @@ #define __LINUX_OVERFLOW_H #include <linux/compiler.h> +#include <linux/limits.h> /* * In the fallback code below, we need to compute the minimum and diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 38ded408bd4c..4f6ba9379112 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -431,13 +431,9 @@ PAGEFLAG_FALSE(Uncached) PAGEFLAG(HWPoison, hwpoison, PF_ANY) TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) -extern bool set_hwpoison_free_buddy_page(struct page *page); +extern bool take_page_off_buddy(struct page *page); #else PAGEFLAG_FALSE(HWPoison) -static inline bool set_hwpoison_free_buddy_page(struct page *page) -{ - return 0; -} #define __PG_HWPOISON 0 #endif diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 8679ccd722e8..3468794f83d2 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); -extern void __split_page_owner(struct page *page, unsigned int order); +extern void __split_page_owner(struct page *page, unsigned int nr); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(struct page *page); @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); } -static inline void split_page_owner(struct page *page, unsigned int order) +static inline void split_page_owner(struct page *page, unsigned int nr) { if (static_branch_unlikely(&page_owner_inited)) - __split_page_owner(page, order); + __split_page_owner(page, nr); } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c3afd3242b54..c77b7c31b2e4 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -29,6 +29,7 @@ enum mapping_flags { AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, + AS_THP_SUPPORT = 6, /* THPs supported */ }; /** @@ -120,6 +121,40 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) m->gfp_mask = mask; } +static inline bool mapping_thp_support(struct address_space *mapping) +{ + return test_bit(AS_THP_SUPPORT, &mapping->flags); +} + +static inline int filemap_nr_thps(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + return atomic_read(&mapping->nr_thps); +#else + return 0; +#endif +} + +static inline void 
filemap_nr_thps_inc(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + if (!mapping_thp_support(mapping)) + atomic_inc(&mapping->nr_thps); +#else + WARN_ON_ONCE(1); +#endif +} + +static inline void filemap_nr_thps_dec(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + if (!mapping_thp_support(mapping)) + atomic_dec(&mapping->nr_thps); +#else + WARN_ON_ONCE(1); +#endif +} + void release_pages(struct page **pages, int nr); /* @@ -726,17 +761,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); -#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) - -void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, - struct file *, pgoff_t index, unsigned long req_count); -void page_cache_async_readahead(struct address_space *, struct file_ra_state *, - struct file *, struct page *, pgoff_t index, - unsigned long req_count); -void page_cache_readahead_unbounded(struct address_space *, struct file *, - pgoff_t index, unsigned long nr_to_read, - unsigned long lookahead_count); - /* * Like add_to_page_cache_locked, but used to add newly allocated pages: * the page is new, so we can just run __SetPageLocked() against it. @@ -777,6 +801,67 @@ struct readahead_control { unsigned int _batch_count; }; +#define DEFINE_READAHEAD(rac, f, m, i) \ + struct readahead_control rac = { \ + .file = f, \ + .mapping = m, \ + ._index = i, \ + } + +#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) + +void page_cache_ra_unbounded(struct readahead_control *, + unsigned long nr_to_read, unsigned long lookahead_count); +void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *, + unsigned long req_count); +void page_cache_async_ra(struct readahead_control *, struct file_ra_state *, + struct page *, unsigned long req_count); + +/** + * page_cache_sync_readahead - generic file readahead + * @mapping: address_space which holds the pagecache and I/O vectors + * @ra: file_ra_state which holds the readahead state + * @file: Used by the filesystem for authentication. + * @index: Index of first page to be read. + * @req_count: Total number of pages being read by the caller. + * + * page_cache_sync_readahead() should be called when a cache miss happened: + * it will submit the read. The readahead logic may decide to piggyback more + * pages onto the read request if access patterns suggest it will improve + * performance. + */ +static inline +void page_cache_sync_readahead(struct address_space *mapping, + struct file_ra_state *ra, struct file *file, pgoff_t index, + unsigned long req_count) +{ + DEFINE_READAHEAD(ractl, file, mapping, index); + page_cache_sync_ra(&ractl, ra, req_count); +} + +/** + * page_cache_async_readahead - file readahead for marked pages + * @mapping: address_space which holds the pagecache and I/O vectors + * @ra: file_ra_state which holds the readahead state + * @file: Used by the filesystem for authentication. + * @page: The page at @index which triggered the readahead call. + * @index: Index of first page to be read. + * @req_count: Total number of pages being read by the caller. + * + * page_cache_async_readahead() should be called when a page is used which + * is marked as PageReadahead; this is a marker to suggest that the application + * has used up enough of the readahead window that we should start pulling in + * more pages. 
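A usage sketch of the readahead_control wrappers above (hypothetical read-path helper; the function name is illustrative and not part of this series):

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Sketch of a read-path caller: start synchronous readahead on a cache
 * miss, or asynchronous readahead when the PageReadahead marker is hit.
 */
static struct page *demo_get_page(struct file *file, pgoff_t index)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		page_cache_sync_readahead(mapping, &file->f_ra, file,
					  index, VM_READAHEAD_PAGES);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		page_cache_async_readahead(mapping, &file->f_ra, file,
					   page, index, VM_READAHEAD_PAGES);
	}
	return page;	/* may still be NULL; the caller handles that */
}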
+ */ +static inline +void page_cache_async_readahead(struct address_space *mapping, + struct file_ra_state *ra, struct file *file, + struct page *page, pgoff_t index, unsigned long req_count) +{ + DEFINE_READAHEAD(ractl, file, mapping, index); + page_cache_async_ra(&ractl, ra, page, req_count); +} + /** * readahead_page - Get the next page to read. * @rac: The current readahead request. diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 1af5cb02ef7f..033ce74f02e8 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -51,6 +51,7 @@ extern const struct pci_ecam_ops pci_generic_ecam_ops; #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ +extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */ extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h index f42b0fd4b4bc..662881335c7e 100644 --- a/include/linux/pci-ep-cfs.h +++ b/include/linux/pci-ep-cfs.h @@ -19,7 +19,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group); #else static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name) { - return 0; + return NULL; } static inline void pci_ep_cfs_remove_epc_group(struct config_group *group) @@ -28,7 +28,7 @@ static inline void pci_ep_cfs_remove_epc_group(struct config_group *group) static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name) { - return 0; + return NULL; } static inline void pci_ep_cfs_remove_epf_group(struct config_group *group) diff --git a/include/linux/pci.h b/include/linux/pci.h index 835530605c0d..22207a79762c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -373,13 +373,14 @@ struct pci_dev { user sysfs */ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link bit manually */ - unsigned int d3_delay; /* D3->D0 transition time in ms */ + unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ #ifdef CONFIG_PCIEASPM struct pcie_link_state *link_state; /* ASPM link state */ unsigned int ltr_path:1; /* Latency Tolerance Reporting supported from root to here */ + int l1ss; /* L1SS Capability pointer */ #endif unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ @@ -445,6 +446,7 @@ struct pci_dev { unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ + unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -523,6 +525,7 @@ struct pci_host_bridge { struct device dev; struct pci_bus *bus; /* Root bus */ struct pci_ops *ops; + struct pci_ops *child_ops; void *sysdata; int busnr; struct list_head windows; /* resource_entry */ @@ -2034,10 +2037,6 @@ int pcibios_alloc_irq(struct pci_dev *dev); void pcibios_free_irq(struct pci_dev *dev); resource_size_t pcibios_default_alignment(void); -#ifdef CONFIG_HIBERNATE_CALLBACKS -extern struct dev_pm_ops pcibios_pm_ops; -#endif - #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) void __init pci_mmcfg_early_init(void); void __init pci_mmcfg_late_init(void); 
diff --git a/include/linux/pid.h b/include/linux/pid.h index 176d6cf80e7c..fa10acb8d6a4 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -77,6 +77,7 @@ extern const struct file_operations pidfd_fops; struct file; extern struct pid *pidfd_pid(const struct file *file); +struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); static inline struct pid *get_pid(struct pid *pid) { diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index 03e92c71b3fa..dd474dd44848 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -60,15 +60,16 @@ struct davinci_nand_pdata { /* platform_data */ struct mtd_partition *parts; unsigned nr_parts; - /* none == NAND_ECC_NONE (strongly *not* advised!!) - * soft == NAND_ECC_SOFT - * else == NAND_ECC_HW, according to ecc_bits + /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!) + * soft == NAND_ECC_ENGINE_TYPE_SOFT + * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits * * All DaVinci-family chips support 1-bit hardware ECC. * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - enum nand_ecc_mode ecc_mode; + enum nand_ecc_engine_type engine_type; + enum nand_ecc_placement ecc_placement; u8 ecc_bits; /* e.g. NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index 08675b16f9e1..25390fc3e795 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -49,7 +49,7 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - enum nand_ecc_mode ecc_mode; + enum nand_ecc_engine_type engine_type; int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 987d9652aa4e..111a40d0d3d5 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -32,6 +32,7 @@ enum bq27xxx_chip { BQ27621, BQ27Z561, BQ28Z610, + BQ34Z100, }; struct bq27xxx_device_info; diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index ae94dcebd936..45e228b353ea 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -31,22 +31,16 @@ enum polling_modes { CM_POLL_CHARGING_ONLY, }; -enum cm_event_types { - CM_EVENT_UNKNOWN = 0, - CM_EVENT_BATT_FULL, - CM_EVENT_BATT_IN, - CM_EVENT_BATT_OUT, - CM_EVENT_BATT_OVERHEAT, - CM_EVENT_BATT_COLD, - CM_EVENT_EXT_PWR_IN_OUT, - CM_EVENT_CHG_START_STOP, - CM_EVENT_OTHERS, +enum cm_batt_temp { + CM_BATT_OK = 0, + CM_BATT_OVERHEAT, + CM_BATT_COLD, }; /** * struct charger_cable * @extcon_name: the name of extcon device. - * @name: the name of charger cable(external connector). + * @name: the name of the cable connector * @extcon_dev: the extcon device. * @wq: the workqueue to control charger according to the state of * charger cable. If charger cable is attached, enable charger. 
@@ -62,9 +56,10 @@ enum cm_event_types { struct charger_cable { const char *extcon_name; const char *name; + struct extcon_dev *extcon_dev; + u64 extcon_type; /* The charger-manager use Extcon framework */ - struct extcon_specific_cable_nb extcon_dev; struct work_struct wq; struct notifier_block nb; @@ -131,11 +126,10 @@ struct charger_regulator { * @psy_name: the name of power-supply-class for charger manager * @polling_mode: * Determine which polling mode will be used - * @fullbatt_vchkdrop_ms: * @fullbatt_vchkdrop_uV: * Check voltage drop after the battery is fully charged. - * If it has dropped more than fullbatt_vchkdrop_uV after - * fullbatt_vchkdrop_ms, CM will restart charging. + * If it has dropped more than fullbatt_vchkdrop_uV + * CM will restart charging. * @fullbatt_uV: voltage in microvolt * If VBATT >= fullbatt_uV, it is assumed to be full. * @fullbatt_soc: state of Charge in % @@ -172,7 +166,6 @@ struct charger_desc { enum polling_modes polling_mode; unsigned int polling_interval_ms; - unsigned int fullbatt_vchkdrop_ms; unsigned int fullbatt_vchkdrop_uV; unsigned int fullbatt_uV; unsigned int fullbatt_soc; @@ -211,9 +204,6 @@ struct charger_desc { * @charger_stat: array of power_supply for chargers * @tzd_batt : thermal zone device for battery * @charger_enabled: the state of charger - * @fullbatt_vchk_jiffies_at: - * jiffies at the time full battery check will occur. - * @fullbatt_vchk_work: work queue for full battery check * @emergency_stop: * When setting true, stop charging * @psy_name_buf: the name of power-supply-class for charger manager @@ -224,6 +214,7 @@ struct charger_desc { * saved status of battery before entering suspend-to-RAM * @charging_start_time: saved start time of enabling charging * @charging_end_time: saved end time of disabling charging + * @battery_status: Current battery status */ struct charger_manager { struct list_head entry; @@ -235,9 +226,6 @@ struct charger_manager { #endif bool charger_enabled; - unsigned long fullbatt_vchk_jiffies_at; - struct delayed_work fullbatt_vchk_work; - int emergency_stop; char psy_name_buf[PSY_NAME_MAX + 1]; @@ -246,13 +234,8 @@ struct charger_manager { u64 charging_start_time; u64 charging_end_time; + + int battery_status; }; -#if IS_ENABLED(CONFIG_CHARGER_MANAGER) -extern void cm_notify_event(struct power_supply *psy, - enum cm_event_types type, char *msg); -#else -static inline void cm_notify_event(struct power_supply *psy, - enum cm_event_types type, char *msg) { } -#endif #endif /* _CHARGER_MANAGER_H */ diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h index 5a5a8de98181..c0b7657ac1df 100644 --- a/include/linux/power/gpio-charger.h +++ b/include/linux/power/gpio-charger.h @@ -13,18 +13,12 @@ * struct gpio_charger_platform_data - platform_data for gpio_charger devices * @name: Name for the chargers power_supply device * @type: Type of the charger - * @gpio: GPIO which is used to indicate the chargers status - * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0 * @supplied_to: Array of battery names to which this chargers supplies power * @num_supplicants: Number of entries in the supplied_to array */ struct gpio_charger_platform_data { const char *name; enum power_supply_type type; - - int gpio; - int gpio_active_low; - char **supplied_to; size_t num_supplicants; }; diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h deleted file mode 100644 index e0b687a4d20c..000000000000 --- 
a/include/linux/power/smb347-charger.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Summit Microelectronics SMB347 Battery Charger Driver - * - * Copyright (C) 2011, Intel Corporation - * - * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com> - * Mika Westerberg <mika.westerberg@linux.intel.com> - */ - -#ifndef SMB347_CHARGER_H -#define SMB347_CHARGER_H - -#include <linux/types.h> -#include <linux/power_supply.h> - -enum { - /* use the default compensation method */ - SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1, - - SMB347_SOFT_TEMP_COMPENSATE_NONE, - SMB347_SOFT_TEMP_COMPENSATE_CURRENT, - SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE, -}; - -/* Use default factory programmed value for hard/soft temperature limit */ -#define SMB347_TEMP_USE_DEFAULT -273 - -/* - * Charging enable can be controlled by software (via i2c) by - * smb347-charger driver or by EN pin (active low/high). - */ -enum smb347_chg_enable { - SMB347_CHG_ENABLE_SW, - SMB347_CHG_ENABLE_PIN_ACTIVE_LOW, - SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH, -}; - -/** - * struct smb347_charger_platform_data - platform data for SMB347 charger - * @battery_info: Information about the battery - * @max_charge_current: maximum current (in uA) the battery can be charged - * @max_charge_voltage: maximum voltage (in uV) the battery can be charged - * @pre_charge_current: current (in uA) to use in pre-charging phase - * @termination_current: current (in uA) used to determine when the - * charging cycle terminates - * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to - * pre-charge to fast charge mode - * @mains_current_limit: maximum input current drawn from AC/DC input (in uA) - * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB - * input - * @chip_temp_threshold: die temperature where device starts limiting charge - * current [%100 - %130] (in degree C) - * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C), - * granularity is 5 deg C. - * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C), - * granularity is 5 deg C. - * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C), - * granularity is 5 deg C. - * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C), - * granularity is 5 deg C. - * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit - * @soft_temp_limit_compensation: compensation method when soft temperature - * limit is hit - * @charge_current_compensation: current (in uA) for charging compensation - * current when temperature hits soft limits - * @use_mains: AC/DC input can be used - * @use_usb: USB input can be used - * @use_usb_otg: USB OTG output can be used (not implemented yet) - * @irq_gpio: GPIO number used for interrupts (%-1 if not used) - * @enable_control: how charging enable/disable is controlled - * (driver/pin controls) - * - * @use_main, @use_usb, and @use_usb_otg are means to enable/disable - * hardware support for these. This is useful when we want to have for - * example OTG charging controlled via OTG transceiver driver and not by - * the SMB347 hardware. - * - * Hard and soft temperature limit values are given as described in the - * device data sheet and assuming NTC beta value is %3750. Even if this is - * not the case, these values should be used. They can be mapped to the - * corresponding NTC beta values with the help of table %2 in the data - * sheet. 
So for example if NTC beta is %3375 and we want to program hard - * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50. - * - * If zero value is given in any of the current and voltage values, the - * factory programmed default will be used. For soft/hard temperature - * values, pass in %SMB347_TEMP_USE_DEFAULT instead. - */ -struct smb347_charger_platform_data { - struct power_supply_info battery_info; - unsigned int max_charge_current; - unsigned int max_charge_voltage; - unsigned int pre_charge_current; - unsigned int termination_current; - unsigned int pre_to_fast_voltage; - unsigned int mains_current_limit; - unsigned int usb_hc_current_limit; - unsigned int chip_temp_threshold; - int soft_cold_temp_limit; - int soft_hot_temp_limit; - int hard_cold_temp_limit; - int hard_hot_temp_limit; - bool suspend_on_hard_temp_limit; - unsigned int soft_temp_limit_compensation; - unsigned int charge_current_compensation; - bool use_mains; - bool use_usb; - bool use_usb_otg; - int irq_gpio; - enum smb347_chg_enable enable_control; -}; - -#endif /* SMB347_CHARGER_H */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 97cc4b85bf61..81a55e974feb 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -186,6 +186,7 @@ enum power_supply_type { POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */ POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */ POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ + POWER_SUPPLY_TYPE_WIRELESS, /* Wireless */ }; enum power_supply_usb_type { @@ -365,6 +366,12 @@ struct power_supply_battery_info { int constant_charge_voltage_max_uv; /* microVolts */ int factory_internal_resistance_uohm; /* microOhms */ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */ + int temp_ambient_alert_min; /* celsius */ + int temp_ambient_alert_max; /* celsius */ + int temp_alert_min; /* celsius */ + int temp_alert_max; /* celsius */ + int temp_min; /* celsius */ + int temp_max; /* celsius */ struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; struct power_supply_resistance_temp_table *resist_table; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 2df965cd0974..270cab43ca3d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -30,6 +30,7 @@ struct proc_ops { unsigned int proc_flags; int (*proc_open)(struct inode *, struct file *); ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); loff_t (*proc_lseek)(struct file *, loff_t, int); int (*proc_release)(struct inode *, struct file *); diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index f464d85e88a4..aeb242cefebf 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -242,10 +242,8 @@ struct qed_rdma_register_tid_in_params { bool pbl_two_level; u8 pbl_page_size_log; u8 page_size_log; - u32 fbo; u64 length; u64 vaddr; - bool zbva; bool phy_mr; bool dma_mr; diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 072da2f6da37..0d5564a59a59 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -20,7 +20,8 @@ enum qede_rdma_event { QEDE_UP, QEDE_DOWN, QEDE_CHANGE_ADDR, - QEDE_CLOSE + QEDE_CLOSE, + QEDE_CHANGE_MTU, }; struct qede_rdma_event_work { @@ -54,6 +55,7 @@ void 
qede_rdma_dev_event_open(struct qede_dev *dev); void qede_rdma_dev_event_close(struct qede_dev *dev); void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery); void qede_rdma_event_changeaddr(struct qede_dev *edr); +void qede_rdma_event_change_mtu(struct qede_dev *edev); #else static inline int qede_rdma_dev_add(struct qede_dev *dev, diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index c2a9f7c90727..64ad900ac742 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -11,6 +11,7 @@ #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/percpu.h> #include <linux/preempt.h> #include <linux/rcupdate.h> #include <linux/spinlock.h> @@ -376,7 +377,7 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) * radix_tree_next_slot - find next slot in chunk * * @slot: pointer to current slot - * @iter: pointer to interator state + * @iter: pointer to iterator state * @flags: RADIX_TREE_ITER_*, should be constant * Returns: pointer to next slot, or NULL if there no more left * diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 7a6fc9956510..f8633d37e358 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -63,9 +63,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \ "RCU-list traversed in non-reader section!"); \ }) + +#define __list_check_srcu(cond) \ + ({ \ + RCU_LOCKDEP_WARN(!(cond), \ + "RCU-list traversed without holding the required lock!");\ + }) #else #define __list_check_rcu(dummy, cond, extra...) \ ({ check_arg_count_one(extra); }) + +#define __list_check_srcu(cond) ({ }) #endif /* @@ -386,6 +394,25 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** + * list_for_each_entry_srcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * @cond: lockdep expression for the lock required to traverse the list. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by srcu_read_lock(). + * The lockdep expression srcu_read_lock_held() can be passed as the + * cond argument from read side. + */ +#define list_for_each_entry_srcu(pos, head, member, cond) \ + for (__list_check_srcu(cond), \ + pos = list_entry_rcu((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) + +/** * list_entry_lockless - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. @@ -684,6 +711,27 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, &(pos)->member)), typeof(*(pos)), member)) /** + * hlist_for_each_entry_srcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * @cond: lockdep expression for the lock required to traverse the list. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by srcu_read_lock(). + * The lockdep expression srcu_read_lock_held() can be passed as the + * cond argument from read side. 
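A usage sketch for the new SRCU-aware list iterators above (the demo structure, list and srcu_struct are hypothetical):

#include <linux/rculist.h>
#include <linux/srcu.h>

struct demo_item {
	struct list_head node;
	int value;
};

static LIST_HEAD(demo_list);
DEFINE_STATIC_SRCU(demo_srcu);

/* Reader side: traversal is legal as long as demo_srcu is read-held. */
static int demo_sum(void)
{
	struct demo_item *item;
	int idx, sum = 0;

	idx = srcu_read_lock(&demo_srcu);
	list_for_each_entry_srcu(item, &demo_list, node,
				 srcu_read_lock_held(&demo_srcu))
		sum += item->value;
	srcu_read_unlock(&demo_srcu, idx);

	return sum;
}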
+ */ +#define hlist_for_each_entry_srcu(pos, head, member, cond) \ + for (__list_check_srcu(cond), \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ + typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ + &(pos)->member)), typeof(*(pos)), member)) + +/** * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) * @pos: the type * to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d15d46db61f7..7c1ceff02852 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -55,6 +55,12 @@ void __rcu_read_unlock(void); #else /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TINY_RCU +#define rcu_read_unlock_strict() do { } while (0) +#else +void rcu_read_unlock_strict(void); +#endif + static inline void __rcu_read_lock(void) { preempt_disable(); @@ -63,6 +69,7 @@ static inline void __rcu_read_lock(void) static inline void __rcu_read_unlock(void) { preempt_enable(); + rcu_read_unlock_strict(); } static inline int rcu_preempt_depth(void) @@ -709,8 +716,8 @@ static inline void rcu_read_lock_bh(void) "rcu_read_lock_bh() used illegally while idle"); } -/* - * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section +/** + * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ @@ -751,10 +758,10 @@ static inline notrace void rcu_read_lock_sched_notrace(void) __acquire(RCU_SCHED); } -/* - * rcu_read_unlock_sched - marks the end of a RCU-classic critical section +/** + * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section * - * See rcu_read_lock_sched for more information. + * See rcu_read_lock_sched() for more information. */ static inline void rcu_read_unlock_sched(void) { @@ -945,7 +952,7 @@ static inline void rcu_head_init(struct rcu_head *rhp) } /** - * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? + * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. * @f: The function passed to call_rcu() along with @rhp. 
* diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 5cc9637cac16..7c1ecdb356d8 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -103,7 +103,6 @@ static inline void rcu_scheduler_starting(void) { } static inline void rcu_end_inkernel_boot(void) { } static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } -static inline bool __rcu_is_watching(void) { return true; } static inline void rcu_momentary_dyntick_idle(void) { } static inline void kfree_rcu_scheduler_running(void) { } static inline bool rcu_gp_might_be_stalled(void) { return false; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d2f4064ebd1d..59eb5cd567d7 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -64,7 +64,6 @@ extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); -bool __rcu_is_watching(void); #ifndef CONFIG_PREEMPTION void rcu_all_qs(void); #endif diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 2fa68bf5aa4f..3fa3ba6498e8 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -442,16 +442,16 @@ enum rproc_crash_type { /** * enum rproc_dump_mechanism - Coredump options for core - * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with + * @RPROC_COREDUMP_DISABLED: Don't perform any dump + * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with recovery * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall recovery until all segments are read - * @RPROC_COREDUMP_DISABLED: Don't perform any dump */ enum rproc_dump_mechanism { - RPROC_COREDUMP_DEFAULT, - RPROC_COREDUMP_INLINE, RPROC_COREDUMP_DISABLED, + RPROC_COREDUMP_ENABLED, + RPROC_COREDUMP_INLINE, }; /** diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 45cf7b69d852..36c47e7e66a2 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -165,6 +165,22 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, #define for_each_sgtable_dma_sg(sgt, sg, i) \ for_each_sg((sgt)->sgl, sg, (sgt)->nents, i) +static inline void __sg_chain(struct scatterlist *chain_sg, + struct scatterlist *sgl) +{ + /* + * offset and length are unused for chain entry. Clear them. + */ + chain_sg->offset = 0; + chain_sg->length = 0; + + /* + * Set lowest bit to indicate a link pointer, and make sure to clear + * the termination bit if it happens to be set. + */ + chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END; +} + /** * sg_chain - Chain two sglists together * @prv: First scatterlist @@ -178,18 +194,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, struct scatterlist *sgl) { - /* - * offset and length are unused for chain entry. Clear them. - */ - prv[prv_nents - 1].offset = 0; - prv[prv_nents - 1].length = 0; - - /* - * Set lowest bit to indicate a link pointer, and make sure to clear - * the termination bit if it happens to be set. 
- */ - prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN) - & ~SG_END; + __sg_chain(&prv[prv_nents - 1], sgl); } /** @@ -286,10 +291,11 @@ void sg_free_table(struct sg_table *); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - gfp_t gfp_mask); +struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + struct scatterlist *prv, unsigned int left_pages, + gfp_t gfp_mask); int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask); diff --git a/include/linux/sched.h b/include/linux/sched.h index 9030f3abd969..063cd120b459 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1013,7 +1013,7 @@ struct task_struct { struct held_lock held_locks[MAX_LOCK_DEPTH]; #endif -#ifdef CONFIG_UBSAN +#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) unsigned int in_ubsan; #endif diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 15bfb06f2884..d5ece7a9a403 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,31 +49,6 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } -/* - * This has to be called after a get_task_mm()/mmget_not_zero() - * followed by taking the mmap_lock for writing before modifying the - * vmas or anything the coredump pretends not to change from under it. - * - * It also has to be called when mmgrab() is used in the context of - * the process, but then the mm_count refcount is transferred outside - * the context of the process to run down_write() on that pinned mm. - * - * NOTE: find_extend_vma() called from GUP context is the only place - * that can modify the "mm" (notably the vm_start/end) under mmap_lock - * for reading and outside the context of the process, so it is also - * the only case that holds the mmap_lock for reading that must call - * this function. Generally if the mmap_lock is hold for reading - * there's no need of this check after get_task_mm()/mmget_not_zero(). - * - * This function can be obsoleted and the check can be removed, after - * the coredump code will hold the mmap_lock for writing before - * invoking the ->core_dump methods. - */ -static inline bool mmget_still_valid(struct mm_struct *mm) -{ - return likely(!mm->core_state); -} - /** * mmget() - Pin the address space associated with a &struct mm_struct. * @mm: The address space to pin. @@ -304,39 +279,38 @@ static inline void memalloc_nocma_restore(unsigned int flags) #endif #ifdef CONFIG_MEMCG +DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); /** - * memalloc_use_memcg - Starts the remote memcg charging scope. + * set_active_memcg - Starts the remote memcg charging scope. * @memcg: memcg to charge. * * This function marks the beginning of the remote memcg charging scope. All the * __GFP_ACCOUNT allocations till the end of the scope will be charged to the * given memcg. * - * NOTE: This function is not nesting safe. + * NOTE: This function can nest. Users must save the return value and + * reset the previous value after their own charging scope is over. 
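A minimal sketch of the nesting-safe pattern described above (hypothetical helper; the memcg pointer is assumed to be pinned by the caller):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *demo_alloc_for_memcg(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old_memcg;
	void *buf;

	old_memcg = set_active_memcg(memcg);
	/* __GFP_ACCOUNT allocations in this scope are charged to @memcg. */
	buf = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);	/* restore the previous scope */

	return buf;
}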
*/ -static inline void memalloc_use_memcg(struct mem_cgroup *memcg) +static inline struct mem_cgroup * +set_active_memcg(struct mem_cgroup *memcg) { - WARN_ON_ONCE(current->active_memcg); - current->active_memcg = memcg; -} + struct mem_cgroup *old; + + if (in_interrupt()) { + old = this_cpu_read(int_active_memcg); + this_cpu_write(int_active_memcg, memcg); + } else { + old = current->active_memcg; + current->active_memcg = memcg; + } -/** - * memalloc_unuse_memcg - Ends the remote memcg charging scope. - * - * This function marks the end of the remote memcg charging scope started by - * memalloc_use_memcg(). - */ -static inline void memalloc_unuse_memcg(void) -{ - current->active_memcg = NULL; + return old; } #else -static inline void memalloc_use_memcg(struct mem_cgroup *memcg) -{ -} - -static inline void memalloc_unuse_memcg(void) +static inline struct mem_cgroup * +set_active_memcg(struct mem_cgroup *memcg) { + return NULL; } #endif diff --git a/include/linux/smp.h b/include/linux/smp.h index 80d557ef8a11..9f13966d3d92 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -26,6 +26,9 @@ struct __call_single_data { struct { struct llist_node llist; unsigned int flags; +#ifdef CONFIG_64BIT + u16 src, dst; +#endif }; }; smp_call_func_t func; diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h index 364b3ae3e41d..2e8461af8df6 100644 --- a/include/linux/smp_types.h +++ b/include/linux/smp_types.h @@ -61,6 +61,9 @@ struct __call_single_node { unsigned int u_flags; atomic_t a_flags; }; +#ifdef CONFIG_64BIT + u16 src, dst; +#endif }; #endif /* __LINUX_SMP_TYPES_H */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index d796058cdff2..f07c334c599f 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -4,7 +4,7 @@ NetApp provides this source code under the GPL v2 License. The GPL v2 license is available at -http://opensource.org/licenses/gpl-license.php. +https://opensource.org/licenses/gpl-license.php. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 10891b70fc7b..d0965e2997b0 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -45,7 +45,8 @@ */ struct cache_head { struct hlist_node cache_list; - time64_t expiry_time; /* After time time, don't use the data */ + time64_t expiry_time; /* After time expiry_time, don't use + * the data */ time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was * sent, else this is when update was * received, though it is alway set to diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index bea40d9f03a1..43f854487539 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -143,7 +143,7 @@ typedef __be32 rpc_fraghdr; /* * Well-known netids. 
See: * - * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml + * https://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml */ #define RPCBIND_NETID_UDP "udp" #define RPCBIND_NETID_TCP "tcp" diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 5a6a81b7cd9f..9548d075e06d 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -234,12 +234,15 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst); extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); +extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, + size_t nbytes); extern void xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len); extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); +extern unsigned int xdr_page_pos(const struct xdr_stream *xdr); extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst); extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, @@ -249,6 +252,8 @@ extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); +extern uint64_t xdr_align_data(struct xdr_stream *, uint64_t, uint32_t); +extern uint64_t xdr_expand_hole(struct xdr_stream *, uint64_t, uint64_t); /** * xdr_stream_remaining - Return the number of bytes remaining in the stream diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 06db09875aa4..2eda7678fe1d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -879,6 +879,8 @@ asmlinkage long sys_munlockall(void); asmlinkage long sys_mincore(unsigned long start, size_t len, unsigned char __user * vec); asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); +asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, + size_t vlen, int behavior, unsigned int flags); asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags); diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 0fb93aafa478..0d848a1e9e62 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -13,9 +13,14 @@ init_task_work(struct callback_head *twork, task_work_func_t func) twork->func = func; } -#define TWA_RESUME 1 -#define TWA_SIGNAL 2 -int task_work_add(struct task_struct *task, struct callback_head *twork, int); +enum task_work_notify_mode { + TWA_NONE, + TWA_RESUME, + TWA_SIGNAL, +}; + +int task_work_add(struct task_struct *task, struct callback_head *twork, + enum task_work_notify_mode mode); struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 42ef807e5d84..d07ea27e72a9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -55,6 +55,7 @@ enum thermal_notify_event { 
THERMAL_DEVICE_UP, /* Thermal device is up after a down event */ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */ THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */ + THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */ }; struct thermal_zone_device_ops { @@ -84,12 +85,9 @@ struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); - int (*get_requested_power)(struct thermal_cooling_device *, - struct thermal_zone_device *, u32 *); - int (*state2power)(struct thermal_cooling_device *, - struct thermal_zone_device *, unsigned long, u32 *); - int (*power2state)(struct thermal_cooling_device *, - struct thermal_zone_device *, u32, unsigned long *); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *); }; struct thermal_cooling_device { diff --git a/include/linux/topology.h b/include/linux/topology.h index 608fa4aadf0e..ad03df1cc266 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -198,7 +198,7 @@ static inline int cpu_to_mem(int cpu) #define topology_die_cpumask(cpu) cpumask_of(cpu) #endif -#ifdef CONFIG_SCHED_SMT +#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask) static inline const struct cpumask *cpu_smt_mask(int cpu) { return topology_sibling_cpumask(cpu); diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 36fb3bbed6b2..b480e1a07ed8 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -178,9 +178,9 @@ static inline void set_notify_resume(struct task_struct *task) */ static inline void tracehook_notify_resume(struct pt_regs *regs) { + clear_thread_flag(TIF_NOTIFY_RESUME); /* - * The caller just cleared TIF_NOTIFY_RESUME. This barrier - * pairs with task_work_add()->set_notify_resume() after + * This barrier pairs with task_work_add()->set_notify_resume() after * hlist_add_head(task->task_works); */ smp_mb__after_atomic(); diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 1ae36bc8db35..b21a2de80c0f 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -2,12 +2,15 @@ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ +#include <linux/fault-inject-usercopy.h> #include <linux/instrumented.h> +#include <linux/minmax.h> #include <linux/sched.h> #include <linux/thread_info.h> #include <asm/uaccess.h> +#ifdef CONFIG_SET_FS /* * Force the uaccess routines to be wired up for actual userspace access, * overriding any possible set_fs(KERNEL_DS) still lingering around. 
Undone @@ -25,6 +28,23 @@ static inline void force_uaccess_end(mm_segment_t oldfs) { set_fs(oldfs); } +#else /* CONFIG_SET_FS */ +typedef struct { + /* empty dummy */ +} mm_segment_t; + +#define uaccess_kernel() (false) +#define user_addr_max() (TASK_SIZE_MAX) + +static inline mm_segment_t force_uaccess_begin(void) +{ + return (mm_segment_t) { }; +} + +static inline void force_uaccess_end(mm_segment_t oldfs) +{ +} +#endif /* CONFIG_SET_FS */ /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) @@ -83,6 +103,8 @@ static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); + if (should_fail_usercopy()) + return n; instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); @@ -104,6 +126,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { + if (should_fail_usercopy()) + return n; instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); @@ -113,6 +137,8 @@ static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + if (should_fail_usercopy()) + return n; instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); @@ -124,7 +150,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; might_fault(); - if (likely(access_ok(from, n))) { + if (!should_fail_usercopy() && likely(access_ok(from, n))) { instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } @@ -142,6 +168,8 @@ static inline __must_check unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + if (should_fail_usercopy()) + return n; if (access_ok(to, n)) { instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); diff --git a/include/linux/unicode.h b/include/linux/unicode.h index 990aa97d8049..74484d44c755 100644 --- a/include/linux/unicode.h +++ b/include/linux/unicode.h @@ -27,6 +27,9 @@ int utf8_normalize(const struct unicode_map *um, const struct qstr *str, int utf8_casefold(const struct unicode_map *um, const struct qstr *str, unsigned char *dest, size_t dlen); +int utf8_casefold_hash(const struct unicode_map *um, const void *salt, + struct qstr *str); + struct unicode_map *utf8_load(const char *version); void utf8_unload(struct unicode_map *um); diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index a4b65eaa0f62..5e0a7b7647c3 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -152,10 +152,26 @@ struct typec_altmode_driver { #define to_altmode_driver(d) container_of(d, struct typec_altmode_driver, \ driver) +/** + * typec_altmode_register_driver - registers a USB Type-C alternate mode + * device driver + * @drv: pointer to struct typec_altmode_driver + * + * These drivers will be bind to the partner alternate mode devices. They will + * handle all SVID specific communication. 
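A skeleton showing how a driver would use the registration helpers documented above (SVID value, names and callback bodies are placeholders, not taken from this series):

#include <linux/module.h>
#include <linux/usb/typec_altmode.h>

static int demo_altmode_probe(struct typec_altmode *alt)
{
	/* enter the mode, set up per-partner state, ... */
	return 0;
}

static void demo_altmode_remove(struct typec_altmode *alt)
{
	/* exit the mode and tear down state */
}

static const struct typec_device_id demo_altmode_ids[] = {
	{ 0xff01, TYPEC_ANY_MODE },	/* placeholder SVID */
	{ }
};

static struct typec_altmode_driver demo_altmode_driver = {
	.id_table = demo_altmode_ids,
	.probe = demo_altmode_probe,
	.remove = demo_altmode_remove,
	.driver = {
		.name = "demo_altmode",
		.owner = THIS_MODULE,
	},
};

static int __init demo_altmode_init(void)
{
	return typec_altmode_register_driver(&demo_altmode_driver);
}
module_init(demo_altmode_init);

static void __exit demo_altmode_exit(void)
{
	typec_altmode_unregister_driver(&demo_altmode_driver);
}
module_exit(demo_altmode_exit);

MODULE_LICENSE("GPL");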
+ */ #define typec_altmode_register_driver(drv) \ __typec_altmode_register_driver(drv, THIS_MODULE) int __typec_altmode_register_driver(struct typec_altmode_driver *drv, struct module *module); +/** + * typec_altmode_unregister_driver - unregisters a USB Type-C alternate mode + * device driver + * @drv: pointer to struct typec_altmode_driver + * + * These drivers will be bind to the partner alternate mode devices. They will + * handle all SVID specific communication. + */ void typec_altmode_unregister_driver(struct typec_altmode_driver *drv); #define module_typec_altmode_driver(__typec_altmode_driver) \ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 4b8e38c5c4d8..8519b3ae5d52 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -557,4 +557,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, virtio_cread_le((vdev), structname, member, ptr); \ _r; \ }) + +#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS +int arch_has_restricted_virtio_memory_access(void); +#else +static inline int arch_has_restricted_virtio_memory_access(void) +{ + return 0; +} +#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */ + #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0221f852a7e1..938eaf9517e2 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -24,6 +24,7 @@ struct notifier_block; /* in notifier.h */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ +#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */ /* * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. @@ -121,6 +122,7 @@ extern void vfree_atomic(const void *addr); extern void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot); +void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot); extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, @@ -167,6 +169,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller); +void free_vm_area(struct vm_struct *area); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); @@ -202,10 +205,6 @@ static inline void set_vm_flush_reset_perms(void *addr) } #endif -/* Allocate/destroy a 'vmalloc' VM area. 
*/ -extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); -extern void free_vm_area(struct vm_struct *area); - /* for /dev/kmem */ extern long vread(char *buf, char *addr, unsigned long count); extern long vwrite(char *buf, char *addr, unsigned long count); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 7557c1070fd7..322dcbfcc933 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -28,7 +28,7 @@ struct reclaim_stat { unsigned nr_writeback; unsigned nr_immediate; unsigned nr_pageout; - unsigned nr_activate[2]; + unsigned nr_activate[ANON_AND_FILE]; unsigned nr_ref_keep; unsigned nr_unmap_fail; unsigned nr_lazyfree_fail; diff --git a/include/linux/xarray.h b/include/linux/xarray.h index b4d70e7568b2..92c0160b3352 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1286,6 +1286,8 @@ static inline bool xa_is_advanced(const void *entry) */ typedef void (*xa_update_node_t)(struct xa_node *node); +void xa_delete_node(struct xa_node *, xa_update_node_t); + /* * The xa_state is opaque to its users. It contains various different pieces * of state involved in the current operation on the XArray. It should be @@ -1505,6 +1507,28 @@ void xas_pause(struct xa_state *); void xas_create_range(struct xa_state *); +#ifdef CONFIG_XARRAY_MULTI +int xa_get_order(struct xarray *, unsigned long index); +void xas_split(struct xa_state *, void *entry, unsigned int order); +void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); +#else +static inline int xa_get_order(struct xarray *xa, unsigned long index) +{ + return 0; +} + +static inline void xas_split(struct xa_state *xas, void *entry, + unsigned int order) +{ + xas_store(xas, entry); +} + +static inline void xas_split_alloc(struct xa_state *xas, void *entry, + unsigned int order, gfp_t gfp) +{ +} +#endif + /** * xas_reload() - Refetch an entry from the xarray. * @xas: XArray operation state. @@ -1522,10 +1546,21 @@ void xas_create_range(struct xa_state *); static inline void *xas_reload(struct xa_state *xas) { struct xa_node *node = xas->xa_node; - - if (node) - return xa_entry(xas->xa, node, xas->xa_offset); - return xa_head(xas->xa); + void *entry; + char offset; + + if (!node) + return xa_head(xas->xa); + if (IS_ENABLED(CONFIG_XARRAY_MULTI)) { + offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK; + entry = xa_entry(xas->xa, node, offset); + if (!xa_is_sibling(entry)) + return entry; + offset = xa_to_sibling(entry); + } else { + offset = xas->xa_offset; + } + return xa_entry(xas->xa, node, offset); } /** @@ -1714,13 +1749,12 @@ enum { * @xas: XArray operation state. * @entry: Entry retrieved from the array. * - * The loop body will be executed for each entry in the XArray that lies - * within the range specified by @xas. If the loop completes successfully, - * any entries that lie in this range will be replaced by @entry. The caller - * may break out of the loop; if they do so, the contents of the XArray will - * be unchanged. The operation may fail due to an out of memory condition. - * The caller may also call xa_set_err() to exit the loop while setting an - * error to record the reason. + * The loop body will be executed for each entry in the XArray that + * lies within the range specified by @xas. If the loop terminates + * normally, @entry will be %NULL. The user may break out of the loop, + * which will leave @entry set to the conflicting entry. The caller + * may also call xa_set_err() to exit the loop while setting an error + * to record the reason. 
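A usage sketch of the loop documented above (hypothetical helper that refuses to overwrite an existing entry before storing):

#include <linux/xarray.h>

static int demo_store_unless_present(struct xarray *xa, unsigned long index,
				     void *new, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *entry;

	do {
		xas_lock(&xas);
		xas_for_each_conflict(&xas, entry) {
			/* any entry found here conflicts with the store */
			xas_set_err(&xas, -EEXIST);
			break;
		}
		if (!xas_error(&xas))
			xas_store(&xas, new);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}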
*/ #define xas_for_each_conflict(xas, entry) \ while ((entry = xas_find_conflict(xas))) diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index 357ef1aadbc0..e013736e275d 100644 --- a/include/misc/ocxl.h +++ b/include/misc/ocxl.h @@ -460,14 +460,8 @@ int ocxl_link_remove_pe(void *link_handle, int pasid); * Allocate an AFU interrupt associated to the link. * * 'hw_irq' is the hardware interrupt number - * 'obj_handle' is the 64-bit object handle to be passed to the AFU to - * trigger the interrupt. - * On P9, 'obj_handle' is an address, which, if written, triggers the - * interrupt. It is an MMIO address which needs to be remapped (one - * page). - */ -int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, - u64 *obj_handle); + */ +int ocxl_link_irq_alloc(void *link_handle, int *hw_irq); /* * Free a previously allocated AFU interrupt diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index aee47f2b5709..661edfc8722e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -474,6 +474,7 @@ struct ieee80211_sta_s1g_cap { * @n_bitrates: Number of bitrates in @bitrates * @ht_cap: HT capabilities in this band * @vht_cap: VHT capabilities in this band + * @s1g_cap: S1G capabilities in this band * @edmg_cap: EDMG capabilities in this band * @s1g_cap: S1G capabilities in this band (S1B band only, of course) * @n_iftype_data: number of iftype data entries diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index 36c5c5e38c1d..0bdbc0d17d2f 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -361,6 +361,7 @@ TRACE_EVENT(aer_event, EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \ EM ( MF_MSG_HUGE, "huge page" ) \ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \ + EM ( MF_MSG_NON_PMD_HUGE, "non-pmd-sized huge page" ) \ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \ @@ -373,6 +374,8 @@ TRACE_EVENT(aer_event, EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \ EM ( MF_MSG_BUDDY, "free buddy page" ) \ EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \ + EM ( MF_MSG_DAX, "dax page" ) \ + EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \ EMe ( MF_MSG_UNKNOWN, "unknown page" ) /* diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 66a8f369a2fa..bae29f50adff 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -110,5 +110,8 @@ const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index); void rdma_put_gid_attr(const struct ib_gid_attr *attr); void rdma_hold_gid_attr(const struct ib_gid_attr *attr); +ssize_t rdma_query_gid_table(struct ib_device *device, + struct ib_uverbs_gid_entry *entries, + size_t max_entries); #endif /* _IB_CACHE_H */ diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 382427add677..e23eb357b761 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -14,9 +14,6 @@ #include <rdma/ib_sa.h> #include <rdma/rdma_cm.h> -/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */ -extern struct class cm_class; - enum ib_cm_state { IB_CM_IDLE, IB_CM_LISTEN, diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 71f573a418bf..70597508c765 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -17,6 +17,7 @@ struct ib_umem_odp; struct ib_umem { struct ib_device *ibdev; struct mm_struct *owning_mm; + u64 iova; size_t length; unsigned long address; u32 writable : 1; @@ -33,19 +34,46 @@ static 
inline int ib_umem_offset(struct ib_umem *umem) return umem->address & ~PAGE_MASK; } +static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, + unsigned long pgsz) +{ + return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - + ALIGN_DOWN(umem->iova, pgsz))) / + pgsz; +} + static inline size_t ib_umem_num_pages(struct ib_umem *umem) { - return (ALIGN(umem->address + umem->length, PAGE_SIZE) - - ALIGN_DOWN(umem->address, PAGE_SIZE)) >> - PAGE_SHIFT; + return ib_umem_num_dma_blocks(umem, PAGE_SIZE); +} + +static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, + struct ib_umem *umem, + unsigned long pgsz) +{ + __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz); } +/** + * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem + * @umem: umem to iterate over + * @pgsz: Page size to split the list into + * + * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The + * returned DMA blocks will be aligned to pgsz and span the range: + * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz) + * + * Performs exactly ib_umem_num_dma_blocks() iterations. + */ +#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \ + for (__rdma_umem_block_iter_start(biter, umem, pgsz); \ + __rdma_block_iter_next(biter);) + #ifdef CONFIG_INFINIBAND_USER_MEM struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access); void ib_umem_release(struct ib_umem *umem); -int ib_umem_page_count(struct ib_umem *umem); int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length); unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, @@ -63,15 +91,15 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device, return ERR_PTR(-EINVAL); } static inline void ib_umem_release(struct ib_umem *umem) { } -static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; } static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) { return -EINVAL; } -static inline int ib_umem_find_best_pgsz(struct ib_umem *umem, - unsigned long pgsz_bitmap, - unsigned long virt) { - return -EINVAL; +static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt) +{ + return 0; } #endif /* CONFIG_INFINIBAND_USER_MEM */ diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index d16d2c17e733..0844c1d05ac6 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -14,17 +14,13 @@ struct ib_umem_odp { struct mmu_interval_notifier notifier; struct pid *tgid; + /* An array of the pfns included in the on-demand paging umem. */ + unsigned long *pfn_list; + /* - * An array of the pages included in the on-demand paging umem. - * Indices of pages that are currently not mapped into the device will - * contain NULL. - */ - struct page **page_list; - /* - * An array of the same size as page_list, with DMA addresses mapped - * for pages the pages in page_list. The lower two bits designate - * access permissions. See ODP_READ_ALLOWED_BIT and - * ODP_WRITE_ALLOWED_BIT. + * An array with DMA addresses mapped for pfns in pfn_list. + * The lower two bits designate access permissions. + * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT. 
*/ dma_addr_t *dma_list; /* @@ -97,9 +93,8 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr, const struct mmu_interval_notifier_ops *ops); void ib_umem_odp_release(struct ib_umem_odp *umem_odp); -int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, - u64 bcnt, u64 access_mask, - unsigned long current_seq); +int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset, + u64 bcnt, u64 access_mask, bool fault); void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, u64 bound); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c0b2fa7e9b95..9bf6c319a670 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -138,10 +138,9 @@ union ib_gid { extern union ib_gid zgid; enum ib_gid_type { - /* If link layer is Ethernet, this is RoCE V1 */ - IB_GID_TYPE_IB = 0, - IB_GID_TYPE_ROCE = 0, - IB_GID_TYPE_ROCE_UDP_ENCAP = 1, + IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB, + IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1, + IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2, IB_GID_TYPE_SIZE }; @@ -180,7 +179,7 @@ rdma_node_get_transport(unsigned int node_type); enum rdma_network_type { RDMA_NETWORK_IB, - RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, + RDMA_NETWORK_ROCE_V1, RDMA_NETWORK_IPV4, RDMA_NETWORK_IPV6 }; @@ -190,9 +189,10 @@ static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type net if (network_type == RDMA_NETWORK_IPV4 || network_type == RDMA_NETWORK_IPV6) return IB_GID_TYPE_ROCE_UDP_ENCAP; - - /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ - return IB_GID_TYPE_IB; + else if (network_type == RDMA_NETWORK_ROCE_V1) + return IB_GID_TYPE_ROCE; + else + return IB_GID_TYPE_IB; } static inline enum rdma_network_type @@ -201,6 +201,9 @@ rdma_gid_attr_network_type(const struct ib_gid_attr *attr) if (attr->gid_type == IB_GID_TYPE_IB) return RDMA_NETWORK_IB; + if (attr->gid_type == IB_GID_TYPE_ROCE) + return RDMA_NETWORK_ROCE_V1; + if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) return RDMA_NETWORK_IPV4; else @@ -535,7 +538,8 @@ enum ib_port_speed { IB_SPEED_FDR10 = 8, IB_SPEED_FDR = 16, IB_SPEED_EDR = 32, - IB_SPEED_HDR = 64 + IB_SPEED_HDR = 64, + IB_SPEED_NDR = 128, }; /** @@ -669,7 +673,7 @@ struct ib_port_attr { u8 subnet_timeout; u8 init_type_reply; u8 active_width; - u8 active_speed; + u16 active_speed; u8 phys_state; u16 port_cap_flags2; }; @@ -952,13 +956,14 @@ enum ib_wc_status { const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); enum ib_wc_opcode { - IB_WC_SEND, - IB_WC_RDMA_WRITE, - IB_WC_RDMA_READ, - IB_WC_COMP_SWAP, - IB_WC_FETCH_ADD, - IB_WC_LSO, - IB_WC_LOCAL_INV, + IB_WC_SEND = IB_UVERBS_WC_SEND, + IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE, + IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ, + IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP, + IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD, + IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW, + IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV, + IB_WC_LSO = IB_UVERBS_WC_TSO, IB_WC_REG_MR, IB_WC_MASKED_COMP_SWAP, IB_WC_MASKED_FETCH_ADD, @@ -1291,6 +1296,7 @@ enum ib_wr_opcode { IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ, IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP, IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD, + IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW, IB_WR_LSO = IB_UVERBS_WR_TSO, IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV, IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV, @@ -1463,11 +1469,6 @@ enum rdma_remove_reason { RDMA_REMOVE_DRIVER_REMOVE, /* uobj is 
being cleaned-up before being committed */ RDMA_REMOVE_ABORT, - /* - * uobj has been fully created, with the uobj->object set, but is being - * cleaned up before being comitted - */ - RDMA_REMOVE_ABORT_HWOBJ, }; struct ib_rdmacg_object { @@ -1479,12 +1480,6 @@ struct ib_rdmacg_object { struct ib_ucontext { struct ib_device *device; struct ib_uverbs_file *ufile; - /* - * 'closing' can be read by the driver only during a destroy callback, - * it is set when we are closing the file descriptor and indicates - * that mm_sem may be locked. - */ - bool closing; bool cleanup_retryable; @@ -1863,17 +1858,6 @@ enum ib_flow_spec_type { #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10 -/* Flow steering rule priority is set according to it's domain. - * Lower domain value means higher priority. - */ -enum ib_flow_domain { - IB_FLOW_DOMAIN_USER, - IB_FLOW_DOMAIN_ETHTOOL, - IB_FLOW_DOMAIN_RFS, - IB_FLOW_DOMAIN_NIC, - IB_FLOW_DOMAIN_NUM /* Must be last */ -}; - enum ib_flow_flags { IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ @@ -2414,12 +2398,12 @@ struct ib_device_ops { void (*mmap_free)(struct rdma_user_mmap_entry *entry); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); - void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); + int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, struct ib_udata *udata); int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); - void (*destroy_ah)(struct ib_ah *ah, u32 flags); + int (*destroy_ah)(struct ib_ah *ah, u32 flags); int (*create_srq)(struct ib_srq *srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata); @@ -2427,7 +2411,7 @@ struct ib_device_ops { enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata); int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); - void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); + int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); struct ib_qp *(*create_qp)(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata); @@ -2439,7 +2423,7 @@ struct ib_device_ops { int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); - void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); + int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, @@ -2462,16 +2446,15 @@ struct ib_device_ops { unsigned int *sg_offset); int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status); - struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, - struct ib_udata *udata); + int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata); int (*dealloc_mw)(struct ib_mw *mw); int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); - void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); + int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata 
*udata); struct ib_flow *(*create_flow)(struct ib_qp *qp, struct ib_flow_attr *flow_attr, - int domain, struct ib_udata *udata); + struct ib_udata *udata); int (*destroy_flow)(struct ib_flow *flow_id); struct ib_flow_action *(*create_flow_action_esp)( struct ib_device *device, @@ -2496,13 +2479,12 @@ struct ib_device_ops { struct ib_wq *(*create_wq)(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata); - void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); + int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask, struct ib_udata *udata); - struct ib_rwq_ind_table *(*create_rwq_ind_table)( - struct ib_device *device, - struct ib_rwq_ind_table_init_attr *init_attr, - struct ib_udata *udata); + int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table, + struct ib_rwq_ind_table_init_attr *init_attr, + struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); struct ib_dm *(*alloc_dm)(struct ib_device *device, struct ib_ucontext *context, @@ -2514,7 +2496,7 @@ struct ib_device_ops { struct uverbs_attr_bundle *attrs); int (*create_counters)(struct ib_counters *counters, struct uverbs_attr_bundle *attrs); - void (*destroy_counters)(struct ib_counters *counters); + int (*destroy_counters)(struct ib_counters *counters); int (*read_counters)(struct ib_counters *counters, struct ib_counters_read_attr *counters_read_attr, struct uverbs_attr_bundle *attrs); @@ -2624,7 +2606,9 @@ struct ib_device_ops { DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); + DECLARE_RDMA_OBJ_SIZE(ib_mw); DECLARE_RDMA_OBJ_SIZE(ib_pd); + DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); DECLARE_RDMA_OBJ_SIZE(ib_srq); DECLARE_RDMA_OBJ_SIZE(ib_ucontext); DECLARE_RDMA_OBJ_SIZE(ib_xrcd); @@ -2798,7 +2782,8 @@ void ib_dealloc_device(struct ib_device *device); void ib_get_device_fw_str(struct ib_device *device, char *str); -int ib_register_device(struct ib_device *device, const char *name); +int ib_register_device(struct ib_device *device, const char *name, + struct device *dma_device); void ib_unregister_device(struct ib_device *device); void ib_unregister_driver(enum rdma_driver_id driver_id); void ib_unregister_device_and_put(struct ib_device *device); @@ -3352,30 +3337,6 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) } /** - * rdma_find_pg_bit - Find page bit given address and HW supported page sizes - * - * @addr: address - * @pgsz_bitmap: bitmap of HW supported page sizes - */ -static inline unsigned int rdma_find_pg_bit(unsigned long addr, - unsigned long pgsz_bitmap) -{ - unsigned long align; - unsigned long pgsz; - - align = addr & -addr; - - /* Find page bit such that addr is aligned to the highest supported - * HW page size - */ - pgsz = pgsz_bitmap & ~(-align << 1); - if (!pgsz) - return __ffs(pgsz_bitmap); - - return __fls(pgsz); -} - -/** * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not. 
* @device: Device * @port_num: 1 based Port number @@ -3472,12 +3433,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, #define ib_alloc_pd(device, flags) \ __ib_alloc_pd((device), (flags), KBUILD_MODNAME) -/** - * ib_dealloc_pd_user - Deallocate kernel/user PD - * @pd: The protection domain - * @udata: Valid user data or NULL for kernel objects - */ -void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); +int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); /** * ib_dealloc_pd - Deallocate kernel PD @@ -3487,7 +3443,9 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); */ static inline void ib_dealloc_pd(struct ib_pd *pd) { - ib_dealloc_pd_user(pd, NULL); + int ret = ib_dealloc_pd_user(pd, NULL); + + WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail"); } enum rdma_create_ah_flags { @@ -3615,9 +3573,11 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); * * NOTE: for user ah use rdma_destroy_ah_user with valid udata! */ -static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags) +static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags) { - return rdma_destroy_ah_user(ah, flags, NULL); + int ret = rdma_destroy_ah_user(ah, flags, NULL); + + WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail"); } struct ib_srq *ib_create_srq_user(struct ib_pd *pd, @@ -3671,9 +3631,11 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); * * NOTE: for user srq use ib_destroy_srq_user with valid udata! */ -static inline int ib_destroy_srq(struct ib_srq *srq) +static inline void ib_destroy_srq(struct ib_srq *srq) { - return ib_destroy_srq_user(srq, NULL); + int ret = ib_destroy_srq_user(srq, NULL); + + WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail"); } /** @@ -3817,46 +3779,15 @@ static inline int ib_post_recv(struct ib_qp *qp, return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); } -struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, - int nr_cqe, int comp_vector, - enum ib_poll_context poll_ctx, - const char *caller, struct ib_udata *udata); - -/** - * ib_alloc_cq_user: Allocate kernel/user CQ - * @dev: The IB device - * @private: Private data attached to the CQE - * @nr_cqe: Number of CQEs in the CQ - * @comp_vector: Completion vector used for the IRQs - * @poll_ctx: Context used for polling the CQ - * @udata: Valid user data or NULL for kernel objects - */ -static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev, - void *private, int nr_cqe, - int comp_vector, - enum ib_poll_context poll_ctx, - struct ib_udata *udata) -{ - return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, - KBUILD_MODNAME, udata); -} - -/** - * ib_alloc_cq: Allocate kernel CQ - * @dev: The IB device - * @private: Private data attached to the CQE - * @nr_cqe: Number of CQEs in the CQ - * @comp_vector: Completion vector used for the IRQs - * @poll_ctx: Context used for polling the CQ - * - * NOTE: for user cq use ib_alloc_cq_user with valid udata! 
- */ +struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, + int comp_vector, enum ib_poll_context poll_ctx, + const char *caller); static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx) { - return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, - NULL); + return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, + KBUILD_MODNAME); } struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, @@ -3878,26 +3809,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, KBUILD_MODNAME); } -/** - * ib_free_cq_user - Free kernel/user CQ - * @cq: The CQ to free - * @udata: Valid user data or NULL for kernel objects - * - * NOTE: This function shouldn't be called on shared CQs. - */ -void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); - -/** - * ib_free_cq - Free kernel CQ - * @cq: The CQ to free - * - * NOTE: for user cq use ib_free_cq_user with valid udata! - */ -static inline void ib_free_cq(struct ib_cq *cq) -{ - ib_free_cq_user(cq, NULL); -} - +void ib_free_cq(struct ib_cq *cq); int ib_process_cq_direct(struct ib_cq *cq, int budget); /** @@ -3955,7 +3867,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); */ static inline void ib_destroy_cq(struct ib_cq *cq) { - ib_destroy_cq_user(cq, NULL); + int ret = ib_destroy_cq_user(cq, NULL); + + WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); } /** @@ -4379,10 +4293,9 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr); -int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); +int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask); -int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size); @@ -4410,7 +4323,7 @@ void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); void ib_drain_qp(struct ib_qp *qp); -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); +int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width); static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) { @@ -4717,6 +4630,7 @@ bool rdma_dev_access_netns(const struct ib_device *device, const struct net *net); #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) /** diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index cf5da2ae49bf..c672ae1da26b 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -110,11 +110,14 @@ struct rdma_cm_id { u8 port_num; }; -struct rdma_cm_id *__rdma_create_id(struct net *net, - rdma_cm_event_handler event_handler, - void *context, enum rdma_ucm_port_space ps, - enum ib_qp_type qp_type, - const char *caller); +struct rdma_cm_id * +__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, + void *context, enum rdma_ucm_port_space ps, + enum ib_qp_type qp_type, const char *caller); +struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, + void *context, + enum rdma_ucm_port_space ps, + enum ib_qp_type qp_type); /** * rdma_create_id - Create an RDMA identifier. 
@@ -132,9 +135,9 @@ struct rdma_cm_id *__rdma_create_id(struct net *net, * The event handler callback serializes on the id's mutex and is * allowed to sleep. */ -#define rdma_create_id(net, event_handler, context, ps, qp_type) \ - __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \ - KBUILD_MODNAME) +#define rdma_create_id(net, event_handler, context, ps, qp_type) \ + __rdma_create_kernel_id(net, event_handler, context, ps, qp_type, \ + KBUILD_MODNAME) /** * rdma_destroy_id - Destroys an RDMA identifier. @@ -250,29 +253,12 @@ int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, */ int rdma_listen(struct rdma_cm_id *id, int backlog); -int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, - const char *caller); +int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); -int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, - const char *caller, struct rdma_ucm_ece *ece); - -/** - * rdma_accept - Called to accept a connection request or response. - * @id: Connection identifier associated with the request. - * @conn_param: Information needed to establish the connection. This must be - * provided if accepting a connection request. If accepting a connection - * response, this parameter must be NULL. - * - * Typically, this routine is only called by the listener to accept a connection - * request. It must also be called on the active side of a connection if the - * user is performing their own QP transitions. - * - * In the case of error, a reject message is sent to the remote side and the - * state of the qp associated with the id is modified to error, such that any - * previously posted receive buffers would be flushed. - */ -#define rdma_accept(id, conn_param) \ - __rdma_accept((id), (conn_param), KBUILD_MODNAME) +void rdma_lock_handler(struct rdma_cm_id *id); +void rdma_unlock_handler(struct rdma_cm_id *id); +int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, + struct rdma_ucm_ece *ece); /** * rdma_notify - Notifies the RDMA CM of an asynchronous event that has diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 7682d1bcf789..d3a1cc5be7bc 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -106,22 +106,11 @@ struct rdma_restrack_entry { int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type); - -void rdma_restrack_kadd(struct rdma_restrack_entry *res); -void rdma_restrack_uadd(struct rdma_restrack_entry *res); - -/** - * rdma_restrack_del() - delete object from the reource tracking database - * @res: resource entry - * @type: actual type of object to operate - */ -void rdma_restrack_del(struct rdma_restrack_entry *res); - /** * rdma_is_kernel_res() - check the owner of resource * @res: resource entry */ -static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res) +static inline bool rdma_is_kernel_res(const struct rdma_restrack_entry *res) { return !res->user; } @@ -138,14 +127,6 @@ int __must_check rdma_restrack_get(struct rdma_restrack_entry *res); */ int rdma_restrack_put(struct rdma_restrack_entry *res); -/** - * rdma_restrack_set_task() - set the task for this resource - * @res: resource entry - * @caller: kernel name, the current task will be used if the caller is NULL. - */ -void rdma_restrack_set_task(struct rdma_restrack_entry *res, - const char *caller); - /* * Helper functions for rdma drivers when filling out * nldev driver attributes. 
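[Editorial note, not part of the patch] The rdma_cm.h hunk above turns rdma_accept() into a real exported function and introduces rdma_lock_handler()/rdma_unlock_handler(); a ULP that accepts a connection outside of its CM event handler is now expected to hold the handler lock itself, while calls made from inside the handler callback need no extra locking because the CM core already holds it there. The following is a minimal illustrative sketch of that usage, assuming a listening side that either accepts inline or defers the request; the names my_cm_handler() and my_deferred_accept() and the conn_param values are hypothetical and do not come from this diff.

/* Illustrative sketch only; not taken from the patch above. */
#include <rdma/rdma_cm.h>

/* Inside the CM event handler the handler lock is already held by the
 * CM core, so rdma_accept() may be called directly. */
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rdma_conn_param param = {
		.responder_resources = 1,
		.initiator_depth = 1,
		.retry_count = 7,
	};

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
		return rdma_accept(id, &param);
	return 0;
}

/* Outside the handler (e.g. after deferring the request to a workqueue),
 * the caller must now take the handler lock around rdma_accept(). */
static int my_deferred_accept(struct rdma_cm_id *id,
			      struct rdma_conn_param *param)
{
	int ret;

	rdma_lock_handler(id);
	ret = rdma_accept(id, param);
	rdma_unlock_handler(id);
	return ret;
}

The explicit lock/unlock pair only matters for the deferred path; handlers that accept synchronously keep working unchanged, which is why the old __rdma_accept()/KBUILD_MODNAME macro indirection could be dropped.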
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 5f0c1cf1ea13..8eb49231c6bb 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -40,6 +40,7 @@ enum afs_server_trace { afs_server_trace_get_new_cbi, afs_server_trace_get_probe, afs_server_trace_give_up_cb, + afs_server_trace_purging, afs_server_trace_put_call, afs_server_trace_put_cbi, afs_server_trace_put_find_rsq, @@ -50,6 +51,7 @@ enum afs_server_trace { afs_server_trace_update, }; + enum afs_volume_trace { afs_volume_trace_alloc, afs_volume_trace_free, @@ -67,6 +69,46 @@ enum afs_volume_trace { afs_volume_trace_remove, }; +enum afs_cell_trace { + afs_cell_trace_alloc, + afs_cell_trace_free, + afs_cell_trace_get_queue_dns, + afs_cell_trace_get_queue_manage, + afs_cell_trace_get_queue_new, + afs_cell_trace_get_vol, + afs_cell_trace_insert, + afs_cell_trace_manage, + afs_cell_trace_put_candidate, + afs_cell_trace_put_destroy, + afs_cell_trace_put_queue_fail, + afs_cell_trace_put_queue_work, + afs_cell_trace_put_vol, + afs_cell_trace_see_source, + afs_cell_trace_see_ws, + afs_cell_trace_unuse_alias, + afs_cell_trace_unuse_check_alias, + afs_cell_trace_unuse_delete, + afs_cell_trace_unuse_fc, + afs_cell_trace_unuse_lookup, + afs_cell_trace_unuse_mntpt, + afs_cell_trace_unuse_no_pin, + afs_cell_trace_unuse_parse, + afs_cell_trace_unuse_pin, + afs_cell_trace_unuse_probe, + afs_cell_trace_unuse_sbi, + afs_cell_trace_unuse_ws, + afs_cell_trace_use_alias, + afs_cell_trace_use_check_alias, + afs_cell_trace_use_fc, + afs_cell_trace_use_fc_alias, + afs_cell_trace_use_lookup, + afs_cell_trace_use_mntpt, + afs_cell_trace_use_pin, + afs_cell_trace_use_probe, + afs_cell_trace_use_sbi, + afs_cell_trace_wait, +}; + enum afs_fs_operation { afs_FS_FetchData = 130, /* AFS Fetch file data */ afs_FS_FetchACL = 131, /* AFS Fetch file ACL */ @@ -270,6 +312,7 @@ enum afs_cb_break_reason { EM(afs_server_trace_get_new_cbi, "GET cbi ") \ EM(afs_server_trace_get_probe, "GET probe") \ EM(afs_server_trace_give_up_cb, "giveup-cb") \ + EM(afs_server_trace_purging, "PURGE ") \ EM(afs_server_trace_put_call, "PUT call ") \ EM(afs_server_trace_put_cbi, "PUT cbi ") \ EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \ @@ -295,6 +338,44 @@ enum afs_cb_break_reason { EM(afs_volume_trace_put_validate_fc, "PUT fc-validat") \ E_(afs_volume_trace_remove, "REMOVE ") +#define afs_cell_traces \ + EM(afs_cell_trace_alloc, "ALLOC ") \ + EM(afs_cell_trace_free, "FREE ") \ + EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \ + EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \ + EM(afs_cell_trace_get_queue_new, "GET q-new ") \ + EM(afs_cell_trace_get_vol, "GET vol ") \ + EM(afs_cell_trace_insert, "INSERT ") \ + EM(afs_cell_trace_manage, "MANAGE ") \ + EM(afs_cell_trace_put_candidate, "PUT candid") \ + EM(afs_cell_trace_put_destroy, "PUT destry") \ + EM(afs_cell_trace_put_queue_work, "PUT q-work") \ + EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \ + EM(afs_cell_trace_put_vol, "PUT vol ") \ + EM(afs_cell_trace_see_source, "SEE source") \ + EM(afs_cell_trace_see_ws, "SEE ws ") \ + EM(afs_cell_trace_unuse_alias, "UNU alias ") \ + EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \ + EM(afs_cell_trace_unuse_delete, "UNU delete") \ + EM(afs_cell_trace_unuse_fc, "UNU fc ") \ + EM(afs_cell_trace_unuse_lookup, "UNU lookup") \ + EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \ + EM(afs_cell_trace_unuse_parse, "UNU parse ") \ + EM(afs_cell_trace_unuse_pin, "UNU pin ") \ + EM(afs_cell_trace_unuse_probe, "UNU probe ") \ + EM(afs_cell_trace_unuse_sbi, "UNU 
sbi ") \ + EM(afs_cell_trace_unuse_ws, "UNU ws ") \ + EM(afs_cell_trace_use_alias, "USE alias ") \ + EM(afs_cell_trace_use_check_alias, "USE chk-al") \ + EM(afs_cell_trace_use_fc, "USE fc ") \ + EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \ + EM(afs_cell_trace_use_lookup, "USE lookup") \ + EM(afs_cell_trace_use_mntpt, "USE mntpt ") \ + EM(afs_cell_trace_use_pin, "USE pin ") \ + EM(afs_cell_trace_use_probe, "USE probe ") \ + EM(afs_cell_trace_use_sbi, "USE sbi ") \ + E_(afs_cell_trace_wait, "WAIT ") + #define afs_fs_operations \ EM(afs_FS_FetchData, "FS.FetchData") \ EM(afs_FS_FetchStatus, "FS.FetchStatus") \ @@ -483,6 +564,7 @@ enum afs_cb_break_reason { afs_call_traces; afs_server_traces; +afs_cell_traces; afs_fs_operations; afs_vl_operations; afs_edit_dir_ops; @@ -1358,6 +1440,33 @@ TRACE_EVENT(afs_volume, __entry->ref) ); +TRACE_EVENT(afs_cell, + TP_PROTO(unsigned int cell_debug_id, int usage, int active, + enum afs_cell_trace reason), + + TP_ARGS(cell_debug_id, usage, active, reason), + + TP_STRUCT__entry( + __field(unsigned int, cell ) + __field(int, usage ) + __field(int, active ) + __field(int, reason ) + ), + + TP_fast_assign( + __entry->cell = cell_debug_id; + __entry->usage = usage; + __entry->active = active; + __entry->reason = reason; + ), + + TP_printk("L=%08x %s u=%d a=%d", + __entry->cell, + __print_symbolic(__entry->reason, afs_cell_traces), + __entry->usage, + __entry->active) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 4c8b99ec8606..b14314fcf732 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -95,6 +95,16 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B); { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) +#define show_fc_reason(reason) \ + __print_symbolic(reason, \ + { EXT4_FC_REASON_XATTR, "XATTR"}, \ + { EXT4_FC_REASON_CROSS_RENAME, "CROSS_RENAME"}, \ + { EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, "JOURNAL_FLAG_CHANGE"}, \ + { EXT4_FC_REASON_MEM, "NO_MEM"}, \ + { EXT4_FC_REASON_SWAP_BOOT, "SWAP_BOOT"}, \ + { EXT4_FC_REASON_RESIZE, "RESIZE"}, \ + { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \ + { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}) TRACE_EVENT(ext4_other_inode_update_time, TP_PROTO(struct inode *inode, ino_t orig_ino), @@ -1766,9 +1776,9 @@ TRACE_EVENT(ext4_ext_load_extent, ); TRACE_EVENT(ext4_load_inode, - TP_PROTO(struct inode *inode), + TP_PROTO(struct super_block *sb, unsigned long ino), - TP_ARGS(inode), + TP_ARGS(sb, ino), TP_STRUCT__entry( __field( dev_t, dev ) @@ -1776,8 +1786,8 @@ TRACE_EVENT(ext4_load_inode, ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; + __entry->dev = sb->s_dev; + __entry->ino = ino; ), TP_printk("dev %d,%d ino %ld", @@ -2791,6 +2801,216 @@ TRACE_EVENT(ext4_lazy_itable_init, MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group) ); +TRACE_EVENT(ext4_fc_replay_scan, + TP_PROTO(struct super_block *sb, int error, int off), + + TP_ARGS(sb, error, off), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, error) + __field(int, off) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->error = error; + __entry->off = off; + ), + + TP_printk("FC scan pass on dev %d,%d: error %d, off %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->error, __entry->off) +); + +TRACE_EVENT(ext4_fc_replay, + TP_PROTO(struct super_block *sb, int tag, int ino, int priv1, int priv2), + + TP_ARGS(sb, tag, ino, priv1, priv2), + + 
TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, tag) + __field(int, ino) + __field(int, priv1) + __field(int, priv2) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->tag = tag; + __entry->ino = ino; + __entry->priv1 = priv1; + __entry->priv2 = priv2; + ), + + TP_printk("FC Replay %d,%d: tag %d, ino %d, data1 %d, data2 %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tag, __entry->ino, __entry->priv1, __entry->priv2) +); + +TRACE_EVENT(ext4_fc_commit_start, + TP_PROTO(struct super_block *sb), + + TP_ARGS(sb), + + TP_STRUCT__entry( + __field(dev_t, dev) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + ), + + TP_printk("fast_commit started on dev %d,%d", + MAJOR(__entry->dev), MINOR(__entry->dev)) +); + +TRACE_EVENT(ext4_fc_commit_stop, + TP_PROTO(struct super_block *sb, int nblks, int reason), + + TP_ARGS(sb, nblks, reason), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, nblks) + __field(int, reason) + __field(int, num_fc) + __field(int, num_fc_ineligible) + __field(int, nblks_agg) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nblks = nblks; + __entry->reason = reason; + __entry->num_fc = EXT4_SB(sb)->s_fc_stats.fc_num_commits; + __entry->num_fc_ineligible = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits; + __entry->nblks_agg = EXT4_SB(sb)->s_fc_stats.fc_numblks; + ), + + TP_printk("fc on [%d,%d] nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->nblks, __entry->reason, __entry->num_fc, + __entry->num_fc_ineligible, __entry->nblks_agg) +); + +#define FC_REASON_NAME_STAT(reason) \ + show_fc_reason(reason), \ + __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason] + +TRACE_EVENT(ext4_fc_stats, + TP_PROTO(struct super_block *sb), + + TP_ARGS(sb), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(struct ext4_sb_info *, sbi) + __field(int, count) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->sbi = EXT4_SB(sb); + ), + + TP_printk("dev %d:%d fc ineligible reasons:\n" + "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s,%d; " + "num_commits:%ld, ineligible: %ld, numblks: %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), + FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_MEM), + FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), + __entry->sbi->s_fc_stats.fc_num_commits, + __entry->sbi->s_fc_stats.fc_ineligible_commits, + __entry->sbi->s_fc_stats.fc_numblks) + +); + +#define DEFINE_TRACE_DENTRY_EVENT(__type) \ + TRACE_EVENT(ext4_fc_track_##__type, \ + TP_PROTO(struct inode *inode, struct dentry *dentry, int ret), \ + \ + TP_ARGS(inode, dentry, ret), \ + \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(int, ino) \ + __field(int, error) \ + ), \ + \ + TP_fast_assign( \ + __entry->dev = inode->i_sb->s_dev; \ + __entry->ino = inode->i_ino; \ + __entry->error = ret; \ + ), \ + \ + TP_printk("dev %d:%d, inode %d, error %d, fc_%s", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->ino, __entry->error, \ + #__type) \ + ) + +DEFINE_TRACE_DENTRY_EVENT(create); +DEFINE_TRACE_DENTRY_EVENT(link); +DEFINE_TRACE_DENTRY_EVENT(unlink); + +TRACE_EVENT(ext4_fc_track_inode, + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + 
TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, ino) + __field(int, error) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->error = ret; + ), + + TP_printk("dev %d:%d, inode %d, error %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, __entry->error) + ); + +TRACE_EVENT(ext4_fc_track_range, + TP_PROTO(struct inode *inode, long start, long end, int ret), + + TP_ARGS(inode, start, end, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, ino) + __field(long, start) + __field(long, end) + __field(int, error) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->start = start; + __entry->end = end; + __entry->error = ret; + ), + + TP_printk("dev %d:%d, inode %d, error %d, start %ld, end %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, __entry->error, __entry->start, + __entry->end) + ); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 8a1c1311acac..f8f1e85ff130 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -111,13 +111,15 @@ TRACE_DEFINE_ENUM(CP_RESIZE); #define show_alloc_mode(type) \ __print_symbolic(type, \ - { LFS, "LFS-mode" }, \ - { SSR, "SSR-mode" }) + { LFS, "LFS-mode" }, \ + { SSR, "SSR-mode" }, \ + { AT_SSR, "AT_SSR-mode" }) #define show_victim_policy(type) \ __print_symbolic(type, \ { GC_GREEDY, "Greedy" }, \ - { GC_CB, "Cost-Benefit" }) + { GC_CB, "Cost-Benefit" }, \ + { GC_AT, "Age-threshold" }) #define show_cpreason(type) \ __print_flags(type, "|", \ @@ -134,7 +136,7 @@ TRACE_DEFINE_ENUM(CP_RESIZE); __print_symbolic(type, \ { CP_NO_NEEDED, "no needed" }, \ { CP_NON_REGULAR, "non regular" }, \ - { CP_COMPRESSED, "compreesed" }, \ + { CP_COMPRESSED, "compressed" }, \ { CP_HARDLINK, "hardlink" }, \ { CP_SB_NEED_CP, "sb needs cp" }, \ { CP_WRONG_PINO, "wrong pino" }, \ diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 9417a34aad08..26cfb0fa8e7e 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -17,7 +17,7 @@ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \ ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\ ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \ - ERSN(HYPERV), ERSN(ARM_NISV) + ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR) TRACE_EVENT(kvm_userspace_exit, TP_PROTO(__u32 reason, int errno), diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index ced71237b7e4..155b5cb43cfd 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -74,17 +74,17 @@ TRACE_EVENT_RCU(rcu_grace_period, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) + __field(long, gp_seq) __field(const char *, gpevent) ), TP_fast_assign( __entry->rcuname = rcuname; - __entry->gp_seq = gp_seq; + __entry->gp_seq = (long)gp_seq; __entry->gpevent = gpevent; ), - TP_printk("%s %lu %s", + TP_printk("%s %ld %s", __entry->rcuname, __entry->gp_seq, __entry->gpevent) ); @@ -114,8 +114,8 @@ TRACE_EVENT_RCU(rcu_future_grace_period, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) - __field(unsigned long, gp_seq_req) + __field(long, gp_seq) + __field(long, gp_seq_req) __field(u8, level) __field(int, grplo) __field(int, grphi) @@ -124,16 +124,16 @@ TRACE_EVENT_RCU(rcu_future_grace_period, TP_fast_assign( __entry->rcuname = rcuname; - 
__entry->gp_seq = gp_seq; - __entry->gp_seq_req = gp_seq_req; + __entry->gp_seq = (long)gp_seq; + __entry->gp_seq_req = (long)gp_seq_req; __entry->level = level; __entry->grplo = grplo; __entry->grphi = grphi; __entry->gpevent = gpevent; ), - TP_printk("%s %lu %lu %u %d %d %s", - __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level, + TP_printk("%s %ld %ld %u %d %d %s", + __entry->rcuname, (long)__entry->gp_seq, (long)__entry->gp_seq_req, __entry->level, __entry->grplo, __entry->grphi, __entry->gpevent) ); @@ -153,7 +153,7 @@ TRACE_EVENT_RCU(rcu_grace_period_init, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) + __field(long, gp_seq) __field(u8, level) __field(int, grplo) __field(int, grphi) @@ -162,14 +162,14 @@ TRACE_EVENT_RCU(rcu_grace_period_init, TP_fast_assign( __entry->rcuname = rcuname; - __entry->gp_seq = gp_seq; + __entry->gp_seq = (long)gp_seq; __entry->level = level; __entry->grplo = grplo; __entry->grphi = grphi; __entry->qsmask = qsmask; ), - TP_printk("%s %lu %u %d %d %lx", + TP_printk("%s %ld %u %d %d %lx", __entry->rcuname, __entry->gp_seq, __entry->level, __entry->grplo, __entry->grphi, __entry->qsmask) ); @@ -197,17 +197,17 @@ TRACE_EVENT_RCU(rcu_exp_grace_period, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gpseq) + __field(long, gpseq) __field(const char *, gpevent) ), TP_fast_assign( __entry->rcuname = rcuname; - __entry->gpseq = gpseq; + __entry->gpseq = (long)gpseq; __entry->gpevent = gpevent; ), - TP_printk("%s %lu %s", + TP_printk("%s %ld %s", __entry->rcuname, __entry->gpseq, __entry->gpevent) ); @@ -316,17 +316,17 @@ TRACE_EVENT_RCU(rcu_preempt_task, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) + __field(long, gp_seq) __field(int, pid) ), TP_fast_assign( __entry->rcuname = rcuname; - __entry->gp_seq = gp_seq; + __entry->gp_seq = (long)gp_seq; __entry->pid = pid; ), - TP_printk("%s %lu %d", + TP_printk("%s %ld %d", __entry->rcuname, __entry->gp_seq, __entry->pid) ); @@ -343,17 +343,17 @@ TRACE_EVENT_RCU(rcu_unlock_preempted_task, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) + __field(long, gp_seq) __field(int, pid) ), TP_fast_assign( __entry->rcuname = rcuname; - __entry->gp_seq = gp_seq; + __entry->gp_seq = (long)gp_seq; __entry->pid = pid; ), - TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid) + TP_printk("%s %ld %d", __entry->rcuname, __entry->gp_seq, __entry->pid) ); /* @@ -374,7 +374,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) + __field(long, gp_seq) __field(unsigned long, mask) __field(unsigned long, qsmask) __field(u8, level) @@ -385,7 +385,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report, TP_fast_assign( __entry->rcuname = rcuname; - __entry->gp_seq = gp_seq; + __entry->gp_seq = (long)gp_seq; __entry->mask = mask; __entry->qsmask = qsmask; __entry->level = level; @@ -394,7 +394,7 @@ TRACE_EVENT_RCU(rcu_quiescent_state_report, __entry->gp_tasks = gp_tasks; ), - TP_printk("%s %lu %lx>%lx %u %d %d %u", + TP_printk("%s %ld %lx>%lx %u %d %d %u", __entry->rcuname, __entry->gp_seq, __entry->mask, __entry->qsmask, __entry->level, __entry->grplo, __entry->grphi, __entry->gp_tasks) @@ -415,19 +415,19 @@ TRACE_EVENT_RCU(rcu_fqs, TP_STRUCT__entry( __field(const char *, rcuname) - __field(unsigned long, gp_seq) + __field(long, gp_seq) __field(int, cpu) __field(const char *, qsevent) ), TP_fast_assign( 
__entry->rcuname = rcuname; - __entry->gp_seq = gp_seq; + __entry->gp_seq = (long)gp_seq; __entry->cpu = cpu; __entry->qsevent = qsevent; ), - TP_printk("%s %lu %d %s", + TP_printk("%s %ld %d %s", __entry->rcuname, __entry->gp_seq, __entry->cpu, __entry->qsevent) ); diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h index aa19afc73a4e..81bb454fc288 100644 --- a/include/trace/events/rdma.h +++ b/include/trace/events/rdma.h @@ -6,7 +6,6 @@ /* * enum ib_event_type, from include/rdma/ib_verbs.h */ - #define IB_EVENT_LIST \ ib_event(CQ_ERR) \ ib_event(QP_FATAL) \ @@ -91,6 +90,46 @@ IB_WC_STATUS_LIST __print_symbolic(x, IB_WC_STATUS_LIST) /* + * enum ib_cm_event_type, from include/rdma/ib_cm.h + */ +#define IB_CM_EVENT_LIST \ + ib_cm_event(REQ_ERROR) \ + ib_cm_event(REQ_RECEIVED) \ + ib_cm_event(REP_ERROR) \ + ib_cm_event(REP_RECEIVED) \ + ib_cm_event(RTU_RECEIVED) \ + ib_cm_event(USER_ESTABLISHED) \ + ib_cm_event(DREQ_ERROR) \ + ib_cm_event(DREQ_RECEIVED) \ + ib_cm_event(DREP_RECEIVED) \ + ib_cm_event(TIMEWAIT_EXIT) \ + ib_cm_event(MRA_RECEIVED) \ + ib_cm_event(REJ_RECEIVED) \ + ib_cm_event(LAP_ERROR) \ + ib_cm_event(LAP_RECEIVED) \ + ib_cm_event(APR_RECEIVED) \ + ib_cm_event(SIDR_REQ_ERROR) \ + ib_cm_event(SIDR_REQ_RECEIVED) \ + ib_cm_event_end(SIDR_REP_RECEIVED) + +#undef ib_cm_event +#undef ib_cm_event_end + +#define ib_cm_event(x) TRACE_DEFINE_ENUM(IB_CM_##x); +#define ib_cm_event_end(x) TRACE_DEFINE_ENUM(IB_CM_##x); + +IB_CM_EVENT_LIST + +#undef ib_cm_event +#undef ib_cm_event_end + +#define ib_cm_event(x) { IB_CM_##x, #x }, +#define ib_cm_event_end(x) { IB_CM_##x, #x } + +#define rdma_show_ib_cm_event(x) \ + __print_symbolic(x, IB_CM_EVENT_LIST) + +/* * enum rdma_cm_event_type, from include/rdma/rdma_cm.h */ #define RDMA_CM_EVENT_LIST \ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index abe942225637..bf1065772228 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -13,6 +13,7 @@ #include <linux/scatterlist.h> #include <linux/sunrpc/rpc_rdma_cid.h> #include <linux/tracepoint.h> +#include <rdma/ib_cm.h> #include <trace/events/rdma.h> /** @@ -423,7 +424,6 @@ DEFINE_CONN_EVENT(connect); DEFINE_CONN_EVENT(disconnect); DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); -DEFINE_RXPRT_EVENT(xprtrdma_op_setport); TRACE_EVENT(xprtrdma_op_connect, TP_PROTO( @@ -1188,68 +1188,6 @@ TRACE_EVENT(xprtrdma_decode_seg, ); /** - ** Allocation/release of rpcrdma_reqs and rpcrdma_reps - **/ - -TRACE_EVENT(xprtrdma_op_allocate, - TP_PROTO( - const struct rpc_task *task, - const struct rpcrdma_req *req - ), - - TP_ARGS(task, req), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, req) - __field(size_t, callsize) - __field(size_t, rcvsize) - ), - - TP_fast_assign( - __entry->task_id = task->tk_pid; - __entry->client_id = task->tk_client->cl_clid; - __entry->req = req; - __entry->callsize = task->tk_rqstp->rq_callsize; - __entry->rcvsize = task->tk_rqstp->rq_rcvsize; - ), - - TP_printk("task:%u@%u req=%p (%zu, %zu)", - __entry->task_id, __entry->client_id, - __entry->req, __entry->callsize, __entry->rcvsize - ) -); - -TRACE_EVENT(xprtrdma_op_free, - TP_PROTO( - const struct rpc_task *task, - const struct rpcrdma_req *req - ), - - TP_ARGS(task, req), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, req) - __field(const void *, rep) - ), - - TP_fast_assign( - __entry->task_id = task->tk_pid; - __entry->client_id 
= task->tk_client->cl_clid; - __entry->req = req; - __entry->rep = req->rl_reply; - ), - - TP_printk("task:%u@%u req=%p rep=%p", - __entry->task_id, __entry->client_id, - __entry->req, __entry->rep - ) -); - -/** ** Callback events **/ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 65d7dfbbc9cd..f45b3c01370c 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -259,8 +259,10 @@ DECLARE_EVENT_CLASS(rpc_task_status, TP_ARGS(task)) DEFINE_RPC_STATUS_EVENT(call); -DEFINE_RPC_STATUS_EVENT(bind); DEFINE_RPC_STATUS_EVENT(connect); +DEFINE_RPC_STATUS_EVENT(timeout); +DEFINE_RPC_STATUS_EVENT(retry_refresh); +DEFINE_RPC_STATUS_EVENT(refresh); TRACE_EVENT(rpc_request, TP_PROTO(const struct rpc_task *task), @@ -385,7 +387,10 @@ DECLARE_EVENT_CLASS(rpc_task_running, DEFINE_RPC_RUNNING_EVENT(begin); DEFINE_RPC_RUNNING_EVENT(run_action); +DEFINE_RPC_RUNNING_EVENT(sync_sleep); +DEFINE_RPC_RUNNING_EVENT(sync_wake); DEFINE_RPC_RUNNING_EVENT(complete); +DEFINE_RPC_RUNNING_EVENT(timeout); DEFINE_RPC_RUNNING_EVENT(signalled); DEFINE_RPC_RUNNING_EVENT(end); @@ -517,6 +522,49 @@ DEFINE_RPC_REPLY_EVENT(stale_creds); DEFINE_RPC_REPLY_EVENT(bad_creds); DEFINE_RPC_REPLY_EVENT(auth_tooweak); +#define DEFINE_RPCB_ERROR_EVENT(name) \ + DEFINE_EVENT(rpc_reply_event, rpcb_##name##_err, \ + TP_PROTO( \ + const struct rpc_task *task \ + ), \ + TP_ARGS(task)) + +DEFINE_RPCB_ERROR_EVENT(prog_unavail); +DEFINE_RPCB_ERROR_EVENT(timeout); +DEFINE_RPCB_ERROR_EVENT(bind_version); +DEFINE_RPCB_ERROR_EVENT(unreachable); +DEFINE_RPCB_ERROR_EVENT(unrecognized); + +TRACE_EVENT(rpc_buf_alloc, + TP_PROTO( + const struct rpc_task *task, + int status + ), + + TP_ARGS(task, status), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(size_t, callsize) + __field(size_t, recvsize) + __field(int, status) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->callsize = task->tk_rqstp->rq_callsize; + __entry->recvsize = task->tk_rqstp->rq_rcvsize; + __entry->status = status; + ), + + TP_printk("task:%u@%u callsize=%zu recvsize=%zu status=%d", + __entry->task_id, __entry->client_id, + __entry->callsize, __entry->recvsize, __entry->status + ) +); + TRACE_EVENT(rpc_call_rpcerror, TP_PROTO( const struct rpc_task *task, @@ -868,6 +916,34 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); +TRACE_EVENT(rpc_socket_nospace, + TP_PROTO( + const struct rpc_rqst *rqst, + const struct sock_xprt *transport + ), + + TP_ARGS(rqst, transport), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(unsigned int, total) + __field(unsigned int, remaining) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->total = rqst->rq_slen; + __entry->remaining = rqst->rq_slen - transport->xmit.offset; + ), + + TP_printk("task:%u@%u total=%u remaining=%u", + __entry->task_id, __entry->client_id, + __entry->total, __entry->remaining + ) +); + TRACE_DEFINE_ENUM(XPRT_LOCKED); TRACE_DEFINE_ENUM(XPRT_CONNECTED); TRACE_DEFINE_ENUM(XPRT_CONNECTING); @@ -925,6 +1001,7 @@ DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class, TP_ARGS(xprt)) DEFINE_RPC_XPRT_LIFETIME_EVENT(create); +DEFINE_RPC_XPRT_LIFETIME_EVENT(connect); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto); 
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force); @@ -969,7 +1046,6 @@ DECLARE_EVENT_CLASS(rpc_xprt_event, DEFINE_RPC_XPRT_EVENT(timer); DEFINE_RPC_XPRT_EVENT(lookup_rqst); -DEFINE_RPC_XPRT_EVENT(complete_rqst); TRACE_EVENT(xprt_transmit, TP_PROTO( @@ -1002,37 +1078,6 @@ TRACE_EVENT(xprt_transmit, __entry->seqno, __entry->status) ); -TRACE_EVENT(xprt_enq_xmit, - TP_PROTO( - const struct rpc_task *task, - int stage - ), - - TP_ARGS(task, stage), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(u32, xid) - __field(u32, seqno) - __field(int, stage) - ), - - TP_fast_assign( - __entry->task_id = task->tk_pid; - __entry->client_id = task->tk_client ? - task->tk_client->cl_clid : -1; - __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); - __entry->seqno = task->tk_rqstp->rq_seqno; - __entry->stage = stage; - ), - - TP_printk( - "task:%u@%u xid=0x%08x seqno=%u stage=%d", - __entry->task_id, __entry->client_id, __entry->xid, - __entry->seqno, __entry->stage) -); - TRACE_EVENT(xprt_ping, TP_PROTO(const struct rpc_xprt *xprt, int status), @@ -1095,6 +1140,7 @@ DECLARE_EVENT_CLASS(xprt_writelock_event, DEFINE_WRITELOCK_EVENT(reserve_xprt); DEFINE_WRITELOCK_EVENT(release_xprt); +DEFINE_WRITELOCK_EVENT(transmit_queued); DECLARE_EVENT_CLASS(xprt_cong_event, TP_PROTO( @@ -1147,6 +1193,30 @@ DEFINE_CONG_EVENT(release_cong); DEFINE_CONG_EVENT(get_cong); DEFINE_CONG_EVENT(put_cong); +TRACE_EVENT(xprt_reserve, + TP_PROTO( + const struct rpc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x", + __entry->task_id, __entry->client_id, __entry->xid + ) +); + TRACE_EVENT(xs_stream_read_data, TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total), @@ -1202,6 +1272,156 @@ TRACE_EVENT(xs_stream_read_request, __entry->copied, __entry->reclen, __entry->offset) ); +TRACE_EVENT(rpcb_getport, + TP_PROTO( + const struct rpc_clnt *clnt, + const struct rpc_task *task, + unsigned int bind_version + ), + + TP_ARGS(clnt, task, bind_version), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(unsigned int, program) + __field(unsigned int, version) + __field(int, protocol) + __field(unsigned int, bind_version) + __string(servername, task->tk_xprt->servername) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = clnt->cl_clid; + __entry->program = clnt->cl_prog; + __entry->version = clnt->cl_vers; + __entry->protocol = task->tk_xprt->prot; + __entry->bind_version = bind_version; + __assign_str(servername, task->tk_xprt->servername); + ), + + TP_printk("task:%u@%u server=%s program=%u version=%u protocol=%d bind_version=%u", + __entry->task_id, __entry->client_id, __get_str(servername), + __entry->program, __entry->version, __entry->protocol, + __entry->bind_version + ) +); + +TRACE_EVENT(rpcb_setport, + TP_PROTO( + const struct rpc_task *task, + int status, + unsigned short port + ), + + TP_ARGS(task, status, port), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(int, status) + __field(unsigned short, port) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = 
task->tk_client->cl_clid; + __entry->status = status; + __entry->port = port; + ), + + TP_printk("task:%u@%u status=%d port=%u", + __entry->task_id, __entry->client_id, + __entry->status, __entry->port + ) +); + +TRACE_EVENT(pmap_register, + TP_PROTO( + u32 program, + u32 version, + int protocol, + unsigned short port + ), + + TP_ARGS(program, version, protocol, port), + + TP_STRUCT__entry( + __field(unsigned int, program) + __field(unsigned int, version) + __field(int, protocol) + __field(unsigned int, port) + ), + + TP_fast_assign( + __entry->program = program; + __entry->version = version; + __entry->protocol = protocol; + __entry->port = port; + ), + + TP_printk("program=%u version=%u protocol=%d port=%u", + __entry->program, __entry->version, + __entry->protocol, __entry->port + ) +); + +TRACE_EVENT(rpcb_register, + TP_PROTO( + u32 program, + u32 version, + const char *addr, + const char *netid + ), + + TP_ARGS(program, version, addr, netid), + + TP_STRUCT__entry( + __field(unsigned int, program) + __field(unsigned int, version) + __string(addr, addr) + __string(netid, netid) + ), + + TP_fast_assign( + __entry->program = program; + __entry->version = version; + __assign_str(addr, addr); + __assign_str(netid, netid); + ), + + TP_printk("program=%u version=%u addr=%s netid=%s", + __entry->program, __entry->version, + __get_str(addr), __get_str(netid) + ) +); + +TRACE_EVENT(rpcb_unregister, + TP_PROTO( + u32 program, + u32 version, + const char *netid + ), + + TP_ARGS(program, version, netid), + + TP_STRUCT__entry( + __field(unsigned int, program) + __field(unsigned int, version) + __string(netid, netid) + ), + + TP_fast_assign( + __entry->program = program; + __entry->version = version; + __assign_str(netid, netid); + ), + + TP_printk("program=%u version=%u netid=%s", + __entry->program, __entry->version, __get_str(netid) + ) +); DECLARE_EVENT_CLASS(svc_xdr_buf_class, TP_PROTO( diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h index b0f8e87235bd..4f3d5aaa11f5 100644 --- a/include/uapi/asm-generic/hugetlb_encode.h +++ b/include/uapi/asm-generic/hugetlb_encode.h @@ -20,6 +20,7 @@ #define HUGETLB_FLAG_ENCODE_SHIFT 26 #define HUGETLB_FLAG_ENCODE_MASK 0x3f +#define HUGETLB_FLAG_ENCODE_16KB (14 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f2b5d72a46c2..2056318988f7 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -857,9 +857,11 @@ __SYSCALL(__NR_openat2, sys_openat2) __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) #define __NR_faccessat2 439 __SYSCALL(__NR_faccessat2, sys_faccessat2) +#define __NR_process_madvise 440 +__SYSCALL(__NR_process_madvise, sys_process_madvise) #undef __NR_syscalls -#define __NR_syscalls 440 +#define __NR_syscalls 441 /* * 32 bit systems traditionally used different diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 373cada89815..7233502ea991 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -172,6 +172,9 @@ * - add FUSE_WRITE_KILL_PRIV flag * - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING * - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag + * + * 7.32 + * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS */ #ifndef _LINUX_FUSE_H 
@@ -207,7 +210,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 31 +#define FUSE_KERNEL_MINOR_VERSION 32 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -231,7 +234,7 @@ struct fuse_attr { uint32_t gid; uint32_t rdev; uint32_t blksize; - uint32_t padding; + uint32_t flags; }; struct fuse_kstatfs { @@ -313,7 +316,10 @@ struct fuse_file_lock { * FUSE_CACHE_SYMLINKS: cache READLINK responses * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request - * FUSE_MAP_ALIGNMENT: map_alignment field is valid + * FUSE_MAP_ALIGNMENT: init_out.map_alignment contains log2(byte alignment) for + * foffset and moffset fields in struct + * fuse_setupmapping_out and fuse_removemapping_one. + * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -342,6 +348,7 @@ struct fuse_file_lock { #define FUSE_NO_OPENDIR_SUPPORT (1 << 24) #define FUSE_EXPLICIT_INVAL_DATA (1 << 25) #define FUSE_MAP_ALIGNMENT (1 << 26) +#define FUSE_SUBMOUNTS (1 << 27) /** * CUSE INIT request/reply flags @@ -417,6 +424,13 @@ struct fuse_file_lock { */ #define FUSE_FSYNC_FDATASYNC (1 << 0) +/** + * fuse_attr flags + * + * FUSE_ATTR_SUBMOUNT: Object is a submount root + */ +#define FUSE_ATTR_SUBMOUNT (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -892,4 +906,34 @@ struct fuse_copy_file_range_in { uint64_t flags; }; +#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0) +#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1) +struct fuse_setupmapping_in { + /* An already open handle */ + uint64_t fh; + /* Offset into the file to start the mapping */ + uint64_t foffset; + /* Length of mapping required */ + uint64_t len; + /* Flags, FUSE_SETUPMAPPING_FLAG_* */ + uint64_t flags; + /* Offset in Memory Window */ + uint64_t moffset; +}; + +struct fuse_removemapping_in { + /* number of fuse_removemapping_one follows */ + uint32_t count; +}; + +struct fuse_removemapping_one { + /* Offset into the dax window start the unmapping */ + uint64_t moffset; + /* Length of mapping required */ + uint64_t len; +}; + +#define FUSE_REMOVEMAPPING_MAX_ENTRY \ + (PAGE_SIZE / sizeof(struct fuse_removemapping_one)) + #endif /* _LINUX_FUSE_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7d8eced6f459..ca41220b40b8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -248,6 +248,8 @@ struct kvm_hyperv_exit { #define KVM_EXIT_IOAPIC_EOI 26 #define KVM_EXIT_HYPERV 27 #define KVM_EXIT_ARM_NISV 28 +#define KVM_EXIT_X86_RDMSR 29 +#define KVM_EXIT_X86_WRMSR 30 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -413,6 +415,17 @@ struct kvm_run { __u64 esr_iss; __u64 fault_ipa; } arm_nisv; + /* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */ + struct { + __u8 error; /* user -> kernel */ + __u8 pad[7]; +#define KVM_MSR_EXIT_REASON_INVAL (1 << 0) +#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1) +#define KVM_MSR_EXIT_REASON_FILTER (1 << 2) + __u32 reason; /* kernel -> user */ + __u32 index; /* kernel -> user */ + __u64 data; /* kernel <-> user */ + } msr; /* Fix the size of the union. 
*/ char padding[256]; }; @@ -1037,6 +1050,9 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SMALLER_MAXPHYADDR 185 #define KVM_CAP_S390_DIAG318 186 #define KVM_CAP_STEAL_TIME 187 +#define KVM_CAP_X86_USER_SPACE_MSR 188 +#define KVM_CAP_X86_MSR_FILTER 189 +#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 #ifdef KVM_CAP_IRQ_ROUTING @@ -1538,6 +1554,9 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_S390_PROTECTED */ #define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd) +/* Available with KVM_CAP_X86_MSR_FILTER */ +#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h index 923cc162609c..f55bc680b5b0 100644 --- a/include/uapi/linux/mman.h +++ b/include/uapi/linux/mman.h @@ -27,6 +27,7 @@ #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT #define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK +#define MAP_HUGE_16KB HUGETLB_FLAG_ENCODE_16KB #define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB #define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB #define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index bf197e99b98f..ed5415e0f1c1 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -139,6 +139,8 @@ #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 #define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 + +#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004 /* * Since the validity of these bits depends on whether * they're set in the argument or response, have separate @@ -146,6 +148,7 @@ */ #define EXCHGID4_FLAG_MASK_A 0x40070103 #define EXCHGID4_FLAG_MASK_R 0x80070103 +#define EXCHGID4_2_FLAG_MASK_R 0x80070107 #define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h index ca9a8501ff30..2c2ad204d3b0 100644 --- a/include/uapi/linux/nfsacl.h +++ b/include/uapi/linux/nfsacl.h @@ -9,11 +9,13 @@ #define NFS_ACL_PROGRAM 100227 +#define ACLPROC2_NULL 0 #define ACLPROC2_GETACL 1 #define ACLPROC2_SETACL 2 #define ACLPROC2_GETATTR 3 #define ACLPROC2_ACCESS 4 +#define ACLPROC3_NULL 0 #define ACLPROC3_GETACL 1 #define ACLPROC3_SETACL 2 diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index f9701410d3b5..a95d55f9f257 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -76,6 +76,7 @@ #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ #define PCI_LATENCY_TIMER 0x0d /* 8 bits */ #define PCI_HEADER_TYPE 0x0e /* 8 bits */ +#define PCI_HEADER_TYPE_MASK 0x7f #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 @@ -246,7 +247,7 @@ #define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */ #define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */ #define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ -#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ +#define PCI_PM_CAP_PME_D3hot 0x4000 /* PME# from D3 (hot) */ #define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ #define PCI_PM_CTRL 4 /* PM control and status register */ @@ -532,6 +533,8 @@ #define PCI_EXP_LNKCAP_SLS_32_0GB 0x00000005 /* LNKCAP2 SLS Vector bit 4 */ #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#define PCI_EXP_LNKCAP_ASPM_L0S 0x00000400 /* ASPM L0s Support */ 
+#define PCI_EXP_LNKCAP_ASPM_L1 0x00000800 /* ASPM L1 Support */ #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ #define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */ #define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */ @@ -1056,6 +1059,7 @@ #define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */ #define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */ #define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */ +#define PCI_L1SS_CTL1_L1_2_MASK 0x00000005 #define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f #define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 920470502329..2f313a238a8f 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -201,8 +201,11 @@ struct vfio_device_info { #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */ #define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */ #define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */ +#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */ +#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */ __u32 num_regions; /* Max region index + 1 */ __u32 num_irqs; /* Max IRQ index + 1 */ + __u32 cap_offset; /* Offset within info struct of first cap */ }; #define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7) @@ -218,6 +221,15 @@ struct vfio_device_info { #define VFIO_DEVICE_API_CCW_STRING "vfio-ccw" #define VFIO_DEVICE_API_AP_STRING "vfio-ap" +/* + * The following capabilities are unique to s390 zPCI devices. Their contents + * are further-defined in vfio_zdev.h + */ +#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE 1 +#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP 2 +#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3 +#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4 + /** * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8, * struct vfio_region_info) @@ -462,7 +474,7 @@ struct vfio_region_gfx_edid { * 5. Resumed * |--------->| * - * 0. Default state of VFIO device is _RUNNNG when the user application starts. + * 0. Default state of VFIO device is _RUNNING when the user application starts. * 1. During normal shutdown of the user application, the user application may * optionally change the VFIO device state from _RUNNING to _STOP. This * transition is optional. The vendor driver must support this transition but @@ -1039,6 +1051,21 @@ struct vfio_iommu_type1_info_cap_migration { __u64 max_dirty_bitmap_size; /* in bytes */ }; +/* + * The DMA available capability allows to report the current number of + * simultaneously outstanding DMA mappings that are allowed. + * + * The structure below defines version 1 of this capability. + * + * avail: specifies the current number of outstanding DMA mappings allowed. + */ +#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3 + +struct vfio_iommu_type1_info_dma_avail { + struct vfio_info_cap_header header; + __u32 avail; +}; + #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) /** diff --git a/include/uapi/linux/vfio_zdev.h b/include/uapi/linux/vfio_zdev.h new file mode 100644 index 000000000000..b4309397b6b2 --- /dev/null +++ b/include/uapi/linux/vfio_zdev.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * VFIO Region definitions for ZPCI devices + * + * Copyright IBM Corp. 
2020 + * + * Author(s): Pierre Morel <pmorel@linux.ibm.com> + * Matthew Rosato <mjrosato@linux.ibm.com> + */ + +#ifndef _VFIO_ZDEV_H_ +#define _VFIO_ZDEV_H_ + +#include <linux/types.h> +#include <linux/vfio.h> + +/** + * VFIO_DEVICE_INFO_CAP_ZPCI_BASE - Base PCI Function information + * + * This capability provides a set of descriptive information about the + * associated PCI function. + */ +struct vfio_device_info_cap_zpci_base { + struct vfio_info_cap_header header; + __u64 start_dma; /* Start of available DMA addresses */ + __u64 end_dma; /* End of available DMA addresses */ + __u16 pchid; /* Physical Channel ID */ + __u16 vfn; /* Virtual function number */ + __u16 fmb_length; /* Measurement Block Length (in bytes) */ + __u8 pft; /* PCI Function Type */ + __u8 gid; /* PCI function group ID */ +}; + +/** + * VFIO_DEVICE_INFO_CAP_ZPCI_GROUP - Base PCI Function Group information + * + * This capability provides a set of descriptive information about the group of + * PCI functions that the associated device belongs to. + */ +struct vfio_device_info_cap_zpci_group { + struct vfio_info_cap_header header; + __u64 dasm; /* DMA Address space mask */ + __u64 msi_addr; /* MSI address */ + __u64 flags; +#define VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH 1 /* Program-specified TLB refresh */ + __u16 mui; /* Measurement Block Update Interval */ + __u16 noi; /* Maximum number of MSIs */ + __u16 maxstbl; /* Maximum Store Block Length */ + __u8 version; /* Supported PCI Version */ +}; + +/** + * VFIO_DEVICE_INFO_CAP_ZPCI_UTIL - Utility String + * + * This capability provides the utility string for the associated device, which + * is a device identifier string made up of EBCDID characters. 'size' specifies + * the length of 'util_str'. + */ +struct vfio_device_info_cap_zpci_util { + struct vfio_info_cap_header header; + __u32 size; + __u8 util_str[]; +}; + +/** + * VFIO_DEVICE_INFO_CAP_ZPCI_PFIP - PCI Function Path + * + * This capability provides the PCI function path string, which is an identifier + * that describes the internal hardware path of the device. 'size' specifies + * the length of 'pfip'. 
+ */ +struct vfio_device_info_cap_zpci_pfip { + struct vfio_info_cap_header header; + __u32 size; + __u8 pfip[]; +}; + +#endif diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h index 3056b6e9f8ce..bea38291421b 100644 --- a/include/uapi/linux/virtio_fs.h +++ b/include/uapi/linux/virtio_fs.h @@ -16,4 +16,7 @@ struct virtio_fs_config { __le32 num_request_queues; } __attribute__((packed)); +/* For the id field in virtio_pci_shm_cap */ +#define VIRTIO_FS_SHMCAP_ID_CACHE 0 + #endif /* _UAPI_LINUX_VIRTIO_FS_H */ diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 507a2862bedb..f89fbb5b1e8d 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -105,6 +105,7 @@ struct efa_ibv_create_ah_resp { enum { EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0, + EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1, }; struct efa_ibv_ex_query_device_resp { diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index eb76b38a00d4..9ec85f76e9ac 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -39,6 +39,8 @@ struct hns_roce_ib_create_cq { __aligned_u64 buf_addr; __aligned_u64 db_addr; + __u32 cqe_size; + __u32 reserved; }; struct hns_roce_ib_create_cq_resp { @@ -73,7 +75,7 @@ struct hns_roce_ib_create_qp_resp { struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; - __u32 reserved; + __u32 cqe_size; }; struct hns_roce_ib_alloc_pd_resp { diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 99dcabf61a71..7968a1845355 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -70,6 +70,8 @@ enum uverbs_methods_device { UVERBS_METHOD_QUERY_PORT, UVERBS_METHOD_GET_CONTEXT, UVERBS_METHOD_QUERY_CONTEXT, + UVERBS_METHOD_QUERY_GID_TABLE, + UVERBS_METHOD_QUERY_GID_ENTRY, }; enum uverbs_attrs_invoke_write_cmd_attr_ids { @@ -352,4 +354,18 @@ enum uverbs_attrs_async_event_create { UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE, }; +enum uverbs_attrs_query_gid_table_cmd_attr_ids { + UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, + UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES, +}; + +enum uverbs_attrs_query_gid_entry_cmd_attr_ids { + UVERBS_ATTR_QUERY_GID_ENTRY_PORT, + UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, + UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, + UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, +}; + #endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 5debab45ebcb..22483799cd07 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -208,6 +208,7 @@ enum ib_uverbs_read_counters_flags { enum ib_uverbs_advise_mr_advice { IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT, }; enum ib_uverbs_advise_mr_flag { @@ -250,4 +251,18 @@ enum rdma_driver_id { RDMA_DRIVER_SIW, }; +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB, + IB_UVERBS_GID_TYPE_ROCE_V1, + IB_UVERBS_GID_TYPE_ROCE_V2, +}; + +struct ib_uverbs_gid_entry { + __aligned_u64 gid[2]; + __u32 gid_index; + __u32 port_num; + __u32 gid_type; + __u32 netdev_ifindex; /* It is 0 if there is no netdev associated with it */ +}; + #endif diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 0474c7400268..456438c18c2c 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -457,6 +457,17 @@ struct 
ib_uverbs_poll_cq { __u32 ne; }; +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, +}; + struct ib_uverbs_wc { __aligned_u64 wr_id; __u32 status; diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index aae2e696bb38..e591d8c1f3cf 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -39,6 +39,11 @@ #include <linux/in.h> #include <linux/in6.h> +enum { + RXE_NETWORK_TYPE_IPV4 = 1, + RXE_NETWORK_TYPE_IPV6 = 2, +}; + union rxe_gid { __u8 raw[16]; struct { @@ -57,6 +62,7 @@ struct rxe_global_route { struct rxe_av { __u8 port_num; + /* From RXE_NETWORK_TYPE_* */ __u8 network_type; __u8 dmac[6]; struct rxe_global_route grh; @@ -99,8 +105,8 @@ struct rxe_send_wr { struct ib_mr *mr; __aligned_u64 reserved; }; - __u32 key; - __u32 access; + __u32 key; + __u32 access; } reg; } wr; }; @@ -112,7 +118,7 @@ struct rxe_sge { }; struct mminfo { - __aligned_u64 offset; + __aligned_u64 offset; __u32 size; __u32 pad; }; diff --git a/include/xen/events.h b/include/xen/events.h index df1e6391f63f..3b8155c2ea03 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -15,10 +15,15 @@ unsigned xen_evtchn_nr_channels(void); int bind_evtchn_to_irq(evtchn_port_t evtchn); +int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn); int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); +int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, + irq_handler_t handler, + unsigned long irqflags, const char *devname, + void *dev_id); int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, @@ -32,12 +37,20 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void *dev_id); int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, evtchn_port_t remote_port); +int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, + evtchn_port_t remote_port); int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); +int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, + evtchn_port_t remote_port, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, + void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. @@ -46,6 +59,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); +/* + * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi + * functions above. + */ +void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags); +/* Signal an event was spurious, i.e. there was no action resulting from it. */ +#define XEN_EOI_FLAG_SPURIOUS 0x00000001 + #define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX #define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT #define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN |
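The *_lateeoi bindings added to xen/events.h above bind an event channel whose EOI is deferred: the event is not re-enabled when the handler returns, but only when the driver calls xen_irq_lateeoi(), which lets the core event-channel code throttle event storms from a misbehaving other end. A sketch of how a backend driver might use the interdomain variant; the my_backend structure and my_backend_process_ring() helper are hypothetical:

#include <linux/interrupt.h>
#include <xen/events.h>

struct my_backend {
	int irq;
	/* ... ring state ... */
};

static bool my_backend_process_ring(struct my_backend *be);	/* hypothetical */

static irqreturn_t my_backend_interrupt(int irq, void *dev_id)
{
	struct my_backend *be = dev_id;
	bool did_work = my_backend_process_ring(be);

	/*
	 * Signal the late EOI ourselves; flag the event as spurious if the
	 * ring contained no work so the core can apply its delay logic.
	 */
	xen_irq_lateeoi(irq, did_work ? 0 : XEN_EOI_FLAG_SPURIOUS);
	return IRQ_HANDLED;
}

static int my_backend_connect(struct my_backend *be, unsigned int domid,
			      evtchn_port_t port)
{
	int err = bind_interdomain_evtchn_to_irqhandler_lateeoi(domid, port,
						my_backend_interrupt, 0,
						"my-backend", be);
	if (err < 0)
		return err;

	be->irq = err;	/* the bound IRQ number is returned on success */
	return 0;
}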
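The KVM_EXIT_X86_RDMSR/KVM_EXIT_X86_WRMSR exit reasons and the kvm_run msr union member above hand selected MSR accesses to userspace. A sketch of the corresponding run-loop handling in a VMM, assuming the VM has already opted in via KVM_ENABLE_CAP with KVM_CAP_X86_USER_SPACE_MSR and that emulate_rdmsr()/emulate_wrmsr() are the VMM's own policy helpers:

#include <linux/kvm.h>
#include <stdbool.h>
#include <stdint.h>

/* VMM-provided helpers (hypothetical): return true if the access is handled. */
bool emulate_rdmsr(uint32_t index, uint64_t *value);
bool emulate_wrmsr(uint32_t index, uint64_t value);

/* Fill in run->msr before re-entering the guest with KVM_RUN. */
static void handle_msr_exit(struct kvm_run *run)
{
	uint64_t value;

	/* run->msr.reason explains the exit: INVAL, UNKNOWN or FILTER. */
	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		/* kernel -> user: index; user -> kernel: data and error */
		if (emulate_rdmsr(run->msr.index, &value)) {
			run->msr.data = value;
			run->msr.error = 0;
		} else {
			run->msr.error = 1;	/* non-zero makes KVM inject #GP */
		}
		break;
	case KVM_EXIT_X86_WRMSR:
		/* kernel -> user: index and data; user -> kernel: error */
		run->msr.error = !emulate_wrmsr(run->msr.index, run->msr.data);
		break;
	}
}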
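VFIO_DEVICE_FLAGS_CAPS and the new cap_offset field above add a capability chain to VFIO_DEVICE_GET_INFO, through which the s390 zPCI capabilities from vfio_zdev.h are reported. A userspace sketch of the usual two-pass argsz retry plus a generic chain walk, assuming an already-opened VFIO device fd; the chain layout (vfio_info_cap_header with id/version/next, offsets relative to the start of the info structure) follows the existing region-info capability convention:

#include <linux/vfio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/* Walk a capability chain; cap_offset is the offset of the first header. */
static struct vfio_info_cap_header *
vfio_find_cap(void *info, uint32_t cap_offset, uint16_t id)
{
	while (cap_offset) {
		struct vfio_info_cap_header *hdr =
			(struct vfio_info_cap_header *)((char *)info + cap_offset);

		if (hdr->id == id)
			return hdr;
		cap_offset = hdr->next;
	}
	return NULL;
}

/* Example: check whether the device exposes the zPCI base capability. */
static int has_zpci_base_cap(int device_fd)
{
	struct vfio_device_info *info, *bigger;
	uint32_t argsz = sizeof(*info);
	int ret = 0;

	info = calloc(1, argsz);
	if (!info)
		return -1;
	info->argsz = argsz;

	/* The first call reports the size needed for the capability chain. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, info))
		goto out;

	if (info->argsz > argsz) {
		argsz = info->argsz;
		bigger = realloc(info, argsz);
		if (!bigger)
			goto out;
		info = bigger;
		memset(info, 0, argsz);
		info->argsz = argsz;
		if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, info))
			goto out;
	}

	if ((info->flags & VFIO_DEVICE_FLAGS_CAPS) &&
	    vfio_find_cap(info, info->cap_offset,
			  VFIO_DEVICE_INFO_CAP_ZPCI_BASE))
		ret = 1;
out:
	free(info);
	return ret;
}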
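MAP_HUGE_16KB and HUGETLB_FLAG_ENCODE_16KB above extend the MAP_HUGE_* size encoding to 16KB huge pages, which only exist on architectures that provide that size (e.g. arm64 built with 16k base pages). A minimal sketch of requesting such a mapping; the fallback define is only needed while libc headers lack the new constant:

#define _GNU_SOURCE
#include <sys/mman.h>

#ifndef MAP_HUGE_16KB
#define MAP_HUGE_16KB	(14 << 26)	/* 14 == log2(16KB), shift from hugetlb_encode.h */
#endif

/* len must be a multiple of the 16KB huge page size. */
static void *map_16k_huge(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_16KB,
		    -1, 0);
}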