Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig | 71
-rw-r--r--  arch/alpha/Kconfig | 9
-rw-r--r--  arch/alpha/include/asm/kmap_types.h | 15
-rw-r--r--  arch/alpha/include/asm/mmu_context.h | 12
-rw-r--r--  arch/alpha/include/asm/mmzone.h | 14
-rw-r--r--  arch/alpha/include/asm/page.h | 7
-rw-r--r--  arch/alpha/include/asm/pgtable.h | 12
-rw-r--r--  arch/alpha/include/asm/sparsemem.h | 18
-rw-r--r--  arch/alpha/include/uapi/asm/signal.h | 14
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/alpha/kernel/process.c | 4
-rw-r--r--  arch/alpha/kernel/setup.c | 1
-rw-r--r--  arch/arc/Kconfig | 5
-rw-r--r--  arch/arc/include/asm/bitops.h | 4
-rw-r--r--  arch/arc/include/asm/highmem.h | 26
-rw-r--r--  arch/arc/include/asm/kmap_types.h | 14
-rw-r--r--  arch/arc/include/asm/mmu_context.h | 17
-rw-r--r--  arch/arc/include/asm/page.h | 20
-rw-r--r--  arch/arc/include/asm/pgtable.h | 2
-rw-r--r--  arch/arc/kernel/head.S | 17
-rw-r--r--  arch/arc/kernel/stacktrace.c | 63
-rw-r--r--  arch/arc/mm/highmem.c | 54
-rw-r--r--  arch/arc/mm/init.c | 29
-rw-r--r--  arch/arc/mm/tlb.c | 24
-rw-r--r--  arch/arc/plat-hsdk/platform.c | 17
-rw-r--r--  arch/arm/Kconfig | 49
-rw-r--r--  arch/arm/Kconfig.debug | 6
-rw-r--r--  arch/arm/Makefile | 12
-rw-r--r--  arch/arm/boot/compressed/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/head.S | 3
-rw-r--r--  arch/arm/boot/dts/am437x-l4.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/aspeed-g6.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/dra76x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/exynos4412-odroid-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx50-evk.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx53-ppd.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6q-apalis-eval.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6q-apalis-ixora.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6q-prti6q.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-udoo.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx7-colibri-aster.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts | 7
-rw-r--r--  arch/arm/boot/dts/mmp3.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/motorola-mapphone-common.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/s5pv210-aries.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stm32mp157c-ed1.dts | 15
-rw-r--r--  arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi | 19
-rw-r--r--  arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/stm32mp15xx-dkx.dtsi | 17
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun6i-a31-hummingbird.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-bananapi.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-cubietruck.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts | 5
-rw-r--r--  arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-s3-pinecube.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-v3s.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts | 12
-rw-r--r--  arch/arm/boot/dts/sun9i-a80-cubieboard4.dts | 2
-rw-r--r--  arch/arm/boot/dts/sun9i-a80-optimus.dts | 2
-rw-r--r--  arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-acer-a500-picasso.dts | 2
-rw-r--r--  arch/arm/boot/dts/vf610-zii-dev-rev-b.dts | 3
-rw-r--r--  arch/arm/configs/badge4_defconfig | 1
-rw-r--r--  arch/arm/configs/corgi_defconfig | 1
-rw-r--r--  arch/arm/configs/ebsa110_defconfig | 74
-rw-r--r--  arch/arm/configs/imx_v4_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/ixp4xx_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa_defconfig | 1
-rw-r--r--  arch/arm/configs/spitz_defconfig | 1
-rw-r--r--  arch/arm/crypto/aes-ce-core.S | 32
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c | 8
-rw-r--r--  arch/arm/crypto/chacha-glue.c | 34
-rw-r--r--  arch/arm/crypto/chacha-neon-core.S | 97
-rw-r--r--  arch/arm/crypto/sha1-ce-glue.c | 2
-rw-r--r--  arch/arm/crypto/sha1.h | 2
-rw-r--r--  arch/arm/crypto/sha1_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c | 2
-rw-r--r--  arch/arm/crypto/sha256_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha256_neon_glue.c | 2
-rw-r--r--  arch/arm/crypto/sha512-glue.c | 2
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c | 2
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/elf.h | 4
-rw-r--r--  arch/arm/include/asm/fixmap.h | 4
-rw-r--r--  arch/arm/include/asm/hardirq.h | 11
-rw-r--r--  arch/arm/include/asm/highmem.h | 34
-rw-r--r--  arch/arm/include/asm/irq.h | 2
-rw-r--r--  arch/arm/include/asm/kmap_types.h | 10
-rw-r--r--  arch/arm/include/asm/kprobes.h | 22
-rw-r--r--  arch/arm/include/asm/mach/time.h | 2
-rw-r--r--  arch/arm/include/asm/mmu_context.h | 26
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h | 2
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 2
-rw-r--r--  arch/arm/include/asm/seccomp.h | 11
-rw-r--r--  arch/arm/include/asm/signal.h | 2
-rw-r--r--  arch/arm/include/uapi/asm/signal.h | 27
-rw-r--r--  arch/arm/kernel/Makefile | 6
-rw-r--r--  arch/arm/kernel/perf_regs.c | 3
-rw-r--r--  arch/arm/kernel/process.c | 11
-rw-r--r--  arch/arm/kernel/time.c | 14
-rw-r--r--  arch/arm/kernel/vdso.c | 9
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/arm/mach-bcm/Kconfig | 1
-rw-r--r--  arch/arm/mach-davinci/Kconfig | 1
-rw-r--r--  arch/arm/mach-ebsa110/Makefile | 8
-rw-r--r--  arch/arm/mach-ebsa110/Makefile.boot | 5
-rw-r--r--  arch/arm/mach-ebsa110/core.c | 323
-rw-r--r--  arch/arm/mach-ebsa110/core.h | 38
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/entry-macro.S | 33
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/hardware.h | 21
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/io.h | 89
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/irqs.h | 17
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/memory.h | 22
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/uncompress.h | 41
-rw-r--r--  arch/arm/mach-ebsa110/io.c | 440
-rw-r--r--  arch/arm/mach-ebsa110/leds.c | 71
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 1
-rw-r--r--  arch/arm/mach-highbank/Kconfig | 1
-rw-r--r--  arch/arm/mach-imx/anatop.c | 2
-rw-r--r--  arch/arm/mach-keystone/memory.h | 3
-rw-r--r--  arch/arm/mach-mvebu/coherency_ll.S | 7
-rw-r--r--  arch/arm/mach-omap1/board-h2.c | 22
-rw-r--r--  arch/arm/mach-omap1/board-osk.c | 2
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 4
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c | 8
-rw-r--r--  arch/arm/mach-rpc/time.c | 2
-rw-r--r--  arch/arm/mach-s5pv210/Kconfig | 1
-rw-r--r--  arch/arm/mach-sunxi/sunxi.c | 1
-rw-r--r--  arch/arm/mach-tango/Kconfig | 1
-rw-r--r--  arch/arm/mm/Makefile | 1
-rw-r--r--  arch/arm/mm/highmem.c | 121
-rw-r--r--  arch/arm/mm/init.c | 82
-rw-r--r--  arch/arm/probes/kprobes/opt-arm.c | 18
-rw-r--r--  arch/arm64/Kconfig | 103
-rw-r--r--  arch/arm64/Kconfig.platforms | 2
-rw-r--r--  arch/arm64/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts | 3
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts | 2
-rw-r--r--  arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts | 2
-rw-r--r--  arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-axg-s400.dts | 6
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-axg.dtsi | 52
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2-plus.dts | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gx.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi | 20
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi | 9
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-evk.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi | 9
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn.dtsi | 30
-rw-r--r--  arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts | 10
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts | 10
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi | 12
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts | 12
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra194.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi | 20
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/ipq6018.dtsi | 72
-rw-r--r--  arch/arm64/boot/dts/renesas/r8a774e1.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 3
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 2
-rw-r--r--  arch/arm64/crypto/chacha-neon-core.S | 193
-rw-r--r--  arch/arm64/crypto/ghash-ce-core.S | 15
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 48
-rw-r--r--  arch/arm64/crypto/poly1305-armv8.pl | 2
-rw-r--r--  arch/arm64/crypto/poly1305-core.S_shipped | 2
-rw-r--r--  arch/arm64/crypto/poly1305-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha256-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha3-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha512-glue.c | 2
-rw-r--r--  arch/arm64/include/asm/alternative-macros.h | 217
-rw-r--r--  arch/arm64/include/asm/alternative.h | 267
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 35
-rw-r--r--  arch/arm64/include/asm/brk-imm.h | 2
-rw-r--r--  arch/arm64/include/asm/cache.h | 1
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 1
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 6
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 65
-rw-r--r--  arch/arm64/include/asm/cputype.h | 6
-rw-r--r--  arch/arm64/include/asm/daifflags.h | 3
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 1
-rw-r--r--  arch/arm64/include/asm/esr.h | 1
-rw-r--r--  arch/arm64/include/asm/exception.h | 7
-rw-r--r--  arch/arm64/include/asm/exec.h | 1
-rw-r--r--  arch/arm64/include/asm/futex.h | 8
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 7
-rw-r--r--  arch/arm64/include/asm/insn.h | 3
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 6
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 9
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm64/include/asm/memory.h | 22
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 14
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 42
-rw-r--r--  arch/arm64/include/asm/probes.h | 2
-rw-r--r--  arch/arm64/include/asm/processor.h | 4
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 12
-rw-r--r--  arch/arm64/include/asm/rwonce.h | 73
-rw-r--r--  arch/arm64/include/asm/seccomp.h | 9
-rw-r--r--  arch/arm64/include/asm/signal.h | 25
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 37
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 2
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 10
-rw-r--r--  arch/arm64/include/asm/topology.h | 4
-rw-r--r--  arch/arm64/include/asm/traps.h | 6
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 178
-rw-r--r--  arch/arm64/include/asm/virt.h | 9
-rw-r--r--  arch/arm64/kernel/Makefile | 1
-rw-r--r--  arch/arm64/kernel/alternative.c | 7
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 4
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 3
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 12
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 61
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 7
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 5
-rw-r--r--  arch/arm64/kernel/efi-header.S | 86
-rw-r--r--  arch/arm64/kernel/entry-common.c | 256
-rw-r--r--  arch/arm64/kernel/entry.S | 112
-rw-r--r--  arch/arm64/kernel/head.S | 70
-rw-r--r--  arch/arm64/kernel/image-vars.h | 1
-rw-r--r--  arch/arm64/kernel/irq.c | 36
-rw-r--r--  arch/arm64/kernel/kaslr.c | 26
-rw-r--r--  arch/arm64/kernel/kexec_image.c | 41
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c | 9
-rw-r--r--  arch/arm64/kernel/mte.c | 3
-rw-r--r--  arch/arm64/kernel/perf_event.c | 41
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 3
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 120
-rw-r--r--  arch/arm64/kernel/process.c | 44
-rw-r--r--  arch/arm64/kernel/proton-pack.c | 8
-rw-r--r--  arch/arm64/kernel/psci.c | 5
-rw-r--r--  arch/arm64/kernel/ptrace.c | 7
-rw-r--r--  arch/arm64/kernel/scs.c | 16
-rw-r--r--  arch/arm64/kernel/sdei.c | 126
-rw-r--r--  arch/arm64/kernel/setup.c | 6
-rw-r--r--  arch/arm64/kernel/signal.c | 3
-rw-r--r--  arch/arm64/kernel/sleep.S | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 17
-rw-r--r--  arch/arm64/kernel/suspend.c | 1
-rw-r--r--  arch/arm64/kernel/sys_compat.c | 5
-rw-r--r--  arch/arm64/kernel/syscall.c | 3
-rw-r--r--  arch/arm64/kernel/topology.c | 146
-rw-r--r--  arch/arm64/kernel/traps.c | 51
-rw-r--r--  arch/arm64/kernel/vdso.c | 39
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 2
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 25
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 14
-rw-r--r--  arch/arm64/kvm/arm.c | 38
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 21
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/host.S | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-init.S | 23
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp.lds.S | 5
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/tlb.c | 1
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c | 21
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c | 2
-rw-r--r--  arch/arm64/kvm/hypercalls.c | 2
-rw-r--r--  arch/arm64/kvm/mmu.c | 40
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 199
-rw-r--r--  arch/arm64/kvm/sys_regs.h | 16
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v3.c | 22
-rw-r--r--  arch/arm64/lib/clear_user.S | 8
-rw-r--r--  arch/arm64/lib/copy_from_user.S | 8
-rw-r--r--  arch/arm64/lib/copy_in_user.S | 16
-rw-r--r--  arch/arm64/lib/copy_to_user.S | 8
-rw-r--r--  arch/arm64/lib/memcpy.S | 3
-rw-r--r--  arch/arm64/lib/memmove.S | 3
-rw-r--r--  arch/arm64/lib/memset.S | 3
-rw-r--r--  arch/arm64/lib/mte.S | 6
-rw-r--r--  arch/arm64/lib/uaccess_flushcache.c | 4
-rw-r--r--  arch/arm64/mm/fault.c | 115
-rw-r--r--  arch/arm64/mm/init.c | 120
-rw-r--r--  arch/arm64/mm/mmu.c | 166
-rw-r--r--  arch/arm64/mm/pageattr.c | 6
-rw-r--r--  arch/arm64/mm/proc.S | 6
-rw-r--r--  arch/c6x/Kconfig | 1
-rw-r--r--  arch/c6x/include/asm/elf.h | 3
-rw-r--r--  arch/c6x/include/asm/mmu_context.h | 6
-rw-r--r--  arch/csky/Kconfig | 2
-rw-r--r--  arch/csky/include/asm/Kbuild | 1
-rw-r--r--  arch/csky/include/asm/elf.h | 1
-rw-r--r--  arch/csky/include/asm/fixmap.h | 4
-rw-r--r--  arch/csky/include/asm/highmem.h | 6
-rw-r--r--  arch/csky/include/asm/mmu_context.h | 8
-rw-r--r--  arch/csky/include/asm/seccomp.h | 11
-rw-r--r--  arch/csky/kernel/perf_regs.c | 3
-rw-r--r--  arch/csky/kernel/process.c | 2
-rw-r--r--  arch/csky/mm/highmem.c | 75
-rw-r--r--  arch/h8300/Kconfig | 1
-rw-r--r--  arch/h8300/include/asm/mmu_context.h | 6
-rw-r--r--  arch/h8300/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/h8300/kernel/process.c | 2
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/hexagon/include/asm/elf.h | 1
-rw-r--r--  arch/hexagon/include/asm/mmu_context.h | 33
-rw-r--r--  arch/hexagon/kernel/process.c | 2
-rw-r--r--  arch/ia64/Kconfig | 12
-rw-r--r--  arch/ia64/include/asm/kmap_types.h | 13
-rw-r--r--  arch/ia64/include/asm/meminit.h | 2
-rw-r--r--  arch/ia64/include/asm/mmu_context.h | 17
-rw-r--r--  arch/ia64/include/asm/sparsemem.h | 6
-rw-r--r--  arch/ia64/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/ia64/kernel/process.c | 4
-rw-r--r--  arch/ia64/kernel/ptrace.c | 51
-rw-r--r--  arch/ia64/kernel/time.c | 56
-rw-r--r--  arch/ia64/mm/contig.c | 58
-rw-r--r--  arch/ia64/mm/discontig.c | 44
-rw-r--r--  arch/ia64/mm/init.c | 14
-rw-r--r--  arch/ia64/mm/numa.c | 30
-rw-r--r--  arch/m68k/68000/timers.c | 7
-rw-r--r--  arch/m68k/Kconfig.cpu | 68
-rw-r--r--  arch/m68k/Kconfig.machine | 19
-rw-r--r--  arch/m68k/amiga/config.c | 19
-rw-r--r--  arch/m68k/apollo/config.c | 12
-rw-r--r--  arch/m68k/atari/config.c | 3
-rw-r--r--  arch/m68k/atari/time.c | 9
-rw-r--r--  arch/m68k/bvme6000/config.c | 10
-rw-r--r--  arch/m68k/coldfire/Makefile | 32
-rw-r--r--  arch/m68k/coldfire/pit.c | 2
-rw-r--r--  arch/m68k/coldfire/sltimers.c | 8
-rw-r--r--  arch/m68k/coldfire/timers.c | 8
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 9
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 9
-rw-r--r--  arch/m68k/configs/atari_defconfig | 9
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 9
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 9
-rw-r--r--  arch/m68k/configs/mac_defconfig | 9
-rw-r--r--  arch/m68k/configs/multi_defconfig | 9
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 9
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 9
-rw-r--r--  arch/m68k/configs/q40_defconfig | 9
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 9
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 9
-rw-r--r--  arch/m68k/hp300/config.c | 1
-rw-r--r--  arch/m68k/hp300/time.c | 8
-rw-r--r--  arch/m68k/hp300/time.h | 2
-rw-r--r--  arch/m68k/include/asm/cmpxchg.h | 10
-rw-r--r--  arch/m68k/include/asm/machdep.h | 13
-rw-r--r--  arch/m68k/include/asm/mmu_context.h | 38
-rw-r--r--  arch/m68k/include/asm/page.h | 2
-rw-r--r--  arch/m68k/include/asm/page_mm.h | 7
-rw-r--r--  arch/m68k/include/asm/virtconvert.h | 5
-rw-r--r--  arch/m68k/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/m68k/kernel/setup_mm.c | 3
-rw-r--r--  arch/m68k/kernel/setup_no.c | 2
-rw-r--r--  arch/m68k/kernel/time.c | 18
-rw-r--r--  arch/m68k/kernel/vmlinux-nommu.lds | 3
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds | 3
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds | 2
-rw-r--r--  arch/m68k/mac/config.c | 32
-rw-r--r--  arch/m68k/mac/iop.c | 54
-rw-r--r--  arch/m68k/mac/via.c | 29
-rw-r--r--  arch/m68k/mm/init.c | 8
-rw-r--r--  arch/m68k/mvme147/config.c | 10
-rw-r--r--  arch/m68k/mvme16x/config.c | 10
-rw-r--r--  arch/m68k/q40/config.c | 7
-rw-r--r--  arch/m68k/q40/q40ints.c | 10
-rw-r--r--  arch/m68k/sun3/config.c | 4
-rw-r--r--  arch/m68k/sun3/sun3ints.c | 3
-rw-r--r--  arch/m68k/sun3x/config.c | 2
-rw-r--r--  arch/m68k/sun3x/time.c | 5
-rw-r--r--  arch/m68k/sun3x/time.h | 2
-rw-r--r--  arch/microblaze/Kconfig | 55
-rw-r--r--  arch/microblaze/Makefile | 11
-rw-r--r--  arch/microblaze/configs/mmu_defconfig | 1
-rw-r--r--  arch/microblaze/configs/nommu_defconfig | 90
-rw-r--r--  arch/microblaze/include/asm/dma.h | 6
-rw-r--r--  arch/microblaze/include/asm/exceptions.h | 5
-rw-r--r--  arch/microblaze/include/asm/fixmap.h | 4
-rw-r--r--  arch/microblaze/include/asm/highmem.h | 6
-rw-r--r--  arch/microblaze/include/asm/io.h | 3
-rw-r--r--  arch/microblaze/include/asm/mmu.h | 4
-rw-r--r--  arch/microblaze/include/asm/mmu_context.h | 4
-rw-r--r--  arch/microblaze/include/asm/mmu_context_mm.h | 8
-rw-r--r--  arch/microblaze/include/asm/page.h | 59
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 4
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 43
-rw-r--r--  arch/microblaze/include/asm/processor.h | 40
-rw-r--r--  arch/microblaze/include/asm/registers.h | 2
-rw-r--r--  arch/microblaze/include/asm/setup.h | 2
-rw-r--r--  arch/microblaze/include/asm/thread_info.h | 2
-rw-r--r--  arch/microblaze/include/asm/tlbflush.h | 14
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 27
-rw-r--r--  arch/microblaze/kernel/Makefile | 4
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c | 2
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S | 622
-rw-r--r--  arch/microblaze/kernel/exceptions.c | 5
-rw-r--r--  arch/microblaze/kernel/head.S | 12
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 130
-rw-r--r--  arch/microblaze/kernel/microblaze_ksyms.c | 2
-rw-r--r--  arch/microblaze/kernel/process.c | 12
-rw-r--r--  arch/microblaze/kernel/setup.c | 4
-rw-r--r--  arch/microblaze/kernel/signal.c | 10
-rw-r--r--  arch/microblaze/kernel/unwind.c | 19
-rw-r--r--  arch/microblaze/mm/Makefile | 5
-rw-r--r--  arch/microblaze/mm/consistent.c | 29
-rw-r--r--  arch/microblaze/mm/highmem.c | 78
-rw-r--r--  arch/microblaze/mm/init.c | 54
-rw-r--r--  arch/microblaze/pci/pci-common.c | 2
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-crypto.h | 2
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-md5.c | 14
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-sha1.c | 2
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-sha256.c | 2
-rw-r--r--  arch/mips/cavium-octeon/crypto/octeon-sha512.c | 2
-rw-r--r--  arch/mips/configs/generic/board-ranchu.config | 1
-rw-r--r--  arch/mips/configs/gpr_defconfig | 2
-rw-r--r--  arch/mips/configs/mtx1_defconfig | 3
-rw-r--r--  arch/mips/configs/rm200_defconfig | 1
-rw-r--r--  arch/mips/include/asm/fixmap.h | 4
-rw-r--r--  arch/mips/include/asm/highmem.h | 6
-rw-r--r--  arch/mips/include/asm/kmap_types.h | 13
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 11
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 3
-rw-r--r--  arch/mips/include/uapi/asm/signal.h | 12
-rw-r--r--  arch/mips/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/mips/kernel/idle.c | 12
-rw-r--r--  arch/mips/kernel/process.c | 5
-rw-r--r--  arch/mips/kernel/smp.c | 25
-rw-r--r--  arch/mips/mm/highmem.c | 77
-rw-r--r--  arch/mips/mm/init.c | 4
-rw-r--r--  arch/mips/vdso/genvdso.c | 4
-rw-r--r--  arch/nds32/Kconfig | 1
-rw-r--r--  arch/nds32/Kconfig.cpu | 1
-rw-r--r--  arch/nds32/include/asm/elf.h | 1
-rw-r--r--  arch/nds32/include/asm/fixmap.h | 4
-rw-r--r--  arch/nds32/include/asm/highmem.h | 22
-rw-r--r--  arch/nds32/include/asm/mmu_context.h | 10
-rw-r--r--  arch/nds32/mm/Makefile | 1
-rw-r--r--  arch/nds32/mm/highmem.c | 48
-rw-r--r--  arch/nds32/mm/mm-nds32.c | 6
-rw-r--r--  arch/nios2/Kconfig | 1
-rw-r--r--  arch/nios2/include/asm/mmu_context.h | 21
-rw-r--r--  arch/nios2/kernel/process.c | 2
-rw-r--r--  arch/openrisc/Kconfig | 1
-rw-r--r--  arch/openrisc/include/asm/mmu_context.h | 8
-rw-r--r--  arch/openrisc/kernel/process.c | 2
-rw-r--r--  arch/openrisc/mm/init.c | 1
-rw-r--r--  arch/openrisc/mm/ioremap.c | 1
-rw-r--r--  arch/parisc/Kconfig | 2
-rw-r--r--  arch/parisc/include/asm/Kbuild | 1
-rw-r--r--  arch/parisc/include/asm/hardirq.h | 1
-rw-r--r--  arch/parisc/include/asm/kmap_types.h | 13
-rw-r--r--  arch/parisc/include/asm/mmu_context.h | 12
-rw-r--r--  arch/parisc/include/asm/seccomp.h | 22
-rw-r--r--  arch/parisc/include/uapi/asm/signal.h | 34
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/parisc/kernel/process.c | 2
-rw-r--r--  arch/parisc/kernel/time.c | 9
-rw-r--r--  arch/powerpc/Kconfig | 8
-rw-r--r--  arch/powerpc/Makefile | 2
-rw-r--r--  arch/powerpc/boot/Makefile | 1
-rw-r--r--  arch/powerpc/boot/decompress.c | 1
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 4
-rw-r--r--  arch/powerpc/crypto/sha1-spe-glue.c | 2
-rw-r--r--  arch/powerpc/crypto/sha1.c | 2
-rw-r--r--  arch/powerpc/crypto/sha256-spe-glue.c | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup-radix.h | 68
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h | 12
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 12
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h | 19
-rw-r--r--  arch/powerpc/include/asm/fixmap.h | 4
-rw-r--r--  arch/powerpc/include/asm/highmem.h | 7
-rw-r--r--  arch/powerpc/include/asm/kmap_types.h | 13
-rw-r--r--  arch/powerpc/include/asm/kup.h | 26
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 13
-rw-r--r--  arch/powerpc/include/asm/mmzone.h | 5
-rw-r--r--  arch/powerpc/include/asm/nohash/32/kup-8xx.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 47
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pte-8xx.h | 32
-rw-r--r--  arch/powerpc/include/asm/seccomp.h | 23
-rw-r--r--  arch/powerpc/include/asm/security_features.h | 7
-rw-r--r--  arch/powerpc/include/asm/setup.h | 4
-rw-r--r--  arch/powerpc/include/asm/sparsemem.h | 5
-rw-r--r--  arch/powerpc/include/asm/topology.h | 12
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/powerpc/kernel/eeh_cache.c | 5
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 93
-rw-r--r--  arch/powerpc/kernel/head_40x.S | 8
-rw-r--r--  arch/powerpc/kernel/head_8xx.S | 46
-rw-r--r--  arch/powerpc/kernel/head_book3s_32.S | 15
-rw-r--r--  arch/powerpc/kernel/idle.c | 4
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 1
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 122
-rw-r--r--  arch/powerpc/kernel/smp.c | 3
-rw-r--r--  arch/powerpc/kernel/syscall_64.c | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 56
-rw-r--r--  arch/powerpc/kernel/uprobes.c | 1
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 18
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c | 7
-rw-r--r--  arch/powerpc/kvm/book3s_xive_native.c | 7
-rw-r--r--  arch/powerpc/lib/feature-fixups.c | 104
-rw-r--r--  arch/powerpc/mm/Makefile | 3
-rw-r--r--  arch/powerpc/mm/book3s64/hash_native.c | 23
-rw-r--r--  arch/powerpc/mm/book3s64/mmu_context.c | 20
-rw-r--r--  arch/powerpc/mm/highmem.c | 67
-rw-r--r--  arch/powerpc/mm/maccess.c | 9
-rw-r--r--  arch/powerpc/mm/mem.c | 8
-rw-r--r--  arch/powerpc/mm/numa.c | 3
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 6
-rw-r--r--  arch/powerpc/perf/imc-pmu.c | 3
-rw-r--r--  arch/powerpc/perf/perf_regs.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 33
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h | 2
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 15
-rw-r--r--  arch/riscv/Kconfig | 5
-rw-r--r--  arch/riscv/include/asm/mmu_context.h | 22
-rw-r--r--  arch/riscv/include/asm/pgtable-32.h | 2
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 2
-rw-r--r--  arch/riscv/include/asm/seccomp.h | 10
-rw-r--r--  arch/riscv/include/asm/set_memory.h | 1
-rw-r--r--  arch/riscv/include/asm/timex.h | 4
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 2
-rw-r--r--  arch/riscv/include/asm/vdso/processor.h | 2
-rw-r--r--  arch/riscv/kernel/ftrace.c | 2
-rw-r--r--  arch/riscv/kernel/head.S | 5
-rw-r--r--  arch/riscv/kernel/perf_regs.c | 3
-rw-r--r--  arch/riscv/kernel/process.c | 2
-rw-r--r--  arch/riscv/kernel/setup.c | 1
-rw-r--r--  arch/riscv/kernel/vdso/.gitignore | 1
-rw-r--r--  arch/riscv/kernel/vdso/Makefile | 18
-rwxr-xr-x  arch/riscv/kernel/vdso/so2s.sh | 6
-rw-r--r--  arch/riscv/mm/fault.c | 4
-rw-r--r--  arch/riscv/mm/init.c | 32
-rw-r--r--  arch/riscv/mm/pageattr.c | 31
-rw-r--r--  arch/s390/Kconfig | 12
-rw-r--r--  arch/s390/Kconfig.debug | 8
-rw-r--r--  arch/s390/Makefile | 2
-rw-r--r--  arch/s390/boot/boot.h | 18
-rw-r--r--  arch/s390/boot/compressed/.gitignore | 1
-rw-r--r--  arch/s390/boot/compressed/Makefile | 26
-rw-r--r--  arch/s390/boot/compressed/decompressor.h | 4
-rw-r--r--  arch/s390/boot/compressed/vmlinux.lds.S | 9
-rw-r--r--  arch/s390/boot/head.S | 32
-rw-r--r--  arch/s390/boot/head_kdump.S | 8
-rw-r--r--  arch/s390/boot/ipl_parm.c | 49
-rw-r--r--  arch/s390/boot/kaslr.c | 33
-rw-r--r--  arch/s390/boot/mem_detect.c | 13
-rw-r--r--  arch/s390/boot/pgm_check_info.c | 224
-rw-r--r--  arch/s390/boot/startup.c | 70
-rw-r--r--  arch/s390/configs/debug_defconfig | 14
-rw-r--r--  arch/s390/configs/defconfig | 11
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 2
-rw-r--r--  arch/s390/crypto/arch_random.c | 110
-rw-r--r--  arch/s390/crypto/prng.c | 53
-rw-r--r--  arch/s390/crypto/sha.h | 3
-rw-r--r--  arch/s390/crypto/sha1_s390.c | 2
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 2
-rw-r--r--  arch/s390/crypto/sha3_256_s390.c | 1
-rw-r--r--  arch/s390/crypto/sha3_512_s390.c | 1
-rw-r--r--  arch/s390/crypto/sha512_s390.c | 2
-rw-r--r--  arch/s390/include/asm/archrandom.h | 5
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 12
-rw-r--r--  arch/s390/include/asm/cio.h | 2
-rw-r--r--  arch/s390/include/asm/delay.h | 1
-rw-r--r--  arch/s390/include/asm/ftrace.h | 31
-rw-r--r--  arch/s390/include/asm/futex.h | 6
-rw-r--r--  arch/s390/include/asm/kasan.h | 37
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 46
-rw-r--r--  arch/s390/include/asm/pgtable.h | 65
-rw-r--r--  arch/s390/include/asm/processor.h | 13
-rw-r--r--  arch/s390/include/asm/ptrace.h | 1
-rw-r--r--  arch/s390/include/asm/sclp.h | 7
-rw-r--r--  arch/s390/include/asm/seccomp.h | 9
-rw-r--r--  arch/s390/include/asm/sections.h | 4
-rw-r--r--  arch/s390/include/asm/setup.h | 6
-rw-r--r--  arch/s390/include/asm/thread_info.h | 2
-rw-r--r--  arch/s390/include/asm/timex.h | 7
-rw-r--r--  arch/s390/include/asm/uaccess.h | 22
-rw-r--r--  arch/s390/include/asm/vdso.h | 25
-rw-r--r--  arch/s390/include/asm/vdso/vdso.h | 0
-rw-r--r--  arch/s390/include/asm/vtime.h | 1
-rw-r--r--  arch/s390/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 25
-rw-r--r--  arch/s390/kernel/base.S | 22
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/entry.S | 160
-rw-r--r--  arch/s390/kernel/entry.h | 1
-rw-r--r--  arch/s390/kernel/ftrace.c | 63
-rw-r--r--  arch/s390/kernel/head64.S | 7
-rw-r--r--  arch/s390/kernel/idle.c | 6
-rw-r--r--  arch/s390/kernel/mcount.S | 8
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 4
-rw-r--r--  arch/s390/kernel/perf_regs.c | 3
-rw-r--r--  arch/s390/kernel/process.c | 14
-rw-r--r--  arch/s390/kernel/setup.c | 50
-rw-r--r--  arch/s390/kernel/smp.c | 35
-rw-r--r--  arch/s390/kernel/time.c | 44
-rw-r--r--  arch/s390/kernel/uv.c | 9
-rw-r--r--  arch/s390/kernel/vdso.c | 69
-rw-r--r--  arch/s390/kernel/vdso64/Makefile | 5
-rw-r--r--  arch/s390/kernel/vdso64/getcpu.S | 31
-rw-r--r--  arch/s390/kernel/vdso64/getcpu.c | 21
-rw-r--r--  arch/s390/kernel/vdso64/vdso.h | 14
-rw-r--r--  arch/s390/kernel/vdso64/vdso64.lds.S | 1
-rw-r--r--  arch/s390/kernel/vdso64/vdso64_generic.c | 1
-rw-r--r--  arch/s390/kernel/vdso64/vdso_user_wrapper.S | 1
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/s390/kernel/vtime.c | 51
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 4
-rw-r--r--  arch/s390/kvm/pv.c | 3
-rw-r--r--  arch/s390/lib/delay.c | 18
-rw-r--r--  arch/s390/lib/uaccess.c | 105
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 2
-rw-r--r--  arch/s390/mm/fault.c | 29
-rw-r--r--  arch/s390/mm/gmap.c | 2
-rw-r--r--  arch/s390/mm/init.c | 12
-rw-r--r--  arch/s390/mm/kasan_init.c | 93
-rw-r--r--  arch/s390/mm/pgalloc.c | 13
-rw-r--r--  arch/s390/mm/vmem.c | 38
-rw-r--r--  arch/s390/pci/pci.c | 4
-rw-r--r--  arch/s390/pci/pci_event.c | 4
-rw-r--r--  arch/s390/pci/pci_irq.c | 18
-rw-r--r--  arch/s390/pci/pci_mmio.c | 104
-rw-r--r--  arch/s390/purgatory/head.S | 9
-rw-r--r--  arch/s390/purgatory/purgatory.c | 2
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/include/asm/fixmap.h | 8
-rw-r--r--  arch/sh/include/asm/hardirq.h | 14
-rw-r--r--  arch/sh/include/asm/kmap_types.h | 15
-rw-r--r--  arch/sh/include/asm/mmu_context.h | 7
-rw-r--r--  arch/sh/include/asm/mmu_context_32.h | 9
-rw-r--r--  arch/sh/include/asm/seccomp.h | 10
-rw-r--r--  arch/sh/kernel/idle.c | 2
-rw-r--r--  arch/sh/kernel/irq.c | 2
-rw-r--r--  arch/sh/kernel/traps.c | 2
-rw-r--r--  arch/sh/mm/init.c | 8
-rw-r--r--  arch/sparc/Kconfig | 6
-rw-r--r--  arch/sparc/crypto/crc32c_glue.c | 2
-rw-r--r--  arch/sparc/crypto/md5_glue.c | 9
-rw-r--r--  arch/sparc/crypto/sha1_glue.c | 2
-rw-r--r--  arch/sparc/crypto/sha256_glue.c | 2
-rw-r--r--  arch/sparc/crypto/sha512_glue.c | 2
-rw-r--r--  arch/sparc/include/asm/highmem.h | 8
-rw-r--r--  arch/sparc/include/asm/kmap_types.h | 11
-rw-r--r--  arch/sparc/include/asm/mmu_context_32.h | 10
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 10
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 13
-rw-r--r--  arch/sparc/include/asm/vaddrs.h | 4
-rw-r--r--  arch/sparc/include/uapi/asm/signal.h | 4
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h | 3
-rw-r--r--  arch/sparc/kernel/leon_pmc.c | 4
-rw-r--r--  arch/sparc/kernel/process_32.c | 2
-rw-r--r--  arch/sparc/kernel/process_64.c | 4
-rw-r--r--  arch/sparc/lib/csum_copy.S | 2
-rw-r--r--  arch/sparc/mm/Makefile | 3
-rw-r--r--  arch/sparc/mm/highmem.c | 115
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 19
-rw-r--r--  arch/sparc/mm/init_64.c | 2
-rw-r--r--  arch/sparc/mm/srmmu.c | 2
-rw-r--r--  arch/um/Kconfig | 1
-rw-r--r--  arch/um/include/asm/fixmap.h | 1
-rw-r--r--  arch/um/include/asm/hardirq.h | 17
-rw-r--r--  arch/um/include/asm/kmap_types.h | 13
-rw-r--r--  arch/um/include/asm/mmu_context.h | 12
-rw-r--r--  arch/um/include/asm/pgalloc.h | 8
-rw-r--r--  arch/um/kernel/process.c | 2
-rw-r--r--  arch/um/kernel/skas/clone.c | 2
-rw-r--r--  arch/x86/Kconfig | 29
-rw-r--r--  arch/x86/Kconfig.debug | 3
-rw-r--r--  arch/x86/Makefile | 12
-rw-r--r--  arch/x86/boot/code16gcc.h | 12
-rw-r--r--  arch/x86/boot/compressed/Makefile | 4
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 8
-rw-r--r--  arch/x86/boot/compressed/ident_map_64.c | 11
-rw-r--r--  arch/x86/boot/compressed/mem_encrypt.S | 20
-rw-r--r--  arch/x86/boot/compressed/misc.h | 2
-rw-r--r--  arch/x86/boot/compressed/sev-es.c | 5
-rw-r--r--  arch/x86/crypto/aes_glue.c | 1
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 20
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S | 20
-rw-r--r--  arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 2
-rw-r--r--  arch/x86/crypto/poly1305_glue.c | 3
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c | 2
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c | 2
-rw-r--r--  arch/x86/crypto/sha512-avx-asm.S | 2
-rw-r--r--  arch/x86/crypto/sha512-ssse3-asm.S | 2
-rw-r--r--  arch/x86/crypto/sha512_ssse3_glue.c | 2
-rw-r--r--  arch/x86/entry/common.c | 34
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl | 10
-rw-r--r--  arch/x86/entry/vdso/Makefile | 8
-rw-r--r--  arch/x86/entry/vdso/extable.c | 46
-rw-r--r--  arch/x86/entry/vdso/extable.h | 28
-rw-r--r--  arch/x86/entry/vdso/vdso-layout.lds.S | 9
-rw-r--r--  arch/x86/entry/vdso/vdso.lds.S | 1
-rw-r--r--  arch/x86/entry/vdso/vdso2c.c | 2
-rw-r--r--  arch/x86/entry/vdso/vdso2c.h | 50
-rw-r--r--  arch/x86/entry/vdso/vdso32/sigreturn.S | 2
-rw-r--r--  arch/x86/entry/vdso/vma.c | 36
-rw-r--r--  arch/x86/entry/vdso/vsgx.S | 151
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c | 2
-rw-r--r--  arch/x86/events/amd/core.c | 2
-rw-r--r--  arch/x86/events/core.c | 4
-rw-r--r--  arch/x86/events/intel/core.c | 32
-rw-r--r--  arch/x86/events/intel/cstate.c | 25
-rw-r--r--  arch/x86/events/intel/ds.c | 70
-rw-r--r--  arch/x86/events/intel/lbr.c | 4
-rw-r--r--  arch/x86/events/intel/uncore.c | 10
-rw-r--r--  arch/x86/events/intel/uncore.h | 12
-rw-r--r--  arch/x86/events/intel/uncore_snb.c | 22
-rw-r--r--  arch/x86/events/msr.c | 1
-rw-r--r--  arch/x86/events/perf_event.h | 5
-rw-r--r--  arch/x86/events/rapl.c | 14
-rw-r--r--  arch/x86/hyperv/hv_apic.c | 14
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 2
-rw-r--r--  arch/x86/include/asm/acpi.h | 11
-rw-r--r--  arch/x86/include/asm/apic.h | 16
-rw-r--r--  arch/x86/include/asm/apicdef.h | 16
-rw-r--r--  arch/x86/include/asm/atomic.h | 2
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 2
-rw-r--r--  arch/x86/include/asm/cacheinfo.h | 4
-rw-r--r--  arch/x86/include/asm/cmpxchg.h | 2
-rw-r--r--  arch/x86/include/asm/compat.h | 15
-rw-r--r--  arch/x86/include/asm/copy_mc_test.h | 75
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 2
-rw-r--r--  arch/x86/include/asm/disabled-features.h | 8
-rw-r--r--  arch/x86/include/asm/elf.h | 15
-rw-r--r--  arch/x86/include/asm/enclu.h | 9
-rw-r--r--  arch/x86/include/asm/fixmap.h | 15
-rw-r--r--  arch/x86/include/asm/fpu/api.h | 23
-rw-r--r--  arch/x86/include/asm/highmem.h | 13
-rw-r--r--  arch/x86/include/asm/hpet.h | 11
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 14
-rw-r--r--  arch/x86/include/asm/hyperv-tlfs.h | 7
-rw-r--r--  arch/x86/include/asm/idtentry.h | 3
-rw-r--r--  arch/x86/include/asm/insn.h | 15
-rw-r--r--  arch/x86/include/asm/inst.h | 15
-rw-r--r--  arch/x86/include/asm/io_apic.h | 79
-rw-r--r--  arch/x86/include/asm/iomap.h | 13
-rw-r--r--  arch/x86/include/asm/irq_remapping.h | 9
-rw-r--r--  arch/x86/include/asm/irqdomain.h | 3
-rw-r--r--  arch/x86/include/asm/kmap_types.h | 13
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 9
-rw-r--r--  arch/x86/include/asm/mmu.h | 9
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 8
-rw-r--r--  arch/x86/include/asm/msi.h | 50
-rw-r--r--  arch/x86/include/asm/msidef.h | 57
-rw-r--r--  arch/x86/include/asm/msr-index.h | 12
-rw-r--r--  arch/x86/include/asm/mwait.h | 2
-rw-r--r--  arch/x86/include/asm/page_32_types.h | 8
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 6
-rw-r--r--  arch/x86/include/asm/paravirt.h | 11
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 1
-rw-r--r--  arch/x86/include/asm/perf_event.h | 4
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 18
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h | 6
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 1
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/include/asm/seccomp.h | 20
-rw-r--r--  arch/x86/include/asm/set_memory.h | 1
-rw-r--r--  arch/x86/include/asm/sparsemem.h | 10
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 3
-rw-r--r--  arch/x86/include/asm/sync_core.h | 9
-rw-r--r--  arch/x86/include/asm/thread_info.h | 17
-rw-r--r--  arch/x86/include/asm/topology.h | 5
-rw-r--r--  arch/x86/include/asm/trap_pf.h | 2
-rw-r--r--  arch/x86/include/asm/uv/bios.h | 51
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 10
-rw-r--r--  arch/x86/include/asm/uv/uv_geo.h | 103
-rw-r--r--  arch/x86/include/asm/vdso.h | 7
-rw-r--r--  arch/x86/include/asm/x86_init.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/kvm_para.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/sgx.h | 168
-rw-r--r--  arch/x86/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/x86/kernel/acpi/apei.c | 5
-rw-r--r--  arch/x86/kernel/alternative.c | 11
-rw-r--r--  arch/x86/kernel/amd_nb.c | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 73
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 18
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c | 10
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c | 16
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c | 9
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 525
-rw-r--r--  arch/x86/kernel/apic/ipi.c | 6
-rw-r--r--  arch/x86/kernel/apic/msi.c | 153
-rw-r--r--  arch/x86/kernel/apic/probe_32.c | 9
-rw-r--r--  arch/x86/kernel/apic/vector.c | 73
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 10
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c | 17
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 66
-rw-r--r--  arch/x86/kernel/asm-offsets.c | 1
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 38
-rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c | 16
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 55
-rw-r--r--  arch/x86/kernel/cpu/cacheinfo.c | 8
-rw-r--r--  arch/x86/kernel/cpu/feat_ctl.c | 38
-rw-r--r--  arch/x86/kernel/cpu/hygon.c | 31
-rw-r--r--  arch/x86/kernel/cpu/mce/amd.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mce/apei.c | 61
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c | 55
-rw-r--r--  arch/x86/kernel/cpu/mce/inject.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mce/intel.c | 21
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 63
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 29
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.c | 5
-rw-r--r--  arch/x86/kernel/cpu/resctrl/core.c | 8
-rw-r--r--  arch/x86/kernel/cpu/resctrl/internal.h | 6
-rw-r--r--  arch/x86/kernel/cpu/resctrl/monitor.c | 88
-rw-r--r--  arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 95
-rw-r--r--  arch/x86/kernel/cpu/sgx/Makefile | 5
-rw-r--r--  arch/x86/kernel/cpu/sgx/arch.h | 338
-rw-r--r--  arch/x86/kernel/cpu/sgx/driver.c | 194
-rw-r--r--  arch/x86/kernel/cpu/sgx/driver.h | 29
-rw-r--r--  arch/x86/kernel/cpu/sgx/encl.c | 740
-rw-r--r--  arch/x86/kernel/cpu/sgx/encl.h | 119
-rw-r--r--  arch/x86/kernel/cpu/sgx/encls.h | 231
-rw-r--r--  arch/x86/kernel/cpu/sgx/ioctl.c | 716
-rw-r--r--  arch/x86/kernel/cpu/sgx/main.c | 733
-rw-r--r--  arch/x86/kernel/cpu/sgx/sgx.h | 86
-rw-r--r--  arch/x86/kernel/cpu/topology.c | 10
-rw-r--r--  arch/x86/kernel/cpuid.c | 7
-rw-r--r--  arch/x86/kernel/crash_dump_32.c | 48
-rw-r--r--  arch/x86/kernel/devicetree.c | 30
-rw-r--r--  arch/x86/kernel/dumpstack.c | 25
-rw-r--r--  arch/x86/kernel/head64.c | 1
-rw-r--r--  arch/x86/kernel/head_64.S | 45
-rw-r--r--  arch/x86/kernel/hpet.c | 122
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c | 3
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 6
-rw-r--r--  arch/x86/kernel/kprobes/opt.c | 22
-rw-r--r--  arch/x86/kernel/kvm.c | 6
-rw-r--r--  arch/x86/kernel/msr.c | 8
-rw-r--r--  arch/x86/kernel/nmi.c | 6
-rw-r--r--  arch/x86/kernel/perf_regs.c | 17
-rw-r--r--  arch/x86/kernel/process.c | 12
-rw-r--r--  arch/x86/kernel/process_64.c | 28
-rw-r--r--  arch/x86/kernel/setup.c | 5
-rw-r--r--  arch/x86/kernel/sev-es-shared.c | 26
-rw-r--r--  arch/x86/kernel/sev-es.c | 20
-rw-r--r--  arch/x86/kernel/sev_verify_cbit.S | 89
-rw-r--r--  arch/x86/kernel/signal.c | 4
-rw-r--r--  arch/x86/kernel/signal_compat.c | 9
-rw-r--r--  arch/x86/kernel/smpboot.c | 88
-rw-r--r--  arch/x86/kernel/tboot.c | 9
-rw-r--r--  arch/x86/kernel/traps.c | 66
-rw-r--r--  arch/x86/kernel/unwind_orc.c | 9
-rw-r--r--  arch/x86/kernel/uprobes.c | 12
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 12
-rw-r--r--  arch/x86/kernel/x86_init.c | 1
-rw-r--r--  arch/x86/kvm/cpuid.c | 29
-rw-r--r--  arch/x86/kvm/cpuid.h | 1
-rw-r--r--  arch/x86/kvm/emulate.c | 8
-rw-r--r--  arch/x86/kvm/irq.c | 85
-rw-r--r--  arch/x86/kvm/irq_comm.c | 31
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 24
-rw-r--r--  arch/x86/kvm/mmu/spte.c | 20
-rw-r--r--  arch/x86/kvm/mmu/spte.h | 41
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c | 11
-rw-r--r--  arch/x86/kvm/svm/sev.c | 2
-rw-r--r--  arch/x86/kvm/svm/svm.c | 16
-rw-r--r--  arch/x86/kvm/vmx/evmcs.c | 3
-rw-r--r--  arch/x86/kvm/vmx/evmcs.h | 3
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 6
-rw-r--r--  arch/x86/kvm/x86.c | 100
-rw-r--r--  arch/x86/kvm/x86.h | 8
-rw-r--r--  arch/x86/lib/copy_mc.c | 4
-rw-r--r--  arch/x86/lib/copy_mc_64.S | 10
-rw-r--r--  arch/x86/lib/insn-eval.c | 10
-rw-r--r--  arch/x86/lib/memcpy_64.S | 4
-rw-r--r--  arch/x86/lib/memmove_64.S | 4
-rw-r--r--  arch/x86/lib/memset_64.S | 4
-rw-r--r--  arch/x86/lib/msr-smp.c | 7
-rw-r--r--  arch/x86/lib/usercopy.c | 22
-rw-r--r--  arch/x86/mm/fault.c | 45
-rw-r--r--  arch/x86/mm/highmem_32.c | 59
-rw-r--r--  arch/x86/mm/ident_map.c | 12
-rw-r--r--  arch/x86/mm/init.c | 6
-rw-r--r--  arch/x86/mm/init_32.c | 15
-rw-r--r--  arch/x86/mm/iomap_32.c | 57
-rw-r--r--  arch/x86/mm/mem_encrypt.c | 1
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c | 4
-rw-r--r--  arch/x86/mm/numa.c | 2
-rw-r--r--  arch/x86/mm/pat/set_memory.c | 4
-rw-r--r--  arch/x86/mm/tlb.c | 10
-rw-r--r--  arch/x86/oprofile/backtrace.c | 2
-rw-r--r--  arch/x86/pci/i386.c | 6
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 8
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 4
-rw-r--r--  arch/x86/pci/xen.c | 26
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 24
-rw-r--r--  arch/x86/platform/uv/Makefile | 2
-rw-r--r--  arch/x86/platform/uv/bios_uv.c | 55
-rw-r--r--  arch/x86/platform/uv/uv_irq.c | 4
-rw-r--r--  arch/x86/platform/uv/uv_sysfs.c | 63
-rw-r--r--  arch/x86/purgatory/purgatory.c | 2
-rw-r--r--  arch/x86/um/stub_segv.c | 2
-rw-r--r--  arch/x86/xen/apic.c | 7
-rw-r--r--  arch/x86/xen/spinlock.c | 12
-rw-r--r--  arch/xtensa/Kconfig | 2
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 1
-rw-r--r--  arch/xtensa/include/asm/fixmap.h | 59
-rw-r--r--  arch/xtensa/include/asm/highmem.h | 15
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h | 11
-rw-r--r--  arch/xtensa/include/asm/nommu_context.h | 26
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 2
-rw-r--r--  arch/xtensa/include/asm/seccomp.h | 11
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 2
-rw-r--r--  arch/xtensa/include/uapi/asm/signal.h | 24
-rw-r--r--  arch/xtensa/mm/cache.c | 14
-rw-r--r--  arch/xtensa/mm/highmem.c | 62
-rw-r--r--  arch/xtensa/mm/init.c | 8
-rw-r--r--  arch/xtensa/mm/mmu.c | 3
966 files changed, 11880 insertions, 10048 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 56b6ccc0e32d..d4bdc19ed3ad 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -261,7 +261,7 @@ config ARCH_HAS_SET_DIRECT_MAP
#
# Select if the architecture provides the arch_dma_set_uncached symbol to
-# either provide an uncached segement alias for a DMA allocation, or
+# either provide an uncached segment alias for a DMA allocation, or
# to remap the page tables in place.
#
config ARCH_HAS_DMA_SET_UNCACHED
@@ -314,14 +314,14 @@ config ARCH_32BIT_OFF_T
config HAVE_ASM_MODVERSIONS
bool
help
- This symbol should be selected by an architecure if it provides
+ This symbol should be selected by an architecture if it provides
<asm/asm-prototypes.h> to support the module versioning for symbols
exported from assembly code.
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
- This symbol should be selected by an architecure if it supports
+ This symbol should be selected by an architecture if it supports
the API needed to access registers and stack entries from pt_regs,
declared in asm/ptrace.h
For example the kprobes-based event tracer needs this API.
@@ -336,7 +336,7 @@ config HAVE_RSEQ
config HAVE_FUNCTION_ARG_ACCESS_API
bool
help
- This symbol should be selected by an architecure if it supports
+ This symbol should be selected by an architecture if it supports
the API needed to access function arguments from pt_regs,
declared in asm/ptrace.h
@@ -486,6 +486,9 @@ config HAVE_ARCH_SECCOMP_FILTER
- secure_computing return value is checked and a return value of -1
results in the system call being skipped immediately.
- seccomp syscall wired up
+ - if !HAVE_SPARSE_SYSCALL_NR, have SECCOMP_ARCH_NATIVE,
+ SECCOMP_ARCH_NATIVE_NR, SECCOMP_ARCH_NATIVE_NAME defined. If
+ COMPAT is supported, have the SECCOMP_ARCH_COMPAT* defines too.
config SECCOMP
prompt "Enable seccomp to safely execute untrusted bytecode"
@@ -514,6 +517,20 @@ config SECCOMP_FILTER
See Documentation/userspace-api/seccomp_filter.rst for details.
+config SECCOMP_CACHE_DEBUG
+ bool "Show seccomp filter cache status in /proc/pid/seccomp_cache"
+ depends on SECCOMP_FILTER && !HAVE_SPARSE_SYSCALL_NR
+ depends on PROC_FS
+ help
+ This enables the /proc/pid/seccomp_cache interface to monitor
+ seccomp cache data. The file format is subject to change. Reading
+ the file requires CAP_SYS_ADMIN.
+
+ This option is for debugging only. Enabling presents the risk that
+ an adversary may be able to infer the seccomp filter logic.
+
+ If unsure, say N.
+
config HAVE_ARCH_STACKLEAK
bool
help
@@ -618,6 +635,23 @@ config HAVE_CONTEXT_TRACKING
protected inside rcu_irq_enter/rcu_irq_exit() but preemption or signal
handling on irq exit still need to be protected.
+config HAVE_CONTEXT_TRACKING_OFFSTACK
+ bool
+ help
+ Architecture neither relies on exception_enter()/exception_exit()
+ nor on schedule_user(). Also preempt_schedule_notrace() and
+ preempt_schedule_irq() can't be called in a preemptible section
+ while context tracking is CONTEXT_USER. This feature reflects a sane
+ entry implementation where the following requirements are met on
+ critical entry code, ie: before user_exit() or after user_enter():
+
+ - Critical entry code isn't preemptible (or better yet:
+ not interruptible).
+ - No use of RCU read side critical sections, unless rcu_nmi_enter()
+ got called.
+ - No use of instrumentation, unless instrumentation_begin() got
+ called.
+
config HAVE_TIF_NOHZ
bool
help
@@ -627,6 +661,12 @@ config HAVE_TIF_NOHZ
config HAVE_VIRT_CPU_ACCOUNTING
bool
+config HAVE_VIRT_CPU_ACCOUNTING_IDLE
+ bool
+ help
+ Architecture has its own way to account idle CPU time and therefore
+ doesn't implement vtime_account_idle().
+
config ARCH_HAS_SCALED_CPUTIME
bool
@@ -641,13 +681,19 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN
some 32-bit arches may require multiple accesses, so proper
locking is needed to protect against concurrent accesses.
-
config HAVE_IRQ_TIME_ACCOUNTING
bool
help
Archs need to ensure they use a high enough resolution clock to
support irq time accounting and then call enable_sched_clock_irqtime().
+config HAVE_MOVE_PUD
+ bool
+ help
+ Architectures that select this are able to move page tables at the
+ PUD level. If there are only 3 page table levels, the move effectively
+ happens at the PGD level.
+
config HAVE_MOVE_PMD
bool
help
@@ -1028,6 +1074,21 @@ config HAVE_STATIC_CALL_INLINE
bool
depends on HAVE_STATIC_CALL
+config ARCH_WANT_LD_ORPHAN_WARN
+ bool
+ help
+ An arch should select this symbol once all linker sections are explicitly
+ included, size-asserted, or discarded in the linker scripts. This is
+ important because we never want expected sections to be placed heuristically
+ by the linker, since the locations of such sections can change between linker
+ versions.
+
+config HAVE_ARCH_PFN_VALID
+ bool
+
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ bool
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d6e9fc7a7b19..1f51437d5765 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -27,7 +27,6 @@ config ALPHA
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select AUDIT_ARCH
- select GENERIC_CLOCKEVENTS
select GENERIC_CPU_VULNERABILITIES
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
@@ -40,6 +39,7 @@ config ALPHA
select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
select MMU_GATHER_NO_RANGE
select SET_FS
+ select SPARSEMEM_EXTREME if SPARSEMEM
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -551,12 +551,19 @@ config NR_CPUS
config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous Memory Support"
+ depends on BROKEN
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa.rst> for more.
+config ARCH_SPARSEMEM_ENABLE
+ bool "Sparse Memory Support"
+ help
+ Say Y to support efficient handling of discontiguous physical memory,
+ for systems that have huge holes in the physical address space.
+
config NUMA
bool "NUMA Support (EXPERIMENTAL)"
depends on DISCONTIGMEM && BROKEN
diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h
deleted file mode 100644
index 651714b45729..000000000000
--- a/arch/alpha/include/asm/kmap_types.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-/* Dummy header just to define km_type. */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 6d7d9bc1b4b8..4eea7c616992 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -214,8 +214,6 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
tbiap();
}
-#define deactivate_mm(tsk,mm) do { } while (0)
-
#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
@@ -229,6 +227,7 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
# endif
#endif
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -242,12 +241,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
-extern inline void
-destroy_context(struct mm_struct *mm)
-{
- /* Nothing to do. */
-}
-
+#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -255,6 +249,8 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
+#include <asm-generic/mmu_context.h>
+
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
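
For readers following the alpha mmu_context.h hunks above: the patch adopts the asm-generic/mmu_context.h convention, where an architecture that wants a non-trivial hook defines both the function and a same-named macro, and the generic header fills in empty defaults for everything left undefined (which is why destroy_context() and deactivate_mm() could simply be deleted here). A sketch of that guard pattern as it reads in kernel headers; the hook signatures follow the patch, but this is the shape of the generic header, not its verbatim text:

/* Roughly the shape of <asm-generic/mmu_context.h>: each default hook
 * compiles in only if the architecture did not already define a macro
 * of the same name, which is exactly what lines like
 * "#define init_new_context init_new_context" above arrange. */
#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	return 0;		/* default: nothing to set up */
}
#endif

#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	/* default: nothing to tear down */
}
#endif

#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
#endif

#ifndef deactivate_mm
static inline void deactivate_mm(struct task_struct *tsk,
				 struct mm_struct *mm)
{
}
#endif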
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 9b521c857436..86644604d977 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -6,6 +6,8 @@
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_
+#ifdef CONFIG_DISCONTIGMEM
+
#include <asm/smp.h>
/*
@@ -45,8 +47,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
}
#endif
-#ifdef CONFIG_DISCONTIGMEM
-
/*
* Following are macros that each numa implementation must define.
*/
@@ -68,11 +68,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
/* XXX: FIXME -- nyc */
#define kern_addr_valid(kaddr) (0)
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
-#define pte_pfn(pte) (pte_val(pte) >> 32)
-
#define mk_pte(page, pgprot) \
({ \
pte_t pte; \
@@ -95,16 +90,11 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
__xx; \
})
-#define page_to_pa(page) \
- (page_to_pfn(page) << PAGE_SHIFT)
-
#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
#define pfn_valid(pfn) \
(((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
node_spanned_pages(pfn_to_nid(pfn))) \
-#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
-
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index e241bd88880f..268f99b4602b 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -83,12 +83,13 @@ typedef struct page *pgtable_t;
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#ifndef CONFIG_DISCONTIGMEM
+
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
+#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_FLATMEM */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 660b14ce1317..8d856c62e22a 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -203,10 +203,10 @@ extern unsigned long __zero_page(void);
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-#ifndef CONFIG_DISCONTIGMEM
-#define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT)
-
+#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pte_pfn(pte) (pte_val(pte) >> 32)
+
+#ifndef CONFIG_DISCONTIGMEM
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
({ \
@@ -236,10 +236,8 @@ pmd_page_vaddr(pmd_t pmd)
return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}
-#ifndef CONFIG_DISCONTIGMEM
-#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
-#define pud_page(pud) (mem_map + ((pud_val(pud) & _PFN_MASK) >> 32))
-#endif
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
+#define pud_page(pud) (pfn_to_page(pud_val(pud) >> 32))
extern inline unsigned long pud_page_vaddr(pud_t pgd)
{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
diff --git a/arch/alpha/include/asm/sparsemem.h b/arch/alpha/include/asm/sparsemem.h
new file mode 100644
index 000000000000..a0820fd2d4b1
--- /dev/null
+++ b/arch/alpha/include/asm/sparsemem.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ALPHA_SPARSEMEM_H
+#define _ASM_ALPHA_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+
+#define SECTION_SIZE_BITS 27
+
+/*
+ * According to "Alpha Architecture Reference Manual" physical
+ * addresses are at most 48 bits.
+ * https://download.majix.org/dec/alpha_arch_ref.pdf
+ */
+#define MAX_PHYSMEM_BITS 48
+
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_ALPHA_SPARSEMEM_H */
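
The two constants in the new alpha sparsemem.h fix the geometry of the section table: a 2^27-byte section against a 2^48-byte physical address space. A standalone check of that arithmetic (plain C, values copied from the header above):

#include <stdio.h>

int main(void)
{
	unsigned long section_size_bits = 27;	/* SECTION_SIZE_BITS */
	unsigned long max_physmem_bits = 48;	/* MAX_PHYSMEM_BITS */

	/* Each mem_section spans 2^27 bytes = 128 MiB. */
	printf("section size: %lu MiB\n",
	       (1UL << section_size_bits) >> 20);

	/* 2^(48 - 27) = 2097152 sections would cover the full space;
	 * SPARSEMEM_EXTREME (also selected by the alpha Kconfig hunk)
	 * allocates the second-level section arrays on demand so a
	 * sparse machine does not pay for all of them up front. */
	printf("max sections: %lu\n",
	       1UL << (max_physmem_bits - section_size_bits));
	return 0;
}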
diff --git a/arch/alpha/include/uapi/asm/signal.h b/arch/alpha/include/uapi/asm/signal.h
index 74c750bf1c1a..a69dd8d080a8 100644
--- a/arch/alpha/include/uapi/asm/signal.h
+++ b/arch/alpha/include/uapi/asm/signal.h
@@ -60,20 +60,6 @@ typedef unsigned long sigset_t;
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-
#define SA_ONSTACK 0x00000001
#define SA_RESTART 0x00000002
#define SA_NOCLDSTOP 0x00000004
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index de6c4df61082..57420356ce4c 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -124,6 +124,9 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_PREFER_BUSY_POLL 69
+#define SO_BUSY_POLL_BUDGET 70
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
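
SO_PREFER_BUSY_POLL and SO_BUSY_POLL_BUDGET, added to the alpha socket.h above, are per-socket knobs for NAPI busy polling. A hedged usage sketch: the fallback values below match the alpha header here, other architectures define their own numbers, and setting a large budget may require CAP_NET_ADMIN:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL 69	/* value from the alpha header above */
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET 70	/* ditto */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1, budget = 8;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Prefer busy polling over interrupt-driven NAPI processing
	 * for this socket ... */
	if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       &on, sizeof(on)) < 0)
		perror("SO_PREFER_BUSY_POLL");
	/* ... and cap how many packets one busy-poll pass may handle. */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		       &budget, sizeof(budget)) < 0)
		perror("SO_BUSY_POLL_BUDGET");
	return 0;
}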
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 7462a7911002..6c71554206cc 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -57,7 +57,7 @@ EXPORT_SYMBOL(pm_power_off);
void arch_cpu_idle(void)
{
wtint(0);
- local_irq_enable();
+ raw_local_irq_enable();
}
void arch_cpu_idle_dead(void)
@@ -134,7 +134,7 @@ common_shutdown_1(void *generic_ptr)
#ifdef CONFIG_DUMMY_CONSOLE
/* If we've gotten here after SysRq-b, leave interrupt
context before taking over the console. */
- if (in_interrupt())
+ if (in_irq())
irq_exit();
/* This has the effect of resetting the VGA video origin. */
console_lock();
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 916e42d74a86..03dda3beb3bd 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -648,6 +648,7 @@ setup_arch(char **cmdline_p)
/* Find our memory. */
setup_memory(kernel_end);
memblock_set_bottom_up(true);
+ sparse_init();
/* First guess at cpu cache sizes. Do this before init_arch. */
determine_cpu_caches(cpu->type);
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0a89cc9def65..b55ca77f619b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -19,7 +19,6 @@ config ARC
select COMMON_CLK
select DMA_DIRECT_REMAP
select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
- select GENERIC_CLOCKEVENTS
select GENERIC_FIND_FIRST_BIT
# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
@@ -67,6 +66,7 @@ config GENERIC_CSUM
config ARCH_DISCONTIGMEM_ENABLE
def_bool n
+ depends on BROKEN
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -506,7 +506,8 @@ config LINUX_RAM_BASE
config HIGHMEM
bool "High Memory Support"
- select ARCH_DISCONTIGMEM_ENABLE
+ select HAVE_ARCH_PFN_VALID
+ select KMAP_LOCAL
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index c6606f4d20d6..fb98440c0bd4 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -243,10 +243,8 @@ static inline int constant_fls(unsigned int x)
x <<= 2;
r -= 2;
}
- if (!(x & 0x80000000u)) {
- x <<= 1;
+ if (!(x & 0x80000000u))
r -= 1;
- }
return r;
}
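The dropped "x <<= 1" was dead work: after the final test x is never read again, only r matters. For reference, the helper after this change reads roughly as follows, which is the standard binary-search fls shown in full under that assumption:

    static inline int constant_fls(unsigned int x)
    {
            int r = 32;

            if (!x)
                    return 0;
            if (!(x & 0xffff0000u)) {
                    x <<= 16;
                    r -= 16;
            }
            if (!(x & 0xff000000u)) {
                    x <<= 8;
                    r -= 8;
            }
            if (!(x & 0xf0000000u)) {
                    x <<= 4;
                    r -= 4;
            }
            if (!(x & 0xc0000000u)) {
                    x <<= 2;
                    r -= 2;
            }
            if (!(x & 0x80000000u))
                    r -= 1;
            return r;
    }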
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
index 6e5eafb3afdd..a6b8e2c352c4 100644
--- a/arch/arc/include/asm/highmem.h
+++ b/arch/arc/include/asm/highmem.h
@@ -9,17 +9,29 @@
#ifdef CONFIG_HIGHMEM
#include <uapi/asm/page.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
+
+#define FIXMAP_SIZE PGDIR_SIZE
+#define PKMAP_SIZE PGDIR_SIZE
/* start after vmalloc area */
#define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
-#define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
-#define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
-#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define FIX_KMAP_SLOTS (KM_MAX_IDX * NR_CPUS)
+#define FIX_KMAP_BEGIN (0UL)
+#define FIX_KMAP_END ((FIX_KMAP_BEGIN + FIX_KMAP_SLOTS) - 1)
+
+#define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT))
+
+/*
+ * This should be converted to the asm-generic version, but of course this
+ * is needlessly different from all other architectures. Sigh - tglx
+ */
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((FIXADDR_TOP - ((x) & PAGE_MASK))) >> PAGE_SHIFT)
/* start after fixmap area */
#define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
-#define PKMAP_SIZE PGDIR_SIZE
#define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
@@ -29,11 +41,13 @@
extern void kmap_init(void);
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
static inline void flush_cache_kmaps(void)
{
flush_cache_all();
}
-
#endif
#endif
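With the switch to the generic kmap_local code, per-CPU slot management leaves the architecture; ARC only has to translate a fixmap index into a virtual address. A sketch of how one slot resolves, assuming only the macros above plus KM_MAX_IDX from asm/kmap_size.h (the generic code computes the index as idx + KM_MAX_IDX * smp_processor_id()):

    /* illustrative only: vaddr of kmap_local slot 'idx' on CPU 'cpu' */
    static unsigned long kmap_slot_vaddr(unsigned int cpu, unsigned int idx)
    {
            unsigned int slot = FIX_KMAP_BEGIN + cpu * KM_MAX_IDX + idx;

            return __fix_to_virt(slot);     /* counts down from FIXADDR_TOP */
    }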
diff --git a/arch/arc/include/asm/kmap_types.h b/arch/arc/include/asm/kmap_types.h
deleted file mode 100644
index fecf7851ec32..000000000000
--- a/arch/arc/include/asm/kmap_types.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-/*
- * We primarily need to define KM_TYPE_NR here but that in turn
- * is a function of PGDIR_SIZE etc.
- * To avoid circular deps issue, put everything in asm/highmem.h
- */
-#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 3a5e6a5b9ed6..df164066e172 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -102,6 +102,7 @@ set_hw:
* Initialize the context related info for a new mm_struct
* instance.
*/
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -113,6 +114,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
unsigned long flags;
@@ -153,13 +155,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
/*
- * Called at the time of execve() to get a new ASID
- * Note the subtlety here: get_new_mmu_context() behaves differently here
- * vs. in switch_mm(). Here it always returns a new ASID, because mm has
- * an unallocated "initial" value, while in latter, it moves to a new ASID,
- * only if it was unallocated
+ * activate_mm defaults (in asm-generic) to switch_mm and is called at the
+ * time of execve() to get a new ASID. Note the subtlety here:
+ * get_new_mmu_context() behaves differently here vs. in switch_mm(). Here
+ * it always returns a new ASID, because mm has an unallocated "initial"
+ * value, while in latter, it moves to a new ASID, only if it was
+ * unallocated
*/
-#define activate_mm(prev, next) switch_mm(prev, next, NULL)
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
@@ -168,8 +170,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
-#define deactivate_mm(tsk, mm) do { } while (0)
-#define enter_lazy_tlb(mm, tsk)
+#include <asm-generic/mmu_context.h>
#endif /* __ASM_ARC_MMU_CONTEXT_H */
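The self-referential defines (init_new_context, destroy_context) are the opt-out mechanism: asm-generic/mmu_context.h supplies a default for every hook the architecture leaves undefined. An abridged sketch of that header's shape (simplified, not verbatim):

    #ifndef init_new_context
    static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
    {
            return 0;
    }
    #endif

    #ifndef enter_lazy_tlb
    static inline void enter_lazy_tlb(struct mm_struct *mm,
                                      struct task_struct *tsk)
    {
    }
    #endif

    #ifndef activate_mm
    static inline void activate_mm(struct mm_struct *prev,
                                   struct mm_struct *next)
    {
            switch_mm(prev, next, current);
    }
    #endif

That is why the old activate_mm/deactivate_mm/enter_lazy_tlb defines could simply be deleted here.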
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index b0dfed0f12be..23e41e890eda 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -82,11 +82,25 @@ typedef pte_t * pgtable_t;
*/
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_RAM_BASE)
+/*
+ * When HIGHMEM is enabled we have holes in the memory map so we need
+ * pfn_valid() that takes into account the actual extents of the physical
+ * memory
+ */
+#ifdef CONFIG_HIGHMEM
+
+extern unsigned long arch_pfn_offset;
+#define ARCH_PFN_OFFSET arch_pfn_offset
+
+extern int pfn_valid(unsigned long pfn);
+#define pfn_valid pfn_valid
-#ifdef CONFIG_FLATMEM
+#else /* CONFIG_HIGHMEM */
+
+#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_RAM_BASE)
#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-#endif
+
+#endif /* CONFIG_HIGHMEM */
/*
* __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index f1ed17edb085..163641726a2b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -134,8 +134,10 @@
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/**************************************************************************
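MAX_POSSIBLE_PHYSMEM_BITS exists for generic code that must size PFN fields at compile time, before the runtime MMU geometry is known. Roughly what mm/zsmalloc.c does with it (paraphrased, not verbatim):

    /* paraphrased from mm/zsmalloc.c */
    #ifndef MAX_POSSIBLE_PHYSMEM_BITS
    #define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
    #endif
    #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
    /* PAE40 with 8 KiB pages: 40 - 13 = 27 bits to encode any PFN */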
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 17fd1ed700cc..9152782444b5 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -67,7 +67,22 @@
sr r5, [ARC_REG_LPB_CTRL]
1:
#endif /* CONFIG_ARC_LPB_DISABLE */
-#endif
+
+ /* On HSDK, CCMs need to be remapped super early */
+#ifdef CONFIG_ARC_SOC_HSDK
+ mov r6, 0x60000000
+ lr r5, [ARC_REG_ICCM_BUILD]
+ breq r5, 0, 1f
+ sr r6, [ARC_REG_AUX_ICCM]
+1:
+ lr r5, [ARC_REG_DCCM_BUILD]
+ breq r5, 0, 2f
+ sr r6, [ARC_REG_AUX_DCCM]
+2:
+#endif /* CONFIG_ARC_SOC_HSDK */
+
+#endif /* CONFIG_ISA_ARCV2 */
+
; Config DSP_CTRL properly, so kernel may use integer multiply,
; multiply-accumulate, and divide operations
DSP_EARLY_INIT
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index feba91c9d969..f73da203b170 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -38,15 +38,27 @@
#ifdef CONFIG_ARC_DW2_UNWIND
-static void seed_unwind_frame_info(struct task_struct *tsk,
- struct pt_regs *regs,
- struct unwind_frame_info *frame_info)
+static int
+seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
{
- /*
- * synchronous unwinding (e.g. dump_stack)
- * - uses current values of SP and friends
- */
- if (tsk == NULL && regs == NULL) {
+ if (regs) {
+ /*
+ * Asynchronous unwinding of intr/exception
+ * - Just uses the pt_regs passed
+ */
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ } else if (tsk == NULL || tsk == current) {
+ /*
+ * synchronous unwinding (e.g. dump_stack)
+ * - uses current values of SP and friends
+ */
unsigned long fp, sp, blink, ret;
frame_info->task = current;
@@ -63,13 +75,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
- } else if (regs == NULL) {
+ } else {
/*
- * Asynchronous unwinding of sleeping task
- * - Gets SP etc from task's pt_regs (saved bottom of kernel
- * mode stack of task)
+ * Asynchronous unwinding of a likely sleeping task
+ * - first ensure it is actually sleeping
+ * - if so, it will be in __switch_to, kernel mode SP of task
+ * is safe-kept and BLINK at a well known location in there
*/
+ if (tsk->state == TASK_RUNNING)
+ return -1;
+
frame_info->task = tsk;
frame_info->regs.r27 = TSK_K_FP(tsk);
@@ -90,19 +106,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
- } else {
- /*
- * Asynchronous unwinding of intr/exception
- * - Just uses the pt_regs passed
- */
- frame_info->task = tsk;
-
- frame_info->regs.r27 = regs->fp;
- frame_info->regs.r28 = regs->sp;
- frame_info->regs.r31 = regs->blink;
- frame_info->regs.r63 = regs->ret;
- frame_info->call_frame = 0;
}
+ return 0;
}
#endif
@@ -112,11 +117,12 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
- int ret = 0;
+ int ret = 0, cnt = 0;
unsigned int address;
struct unwind_frame_info frame_info;
- seed_unwind_frame_info(tsk, regs, &frame_info);
+ if (seed_unwind_frame_info(tsk, regs, &frame_info))
+ return 0;
while (1) {
address = UNW_PC(&frame_info);
@@ -132,6 +138,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
break;
frame_info.regs.r63 = frame_info.regs.r31;
+
+ if (cnt++ > 128) {
+ printk("unwinder looping too long, aborting !\n");
+ return 0;
+ }
}
return address; /* return the last address it saw */
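Bailing out of seed_unwind_frame_info() for a TASK_RUNNING remote task, plus the 128-frame cap, turns a potentially wild walk into a bounded one; callers just see an empty trace. For reference, consumers drive arc_unwind_core() through a callback of the signature shown above; a minimal sketch, where print_frame is hypothetical:

    /* illustrative consumer: print every PC the unwinder visits */
    static int print_frame(unsigned int address, void *arg)
    {
            pr_info("  [<%08x>] %pS\n", address, (void *)address);
            return 0;       /* -1 would ask the core to stop the walk */
    }

    /* synchronous trace of current: tsk == NULL, regs == NULL */
    arc_unwind_core(NULL, NULL, print_frame, NULL);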
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 1b9f473c6369..c79912a6b196 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -36,9 +36,8 @@
* This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
* 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
*
- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
- * slots across NR_CPUS would be more than sufficient (generic code defines
- * KM_TYPE_NR as 20).
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ * CPU. So the number of CPUs sharing a single PTE page is limited.
*
* - pkmap being preemptible, in theory could do with more than 256 concurrent
* mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
@@ -47,48 +46,6 @@
*/
extern pte_t * pkmap_page_table;
-static pte_t * fixmap_page_table;
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- int idx, cpu_idx;
- unsigned long vaddr;
-
- cpu_idx = kmap_atomic_idx_push();
- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
- vaddr = FIXMAP_ADDR(idx);
-
- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
- mk_pte(page, prot));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kv)
-{
- unsigned long kvaddr = (unsigned long)kv;
-
- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
-
- /*
- * Because preemption is disabled, this vaddr can be associated
- * with the current allocated index.
- * But in case of multiple live kmap_atomic(), it still relies on
- * callers to unmap in right order.
- */
- int cpu_idx = kmap_atomic_idx();
- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-
- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
-
- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
-
- kmap_atomic_idx_pop();
- }
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
@@ -108,10 +65,9 @@ void __init kmap_init(void)
{
/* Due to recursive include hell, we can't do this in processor.h */
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
- BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-
- BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+ alloc_kmap_pgtable(FIXMAP_BASE);
}
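All of the deleted bookkeeping (index push/pop, PTE set/clear, TLB flush) now lives in generic mm/highmem.c, which reaches back through the arch_kmap_local_post_unmap() hook added in the header earlier in this series. Callers are untouched; the usual pattern stays the same (buffer is illustrative):

    void *vaddr = kmap_local_page(page);

    memcpy(buffer, vaddr, PAGE_SIZE);
    kunmap_local(vaddr);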
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 3a35b82a718e..ce07e697916c 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -28,6 +28,8 @@ static unsigned long low_mem_sz;
static unsigned long min_high_pfn, max_high_pfn;
static phys_addr_t high_mem_start;
static phys_addr_t high_mem_sz;
+unsigned long arch_pfn_offset;
+EXPORT_SYMBOL(arch_pfn_offset);
#endif
#ifdef CONFIG_DISCONTIGMEM
@@ -98,16 +100,11 @@ void __init setup_arch_memory(void)
init_mm.brk = (unsigned long)_end;
/* first page of system - kernel .vector starts here */
- min_low_pfn = ARCH_PFN_OFFSET;
+ min_low_pfn = virt_to_pfn(CONFIG_LINUX_RAM_BASE);
/* Last usable page of low mem */
max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
-#ifdef CONFIG_FLATMEM
- /* pfn_valid() uses this */
- max_mapnr = max_low_pfn - min_low_pfn;
-#endif
-
/*------------- bootmem allocator setup -----------------------*/
/*
@@ -153,7 +150,9 @@ void __init setup_arch_memory(void)
* DISCONTIGMEM in turns requires multiple nodes. node 0 above is
* populated with normal memory zone while node 1 only has highmem
*/
+#ifdef CONFIG_DISCONTIGMEM
node_set_online(1);
+#endif
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
@@ -161,8 +160,15 @@ void __init setup_arch_memory(void)
max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+
+ arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
-#endif
+
+#else /* CONFIG_HIGHMEM */
+ /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
+ max_mapnr = max_low_pfn - min_low_pfn;
+
+#endif /* CONFIG_HIGHMEM */
free_area_init(max_zone_pfn);
}
@@ -190,3 +196,12 @@ void __init mem_init(void)
highmem_init();
mem_init_print_info(NULL);
}
+
+#ifdef CONFIG_HIGHMEM
+int pfn_valid(unsigned long pfn)
+{
+ return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
+ (pfn >= min_low_pfn && pfn <= max_low_pfn);
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
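A toy model of the new pfn_valid(), since FLATMEM's max_mapnr test cannot express a hole between banks. The extents below are assumptions for illustration (8 KiB pages, low memory at 0x8000_0000, high memory at 0x1_0000_0000), not values from the patch:

    /* toy, userspace-compilable model; all extents are assumptions */
    static unsigned long min_low_pfn  = 0x10000, max_low_pfn  = 0x18000;
    static unsigned long min_high_pfn = 0x20000, max_high_pfn = 0x28000;

    static int pfn_valid(unsigned long pfn)
    {
            return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
                   (pfn >= min_low_pfn  && pfn <= max_low_pfn);
    }
    /* pfn_valid(0x19000) == 0: it falls in the hole between the banks */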
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index c340acd989a0..9bb3c24f3677 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -30,14 +30,14 @@
* -Changes related to MMU v2 (Rel 4.8)
*
* Vineetg: Aug 29th 2008
- * -In TLB Flush operations (Metal Fix MMU) there is a explict command to
+ * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
* flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
* it fails. Thus need to load it with ANY valid value before invoking
* TLBIVUTLB cmd
*
* Vineetg: Aug 21st 2008:
* -Reduced the duration of IRQ lockouts in TLB Flush routines
- * -Multiple copies of TLB erase code seperated into a "single" function
+ * -Multiple copies of TLB erase code separated into a "single" function
* -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
* in interrupt-safe region.
*
@@ -66,7 +66,7 @@
*
* Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
* much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
- * Given this, the thrasing problem should never happen because once the 3
+ * Given this, the thrashing problem should never happen because once the 3
* J-TLB entries are created (even though 3rd will knock out one of the prev
* two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
*
@@ -127,7 +127,7 @@ static void utlb_invalidate(void)
* There was however an obscure hardware bug, where uTLB flush would
* fail when a prior probe for J-TLB (both totally unrelated) would
* return lkup err - because the entry didn't exist in MMU.
- * The Workround was to set Index reg with some valid value, prior to
+ * The Workaround was to set Index reg with some valid value, prior to
* flush. This was fixed in MMU v3
*/
unsigned int idx;
@@ -272,7 +272,7 @@ noinline void local_flush_tlb_all(void)
}
/*
- * Flush the entrie MM for userland. The fastest way is to move to Next ASID
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
*/
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
@@ -303,7 +303,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
* Difference between this and Kernel Range Flush is
* -Here the fastest way (if range is too large) is to move to next ASID
* without doing any explicit Shootdown
- * -In case of kernel Flush, entry has to be shot down explictly
+ * -In case of kernel Flush, entry has to be shot down explicitly
*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
@@ -620,7 +620,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
* Super Page size is configurable in hardware (4K to 16M), but fixed once
* RTL builds.
*
- * The exact THP size a Linx configuration will support is a function of:
+ * The exact THP size a Linux configuration will support is a function of:
* - MMU page size (typical 8K, RTL fixed)
* - software page walker address split between PGD:PTE:PFN (typical
* 11:8:13, but can be changed with 1 line)
@@ -698,7 +698,7 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
#endif
-/* Read the Cache Build Confuration Registers, Decode them and save into
+/* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
@@ -803,13 +803,13 @@ void arc_mmu_init(void)
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
/*
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
/*
* stack top size sanity check,
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
@@ -881,7 +881,7 @@ void arc_mmu_init(void)
* the duplicate one.
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
*/
-volatile int dup_pd_silent; /* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
@@ -948,7 +948,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
/***********************************************************************
* Diagnostic Routines
- * -Called from Low Level TLB Hanlders if things don;t look good
+ * -Called from Low Level TLB Handlers if things don't look good
**********************************************************************/
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 0b63fc095b99..b3ea1fa11f87 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -17,22 +17,6 @@ int arc_hsdk_axi_dmac_coherent __section(".data") = 0;
#define ARC_CCM_UNUSED_ADDR 0x60000000
-static void __init hsdk_init_per_cpu(unsigned int cpu)
-{
- /*
- * By default ICCM is mapped to 0x7z while this area is used for
- * kernel virtual mappings, so move it to currently unused area.
- */
- if (cpuinfo_arc700[cpu].iccm.sz)
- write_aux_reg(ARC_REG_AUX_ICCM, ARC_CCM_UNUSED_ADDR);
-
- /*
- * By default DCCM is mapped to 0x8z while this area is used by kernel,
- * so move it to currently unused area.
- */
- if (cpuinfo_arc700[cpu].dccm.sz)
- write_aux_reg(ARC_REG_AUX_DCCM, ARC_CCM_UNUSED_ADDR);
-}
#define ARC_PERIPHERAL_BASE 0xf0000000
#define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000)
@@ -339,5 +323,4 @@ static const char *hsdk_compat[] __initconst = {
MACHINE_START(SIMULATION, "hsdk")
.dt_compat = hsdk_compat,
.init_early = hsdk_init_early,
- .init_per_cpu = hsdk_init_per_cpu,
MACHINE_END
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fe2f17eb2b50..ba937d85cb6c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -25,7 +25,7 @@ config ARM
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_KEEP_MEMBLOCK if HAVE_ARCH_PFN_VALID || KEXEC
+ select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
@@ -35,6 +35,7 @@ config ARM
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_LD_ORPHAN_WARN
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select BUILDTIME_TABLE_SORT if MMU
select CLONE_BACKWARDS
@@ -68,6 +69,7 @@ config ARM
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
@@ -267,8 +269,7 @@ config PHYS_OFFSET
hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT
default DRAM_BASE if !MMU
- default 0x00000000 if ARCH_EBSA110 || \
- ARCH_FOOTBRIDGE
+ default 0x00000000 if ARCH_FOOTBRIDGE
default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
default 0x20000000 if ARCH_S5PV210
default 0xc0000000 if ARCH_SA1100
@@ -322,7 +323,6 @@ config ARCH_MULTIPLATFORM
select AUTO_ZRELADDR
select TIMER_OF
select COMMON_CLK
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_MULTI_HANDLER
select HAVE_PCI
select PCI_DOMAINS_GENERIC if PCI
@@ -337,25 +337,10 @@ config ARM_SINGLE_ARMV7M
select TIMER_OF
select COMMON_CLK
select CPU_V7M
- select GENERIC_CLOCKEVENTS
select NO_IOPORT_MAP
select SPARSE_IRQ
select USE_OF
-config ARCH_EBSA110
- bool "EBSA-110"
- select ARCH_USES_GETTIMEOFFSET
- select CPU_SA110
- select ISA
- select NEED_MACH_IO_H
- select NEED_MACH_MEMORY_H
- select NO_IOPORT_MAP
- help
- This is an evaluation board for the StrongARM processor available
- from Digital. It has limited hardware on-board, including an
- Ethernet interface, two PCMCIA sockets, two serial ports and a
- parallel port.
-
config ARCH_EP93XX
bool "EP93xx-based"
select ARCH_SPARSEMEM_ENABLE
@@ -366,7 +351,6 @@ config ARCH_EP93XX
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CPU_ARM920T
- select GENERIC_CLOCKEVENTS
select GPIOLIB
select HAVE_LEGACY_CLK
help
@@ -376,7 +360,6 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
- select GENERIC_CLOCKEVENTS
select HAVE_IDE
select NEED_MACH_IO_H if !MMU
select NEED_MACH_MEMORY_H
@@ -404,7 +387,6 @@ config ARCH_IXP4XX
select ARCH_SUPPORTS_BIG_ENDIAN
select CPU_XSCALE
select DMABOUNCE if PCI
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_IXP4XX
select GPIOLIB
@@ -420,7 +402,6 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
select CPU_PJ4
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_PCI
@@ -444,7 +425,6 @@ config ARCH_PXA
select CLKSRC_MMIO
select TIMER_OF
select CPU_XSCALE if !CPU_XSC3
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_MULTI_HANDLER
select GPIO_PXA
select GPIOLIB
@@ -467,6 +447,7 @@ config ARCH_RPC
select HAVE_IDE
select HAVE_PATA_PLATFORM
select ISA_DMA_API
+ select LEGACY_TIMER_TICK
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select NO_IOPORT_MAP
@@ -484,7 +465,6 @@ config ARCH_SA1100
select COMMON_CLK
select CPU_FREQ
select CPU_SA1100
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
@@ -499,7 +479,6 @@ config ARCH_S3C24XX
bool "Samsung S3C24XX SoCs"
select ATAGS
select CLKSRC_SAMSUNG_PWM
- select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
select GPIOLIB
select GENERIC_IRQ_MULTI_HANDLER
@@ -519,11 +498,9 @@ config ARCH_S3C24XX
config ARCH_OMAP1
bool "TI OMAP1"
depends on MMU
- select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
select CLKDEV_LOOKUP
select CLKSRC_MMIO
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
@@ -786,7 +763,6 @@ config ARCH_ACORN
config PLAT_IOP
bool
- select GENERIC_CLOCKEVENTS
config PLAT_ORION
bool
@@ -1177,7 +1153,6 @@ config HAVE_SMP
config SMP
bool "Symmetric Multi-Processing"
depends on CPU_V6K || CPU_V7
- depends on GENERIC_CLOCKEVENTS
depends on HAVE_SMP
depends on MMU || ARM_MPU
select IRQ_WORK
@@ -1371,7 +1346,6 @@ config ARCH_NR_GPIO
config HZ_FIXED
int
- default 200 if ARCH_EBSA110
default 128 if SOC_AT91RM9200
default 0
@@ -1479,9 +1453,6 @@ config OABI_COMPAT
UNPREDICTABLE (in fact it can be predicted that it won't work
at all). If in doubt say N.
-config ARCH_HAS_HOLES_MEMORYMODEL
- bool
-
config ARCH_SELECT_MEMORY_MODEL
bool
@@ -1492,12 +1463,10 @@ config ARCH_SPARSEMEM_ENABLE
bool
select SPARSEMEM_STATIC if SPARSEMEM
-config HAVE_ARCH_PFN_VALID
- def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
-
config HIGHMEM
bool "High Memory Support"
depends on MMU
+ select KMAP_LOCAL
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
@@ -1587,9 +1556,7 @@ config FORCE_MAX_ZONEORDER
a value of 11 means that the largest free memory block is 2^10 pages.
config ALIGNMENT_TRAP
- bool
- depends on CPU_CP15_MMU
- default y if !ARCH_EBSA110
+ def_bool CPU_CP15_MMU
select HAVE_PROC_CPU if PROC_FS
help
ARM processors cannot fetch/store information which is not
@@ -1784,7 +1751,7 @@ config CMDLINE
string "Default kernel command string"
default ""
help
- On some architectures (EBSA110 and CATS), there is currently no way
+ On some architectures (e.g. CATS), there is currently no way
for the boot loader to pass arguments to the kernel. For these
architectures, you should supply some command-line options at build
time by entering them here. As a minimum, you should specify the
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 8986a91a6f31..4ff04201a8cc 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1567,7 +1567,7 @@ config DEBUG_SIRFSOC_UART
config DEBUG_UART_FLOW_CONTROL
bool "Enable flow control (CTS) for the debug UART"
depends on DEBUG_LL
- default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC
+ default y if DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC
help
Some UART ports are connected to terminals that will use modem
control signals to indicate whether they are ready to receive text.
@@ -1639,7 +1639,7 @@ config DEBUG_UART_PL01X
# Compatibility options for 8250
config DEBUG_UART_8250
- def_bool ARCH_EBSA110 || ARCH_IOP32X || ARCH_IXP4XX || ARCH_RPC
+ def_bool ARCH_IOP32X || ARCH_IXP4XX || ARCH_RPC
config DEBUG_UART_PHYS
hex "Physical base address of debug UART"
@@ -1743,7 +1743,6 @@ config DEBUG_UART_PHYS
default 0xe8008000 if DEBUG_R7S72100_SCIF2 || DEBUG_R7S9210_SCIF2
default 0xe8009000 if DEBUG_R7S9210_SCIF4
default 0xf0000000 if DEBUG_DIGICOLOR_UA0
- default 0xf0000be0 if ARCH_EBSA110
default 0xf1012000 if DEBUG_MVEBU_UART0_ALTERNATE
default 0xf1012100 if DEBUG_MVEBU_UART1_ALTERNATE
default 0xf7fc9000 if DEBUG_BERLIN_UART
@@ -1790,7 +1789,6 @@ config DEBUG_UART_VIRT
default 0xc8821000 if DEBUG_RV1108_UART1
default 0xc8912000 if DEBUG_RV1108_UART0
default 0xe0010fe0 if ARCH_RPC
- default 0xf0000be0 if ARCH_EBSA110
default 0xf0010000 if DEBUG_ASM9260_UART
default 0xf0100000 if DEBUG_DIGICOLOR_UA0
default 0xf01fb000 if DEBUG_NOMADIK_UART
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4d76eab2b22d..4a066c687cec 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,10 +16,6 @@ LDFLAGS_vmlinux += --be8
KBUILD_LDFLAGS_MODULE += --be8
endif
-# We never want expected sections to be placed heuristically by the
-# linker. All sections should be explicitly named in the linker script.
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
-
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
@@ -172,7 +168,6 @@ machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx
machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor
machine-$(CONFIG_ARCH_DOVE) += dove
-machine-$(CONFIG_ARCH_EBSA110) += ebsa110
machine-$(CONFIG_ARCH_EFM32) += efm32
machine-$(CONFIG_ARCH_EP93XX) += ep93xx
machine-$(CONFIG_ARCH_EXYNOS) += exynos
@@ -239,13 +234,6 @@ plat-$(CONFIG_PLAT_ORION) += orion
plat-$(CONFIG_PLAT_PXA) += pxa
plat-$(CONFIG_PLAT_VERSATILE) += versatile
-ifeq ($(CONFIG_ARCH_EBSA110),y)
-# This is what happens if you forget the IOCS16 line.
-# PCMCIA cards stop working.
-CFLAGS_3c589_cs.o :=-DISA_SIXTEEN_BIT_PERIPHERAL
-export CFLAGS_3c589_cs.o
-endif
-
# The byte offset of the kernel image in RAM from the start of RAM.
TEXT_OFFSET := $(textofs-y)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 47f001ca5499..e1567418a2b1 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -129,7 +129,9 @@ LDFLAGS_vmlinux += --no-undefined
# Delete all temporary local symbols
LDFLAGS_vmlinux += -X
# Report orphan sections
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+ifdef CONFIG_LD_ORPHAN_WARN
+LDFLAGS_vmlinux += --orphan-handling=warn
+endif
# Next argument is a linker script
LDFLAGS_vmlinux += -T
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 2e04ec5b5446..caa27322a0ab 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1472,6 +1472,9 @@ ENTRY(efi_enter_kernel)
@ issued from HYP mode take us to the correct handler code. We
@ will disable the MMU before jumping to the kernel proper.
@
+ ARM( bic r1, r1, #(1 << 30) ) @ clear HSCTLR.TE
+ THUMB( orr r1, r1, #(1 << 30) ) @ set HSCTLR.TE
+ mcr p15, 4, r1, c1, c0, 0
adr r0, __hyp_reentry_vectors
mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR)
isb
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index c220dc3c4e0f..243e35f7a56c 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -521,7 +521,7 @@
ranges = <0x0 0x100000 0x8000>;
mac_sw: switch@0 {
- compatible = "ti,am4372-cpsw","ti,cpsw-switch";
+ compatible = "ti,am4372-cpsw-switch", "ti,cpsw-switch";
reg = <0x0 0x4000>;
ranges = <0 0 0x4000>;
clocks = <&cpsw_125mhz_gclk>, <&dpll_clksel_mac_clk>;
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index b58220a49cbd..74367ee96f20 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -69,6 +69,12 @@
always-on;
};
+ edac: sdram@1e6e0000 {
+ compatible = "aspeed,ast2600-sdram-edac", "syscon";
+ reg = <0x1e6e0000 0x174>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
ahb {
compatible = "simple-bus";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index b69c7d40f5d8..2f326151116b 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -32,8 +32,8 @@
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "int0", "int1";
- clocks = <&mcan_clk>, <&l3_iclk_div>;
- clock-names = "cclk", "hclk";
+ clocks = <&l3_iclk_div>, <&mcan_clk>;
+ clock-names = "hclk", "cclk";
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index ab291cec650a..2983e91bc7dd 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -122,7 +122,6 @@
};
&clock {
- clocks = <&clock CLK_XUSBXTI>;
assigned-clocks = <&clock CLK_FOUT_EPLL>;
assigned-clock-rates = <45158401>;
};
diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts
index 878e89c20190..4ea5c23f181b 100644
--- a/arch/arm/boot/dts/imx50-evk.dts
+++ b/arch/arm/boot/dts/imx50-evk.dts
@@ -59,7 +59,7 @@
MX50_PAD_CSPI_MISO__CSPI_MISO 0x00
MX50_PAD_CSPI_MOSI__CSPI_MOSI 0x00
MX50_PAD_CSPI_SS0__GPIO4_11 0xc4
- MX50_PAD_ECSPI1_MOSI__CSPI_SS1 0xf4
+ MX50_PAD_ECSPI1_MOSI__GPIO4_13 0x84
>;
};
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index f7dcdf96e5c0..8f4a63ea912e 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -589,7 +589,7 @@
touchscreen@4b {
compatible = "atmel,maxtouch";
- reset-gpio = <&gpio5 19 GPIO_ACTIVE_HIGH>;
+ reset-gpio = <&gpio5 19 GPIO_ACTIVE_LOW>;
reg = <0x4b>;
interrupt-parent = <&gpio5>;
interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
index 65359aece950..7da74e6f46d9 100644
--- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
@@ -143,7 +143,7 @@
reg = <0x4a>;
interrupt-parent = <&gpio1>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 28 */
- reset-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; /* SODIMM 30 */
+ reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; /* SODIMM 30 */
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6q-apalis-eval.dts b/arch/arm/boot/dts/imx6q-apalis-eval.dts
index fab83abb6466..a0683b4aeca1 100644
--- a/arch/arm/boot/dts/imx6q-apalis-eval.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-eval.dts
@@ -140,7 +140,7 @@
reg = <0x4a>;
interrupt-parent = <&gpio6>;
interrupts = <10 IRQ_TYPE_EDGE_FALLING>;
- reset-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* SODIMM 13 */
+ reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>; /* SODIMM 13 */
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts
index 1614b1ae501d..86e84781cf5d 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts
@@ -145,7 +145,7 @@
reg = <0x4a>;
interrupt-parent = <&gpio6>;
interrupts = <10 IRQ_TYPE_EDGE_FALLING>;
- reset-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* SODIMM 13 */
+ reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>; /* SODIMM 13 */
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
index fa9f98dd15ac..62e72773e53b 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
@@ -144,7 +144,7 @@
reg = <0x4a>;
interrupt-parent = <&gpio6>;
interrupts = <10 IRQ_TYPE_EDGE_FALLING>;
- reset-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* SODIMM 13 */
+ reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>; /* SODIMM 13 */
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts
index d112b50f8c5d..b4605edfd2ab 100644
--- a/arch/arm/boot/dts/imx6q-prti6q.dts
+++ b/arch/arm/boot/dts/imx6q-prti6q.dts
@@ -213,8 +213,8 @@
#size-cells = <0>;
/* Microchip KSZ9031RNX PHY */
- rgmii_phy: ethernet-phy@4 {
- reg = <4>;
+ rgmii_phy: ethernet-phy@0 {
+ reg = <0>;
interrupts-extended = <&gpio1 28 IRQ_TYPE_LEVEL_LOW>;
reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
reset-assert-us = <10000>;
diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
index 265f5f3dbff6..24f793ca2886 100644
--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
@@ -551,7 +551,7 @@
pinctrl_i2c3: i2c3grp {
fsl,pins = <
- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index 828dd20cd27d..d07d8f83456d 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -98,7 +98,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
index 93909796885a..b9b698f72b26 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
@@ -166,7 +166,6 @@
MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
- MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
};
diff --git a/arch/arm/boot/dts/imx7-colibri-aster.dtsi b/arch/arm/boot/dts/imx7-colibri-aster.dtsi
index 9fa701bec2ec..139188eb9f40 100644
--- a/arch/arm/boot/dts/imx7-colibri-aster.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri-aster.dtsi
@@ -99,7 +99,7 @@
reg = <0x4a>;
interrupt-parent = <&gpio2>;
interrupts = <15 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 107 */
- reset-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* SODIMM 106 */
+ reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* SODIMM 106 */
};
/* M41T0M6 real time clock on carrier board */
diff --git a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
index 97601375f264..3caf450735d7 100644
--- a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
@@ -124,7 +124,7 @@
reg = <0x4a>;
interrupt-parent = <&gpio1>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 28 */
- reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>; /* SODIMM 30 */
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>; /* SODIMM 30 */
status = "disabled";
};
diff --git a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
index f1a41152e9dd..342304f5653a 100644
--- a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
+++ b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
@@ -223,16 +223,15 @@
};
&ssp3 {
- /delete-property/ #address-cells;
- /delete-property/ #size-cells;
+ #address-cells = <0>;
spi-slave;
status = "okay";
- ready-gpio = <&gpio 125 GPIO_ACTIVE_HIGH>;
+ ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>;
slave {
compatible = "olpc,xo1.75-ec";
spi-cpha;
- cmd-gpio = <&gpio 155 GPIO_ACTIVE_HIGH>;
+ cmd-gpios = <&gpio 155 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
index cc4efd0efabd..4ae630d37d09 100644
--- a/arch/arm/boot/dts/mmp3.dtsi
+++ b/arch/arm/boot/dts/mmp3.dtsi
@@ -296,6 +296,7 @@
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&soc_clocks MMP2_CLK_CCIC0>;
clock-names = "axi";
+ power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
#clock-cells = <0>;
clock-output-names = "mclk";
status = "disabled";
@@ -307,6 +308,7 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&soc_clocks MMP2_CLK_CCIC1>;
clock-names = "axi";
+ power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
#clock-cells = <0>;
clock-output-names = "mclk";
status = "disabled";
diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
index d5ded4f794df..5f8f77cfbe59 100644
--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
@@ -430,7 +430,7 @@
pinctrl-names = "default";
pinctrl-0 = <&touchscreen_pins>;
- reset-gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>; /* gpio173 */
+ reset-gpios = <&gpio6 13 GPIO_ACTIVE_LOW>; /* gpio173 */
/* gpio_183 with sys_nirq2 pad as wakeup */
interrupts-extended = <&gpio6 23 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
index bd4450dbdcb6..4da33d0f2748 100644
--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
+++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
@@ -632,7 +632,7 @@
interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
pinctrl-names = "default";
pinctrl-0 = <&ts_irq>;
- reset-gpios = <&gpj1 3 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpj1 3 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index ca109dc18238..2e77ccec3fc1 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -89,6 +89,14 @@
states = <1800000 0x1>,
<2900000 0x0>;
};
+
+ vin: vin {
+ compatible = "regulator-fixed";
+ regulator-name = "vin";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
};
&adc {
@@ -150,11 +158,18 @@
regulators {
compatible = "st,stpmic1-regulators";
+ buck1-supply = <&vin>;
+ buck2-supply = <&vin>;
+ buck3-supply = <&vin>;
+ buck4-supply = <&vin>;
ldo1-supply = <&v3v3>;
ldo2-supply = <&v3v3>;
ldo3-supply = <&vdd_ddr>;
+ ldo4-supply = <&vin>;
ldo5-supply = <&v3v3>;
ldo6-supply = <&v3v3>;
+ vref_ddr-supply = <&vin>;
+ boost-supply = <&vin>;
pwr_sw1-supply = <&bst_out>;
pwr_sw2-supply = <&bst_out>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
index 5dff24e39af8..8456f172d4b1 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
@@ -46,6 +46,16 @@
linux,code = <KEY_A>;
gpios = <&gpiof 3 GPIO_ACTIVE_LOW>;
};
+
+ /*
+ * The EXTi IRQ line 0 is shared with PMIC,
+ * so mark this as polled GPIO key.
+ */
+ button-2 {
+ label = "TA3-GPIO-C";
+ linux,code = <KEY_C>;
+ gpios = <&gpiog 0 GPIO_ACTIVE_LOW>;
+ };
};
gpio-keys {
@@ -59,13 +69,6 @@
wakeup-source;
};
- button-2 {
- label = "TA3-GPIO-C";
- linux,code = <KEY_C>;
- gpios = <&gpioi 11 GPIO_ACTIVE_LOW>;
- wakeup-source;
- };
-
button-3 {
label = "TA4-GPIO-D";
linux,code = <KEY_D>;
@@ -79,7 +82,7 @@
led-0 {
label = "green:led5";
- gpios = <&gpiog 2 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
index b4b52cf634af..f796a6150313 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
@@ -68,6 +68,7 @@
gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
regulator-always-on;
regulator-boot-on;
+ vin-supply = <&vdd>;
};
};
@@ -202,6 +203,7 @@
vdda: ldo1 {
regulator-name = "vdda";
+ regulator-always-on;
regulator-min-microvolt = <2900000>;
regulator-max-microvolt = <2900000>;
interrupts = <IT_CURLIM_LDO1 0>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
index 04fbb324a541..803eb8bc9c85 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
@@ -21,6 +21,10 @@
};
};
+&dts {
+ status = "okay";
+};
+
&i2c4 {
pinctrl-names = "default";
pinctrl-0 = <&i2c4_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
index a5307745719a..93398cfae97e 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
@@ -80,6 +80,14 @@
dais = <&sai2a_port &sai2b_port &i2s2_port>;
status = "okay";
};
+
+ vin: vin {
+ compatible = "regulator-fixed";
+ regulator-name = "vin";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
};
&adc {
@@ -240,9 +248,18 @@
regulators {
compatible = "st,stpmic1-regulators";
+ buck1-supply = <&vin>;
+ buck2-supply = <&vin>;
+ buck3-supply = <&vin>;
+ buck4-supply = <&vin>;
ldo1-supply = <&v3v3>;
+ ldo2-supply = <&vin>;
ldo3-supply = <&vdd_ddr>;
+ ldo4-supply = <&vin>;
+ ldo5-supply = <&vin>;
ldo6-supply = <&v3v3>;
+ vref_ddr-supply = <&vin>;
+ boost-supply = <&vin>;
pwr_sw1-supply = <&bst_out>;
pwr_sw2-supply = <&bst_out>;
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 0f95a6ef8543..1c5a666c54b5 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -143,7 +143,7 @@
trips {
cpu_alert0: cpu-alert0 {
/* milliCelsius */
- temperature = <850000>;
+ temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 049e6ab3cf56..73de34ae37fd 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -154,7 +154,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
index 32d5d45a35c0..8945dbb114a2 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
@@ -130,7 +130,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_gmac_3v3>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index bb3987e101c2..0b3d9ae75650 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -132,7 +132,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_gmac_3v3>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index 8c8dee6ea461..9109ca0919ad 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -151,7 +151,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
index fce2f7fcd084..bf38c66c1815 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Adam Sampson <ats@offog.org>
+ * Copyright 2015-2020 Adam Sampson <ats@offog.org>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -115,7 +115,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index 9d34eabba121..431f70234d36 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -131,7 +131,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_sw>;
phy-handle = <&rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
allwinner,rx-delay-ps = <700>;
allwinner,tx-delay-ps = <700>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index d9be511f054f..d8326a5c681d 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -183,7 +183,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_dldo4>;
phy-handle = <&rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
index 71fb73208939..babf4cf1b2f6 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
@@ -53,11 +53,6 @@
};
};
-&emac {
- /* LEDs changed to active high on the plus */
- /delete-property/ allwinner,leds-active-low;
-};
-
&mmc1 {
vmmc-supply = <&reg_vcc3v3>;
bus-width = <4>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
index 6dbf7b2e0c13..b6ca45d18e51 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
@@ -67,7 +67,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
index 2fc62ef0cb3e..a6a1087a0c9b 100644
--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
@@ -129,7 +129,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_dc1sw>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-s3-pinecube.dts b/arch/arm/boot/dts/sun8i-s3-pinecube.dts
index 9bab6b7f4014..4aa0ee897a0a 100644
--- a/arch/arm/boot/dts/sun8i-s3-pinecube.dts
+++ b/arch/arm/boot/dts/sun8i-s3-pinecube.dts
@@ -10,7 +10,7 @@
/ {
model = "PineCube IP Camera";
- compatible = "pine64,pinecube", "allwinner,sun8i-s3";
+ compatible = "pine64,pinecube", "sochip,s3", "allwinner,sun8i-v3";
aliases {
serial0 = &uart2;
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 0c7341676921..89abd4cc7e23 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -539,7 +539,7 @@
gic: interrupt-controller@1c81000 {
compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
- <0x01c82000 0x1000>,
+ <0x01c82000 0x2000>,
<0x01c84000 0x2000>,
<0x01c86000 0x2000>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
index 15c22b06fc4b..47954551f573 100644
--- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
+++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
@@ -120,7 +120,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_dc1sw>;
status = "okay";
};
@@ -198,16 +198,16 @@
};
&reg_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-name = "vcc-gmac-phy";
};
&reg_dcdc1 {
regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
};
&reg_dcdc2 {
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index d3b337b043a1..484b93df20cb 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -129,7 +129,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_cldo1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index bbc6335e5631..5c3580d712e4 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -124,7 +124,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <&reg_cldo1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
index 39263e74fbb5..8e5cb3b3fd68 100644
--- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
+++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
@@ -126,7 +126,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
index a0b829738e8f..10794a870776 100644
--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
@@ -446,7 +446,7 @@
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(V, 6) IRQ_TYPE_LEVEL_LOW>;
- reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
avdd-supply = <&vdd_3v3_sys>;
vdd-supply = <&vdd_3v3_sys>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index e500911ce0a5..6f1e0f0d4f0a 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -406,6 +406,9 @@
};
};
+&mdio1 {
+ clock-frequency = <5000000>;
+};
&iomuxc {
pinctrl_gpio_e6185_eeprom_sel: pinctrl-gpio-e6185-eeprom-spi0 {
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index ef484c4cfd1a..d9119da65f48 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -89,7 +89,6 @@ CONFIG_USB_SERIAL_KEYSPAN=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 4fec2ec379ad..911e880f06ed 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -191,7 +191,6 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
diff --git a/arch/arm/configs/ebsa110_defconfig b/arch/arm/configs/ebsa110_defconfig
deleted file mode 100644
index 731a22a55f4e..000000000000
--- a/arch/arm/configs/ebsa110_defconfig
+++ /dev/null
@@ -1,74 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_MODULES=y
-CONFIG_ARCH_EBSA110=y
-CONFIG_PCCARD=m
-CONFIG_I82365=m
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs rw mem=16M console=ttyS1,38400n8"
-CONFIG_FPE_NWFPE=y
-CONFIG_FPE_FASTFPE=y
-CONFIG_BINFMT_AOUT=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_SYN_COOKIES=y
-CONFIG_IPV6=y
-CONFIG_NETFILTER=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_LOG=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_TARGET_ECN=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MATCH_FRAG=y
-CONFIG_IP6_NF_MATCH_OPTS=y
-CONFIG_IP6_NF_MATCH_HL=y
-CONFIG_IP6_NF_MATCH_RT=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_FW_LOADER=m
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_1284=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AM79C961A=y
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_PCNET=m
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_CS=m
-CONFIG_PRINTER=m
-CONFIG_WATCHDOG=y
-CONFIG_SOFT_WATCHDOG=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MINIX_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index aeb1209e0804..bb70acc6b526 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -93,6 +93,7 @@ CONFIG_SPI=y
CONFIG_SPI_IMX=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_MXC=y
CONFIG_W1=y
CONFIG_W1_MASTER_MXC=y
CONFIG_W1_SLAVE_THERM=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 0fa79bd00219..221f5c340c86 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -217,6 +217,7 @@ CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCF857X=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_74X164=y
+CONFIG_GPIO_MXC=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_POWER_RESET_SYSCON_POWEROFF=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 27e7c0714b96..0d6edeb27659 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -141,7 +141,6 @@ CONFIG_HDLC_CISCO=m
CONFIG_HDLC_FR=m
CONFIG_HDLC_PPP=m
CONFIG_HDLC_X25=m
-CONFIG_DLCI=m
CONFIG_WAN_ROUTER_DRIVERS=m
CONFIG_ATM_TCP=m
# CONFIG_INPUT_KEYBOARD is not set
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 70b709a669d2..e00be9faa23b 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -166,6 +166,7 @@ CONFIG_SPI_IMX=y
CONFIG_SPI_ORION=y
CONFIG_GPIO_ASPEED=m
CONFIG_GPIO_ASPEED_SGPIO=y
+CONFIG_GPIO_MXC=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO=y
CONFIG_POWER_RESET_QNAP=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index e731cdf7c88c..a611b0c1e540 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -465,6 +465,7 @@ CONFIG_GPIO_PALMAS=y
CONFIG_GPIO_TPS6586X=y
CONFIG_GPIO_TPS65910=y
CONFIG_GPIO_TWL4030=y
+CONFIG_GPIO_MXC=y
CONFIG_POWER_AVS=y
CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_POWER_RESET_AS3722=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 34793aabdb65..58df9fd79a76 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=m
-CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index d7b9eaf4783c..8654ece13004 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -574,7 +574,6 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index a1cdbfa064c5..8b2c14424927 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -185,7 +185,6 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
index 4d1707388d94..312428d83eed 100644
--- a/arch/arm/crypto/aes-ce-core.S
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -386,20 +386,32 @@ ENTRY(ce_aes_ctr_encrypt)
.Lctrloop4x:
subs r4, r4, #4
bmi .Lctr1x
- add r6, r6, #1
+
+ /*
+ * NOTE: the sequence below has been carefully tweaked to avoid
+ * a silicon erratum that exists in Cortex-A57 (#1742098) and
+ * Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs
+ * may produce an incorrect result if they take their input from a
+ * register of which a single 32-bit lane has been updated the last
+ * time it was modified. To work around this, the lanes of registers
+ * q0-q3 below are not manipulated individually, and the different
+ * counter values are prepared by successive manipulations of q7.
+ */
+ add ip, r6, #1
vmov q0, q7
+ rev ip, ip
+ add lr, r6, #2
+ vmov s31, ip @ set lane 3 of q1 via q7
+ add ip, r6, #3
+ rev lr, lr
vmov q1, q7
- rev ip, r6
- add r6, r6, #1
+ vmov s31, lr @ set lane 3 of q2 via q7
+ rev ip, ip
vmov q2, q7
- vmov s7, ip
- rev ip, r6
- add r6, r6, #1
+ vmov s31, ip @ set lane 3 of q3 via q7
+ add r6, r6, #4
vmov q3, q7
- vmov s11, ip
- rev ip, r6
- add r6, r6, #1
- vmov s15, ip
+
vld1.8 {q4-q5}, [r1]!
vld1.8 {q6}, [r1]!
vld1.8 {q15}, [r1]!
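
For illustration, a minimal C model of the counter-staging pattern the erratum note above describes (function and variable names such as prep_ctr_ivs are hypothetical, not part of the patch): every output IV is a whole-register copy of the staging block q7, and only the staging block ever has a single 32-bit lane rewritten.

#include <stdint.h>
#include <string.h>

/* Each CTR IV is a full 16-byte copy of a staging block (q7 in the
 * assembly above); 'rev' byte-swaps the counter because the IV carries
 * it big-endian in its last 32-bit lane. */
static void prep_ctr_ivs(uint8_t out[4][16], uint8_t staging[16],
                         uint32_t ctr)
{
	for (int i = 0; i < 4; i++) {
		if (i) {
			uint32_t be = __builtin_bswap32(ctr + i); /* 'rev ip, ip' */
			memcpy(&staging[12], &be, 4);  /* 'vmov s31, ip': lane 3 of q7 */
		}
		memcpy(out[i], staging, 16);           /* 'vmov qN, q7': full copy */
	}
}
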
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index bda8bf17631e..f70af1d0514b 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -19,7 +19,7 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
-MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)-all");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
@@ -191,7 +191,8 @@ static int cbc_init(struct crypto_skcipher *tfm)
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int reqsize;
- ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->enc_tfm))
return PTR_ERR(ctx->enc_tfm);
@@ -441,7 +442,8 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_NEED_FALLBACK,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
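
The two hunks above work together: the driver now advertises CRYPTO_ALG_NEED_FALLBACK on its own "cbc(aes)" registration and passes the same bit in the lookup mask when allocating its fallback. A sketch of the lookup rule that makes this break the self-selection loop, simplified from my understanding of the crypto API's matching test (the helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* A candidate is acceptable only if its flags equal the requested type
 * on every bit selected by the mask; with NEED_FALLBACK in the mask and
 * clear in the type, any implementation that itself needs a fallback --
 * including this driver -- is skipped. */
static bool alg_acceptable(uint32_t cra_flags, uint32_t type, uint32_t mask)
{
	return ((cra_flags ^ type) & mask) == 0;
}
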
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index 59da6c0b63b6..7b5cf8430c6d 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -23,7 +23,7 @@
asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
int nrounds);
asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds);
+ int nrounds, unsigned int nbytes);
asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds);
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
@@ -42,24 +42,24 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
{
u8 buf[CHACHA_BLOCK_SIZE];
- while (bytes >= CHACHA_BLOCK_SIZE * 4) {
- chacha_4block_xor_neon(state, dst, src, nrounds);
- bytes -= CHACHA_BLOCK_SIZE * 4;
- src += CHACHA_BLOCK_SIZE * 4;
- dst += CHACHA_BLOCK_SIZE * 4;
- state[12] += 4;
- }
- while (bytes >= CHACHA_BLOCK_SIZE) {
- chacha_block_xor_neon(state, dst, src, nrounds);
- bytes -= CHACHA_BLOCK_SIZE;
- src += CHACHA_BLOCK_SIZE;
- dst += CHACHA_BLOCK_SIZE;
- state[12]++;
+ while (bytes > CHACHA_BLOCK_SIZE) {
+ unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U);
+
+ chacha_4block_xor_neon(state, dst, src, nrounds, l);
+ bytes -= l;
+ src += l;
+ dst += l;
+ state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
}
if (bytes) {
- memcpy(buf, src, bytes);
- chacha_block_xor_neon(state, buf, buf, nrounds);
- memcpy(dst, buf, bytes);
+ const u8 *s = src;
+ u8 *d = dst;
+
+ if (bytes != CHACHA_BLOCK_SIZE)
+ s = d = memcpy(buf, src, bytes);
+ chacha_block_xor_neon(state, d, s, nrounds);
+ if (d != dst)
+ memcpy(dst, buf, bytes);
}
}
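
A C outline of the reworked walk above, for reference (illustrative only; walk and block_counter are hypothetical names, and the commented-out call stands in for the asm routine):

enum { BLK = 64 };	/* CHACHA_BLOCK_SIZE */

static void walk(unsigned int bytes, unsigned int *block_counter)
{
	while (bytes > BLK) {	/* strictly '>': a sole final block, even a
				   full one, takes the single-block path */
		unsigned int l = bytes < 4 * BLK ? bytes : 4 * BLK;

		/* chacha_4block_xor_neon(state, dst, src, nrounds, l); */
		bytes -= l;
		*block_counter += (l + BLK - 1) / BLK;	/* DIV_ROUND_UP: a
							   short tail still
							   consumes a block */
	}
	/* 0 < bytes <= BLK remain: one single-block call; the stack bounce
	   buffer is needed only when bytes != BLK. */
}
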
diff --git a/arch/arm/crypto/chacha-neon-core.S b/arch/arm/crypto/chacha-neon-core.S
index eb22926d4912..13d12f672656 100644
--- a/arch/arm/crypto/chacha-neon-core.S
+++ b/arch/arm/crypto/chacha-neon-core.S
@@ -47,6 +47,7 @@
*/
#include <linux/linkage.h>
+#include <asm/cache.h>
.text
.fpu neon
@@ -205,7 +206,7 @@ ENDPROC(hchacha_block_neon)
.align 5
ENTRY(chacha_4block_xor_neon)
- push {r4-r5}
+ push {r4, lr}
mov r4, sp // preserve the stack pointer
sub ip, sp, #0x20 // allocate a 32 byte buffer
bic ip, ip, #0x1f // aligned to 32 bytes
@@ -229,10 +230,10 @@ ENTRY(chacha_4block_xor_neon)
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
- adr r5, .Lctrinc
+ adr lr, .Lctrinc
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
- vld1.32 {q4}, [r5, :128]
+ vld1.32 {q4}, [lr, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
vdup.32 q11, d5[1]
@@ -455,7 +456,7 @@ ENTRY(chacha_4block_xor_neon)
// Re-interleave the words in the first two rows of each block (x0..7).
// Also add the counter values 0-3 to x12[0-3].
- vld1.32 {q8}, [r5, :128] // load counter values 0-3
+ vld1.32 {q8}, [lr, :128] // load counter values 0-3
vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
@@ -493,6 +494,8 @@ ENTRY(chacha_4block_xor_neon)
// Re-interleave the words in the last two rows of each block (x8..15).
vld1.32 {q8-q9}, [sp, :256]
+ mov sp, r4 // restore original stack pointer
+ ldr r4, [r4, #8] // load number of bytes
vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
@@ -520,41 +523,121 @@ ENTRY(chacha_4block_xor_neon)
// XOR the rest of the data with the keystream
vld1.8 {q0-q1}, [r2]!
+ subs r4, r4, #96
veor q0, q0, q8
veor q1, q1, q12
+ ble .Lle96
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
+ subs r4, r4, #32
veor q0, q0, q2
veor q1, q1, q6
+ ble .Lle128
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
+ subs r4, r4, #32
veor q0, q0, q10
veor q1, q1, q14
+ ble .Lle160
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
+ subs r4, r4, #32
veor q0, q0, q4
veor q1, q1, q5
+ ble .Lle192
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
+ subs r4, r4, #32
veor q0, q0, q9
veor q1, q1, q13
+ ble .Lle224
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
+ subs r4, r4, #32
veor q0, q0, q3
veor q1, q1, q7
+ blt .Llt256
+.Lout:
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
- mov sp, r4 // restore original stack pointer
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
- pop {r4-r5}
- bx lr
+ pop {r4, pc}
+
+.Lle192:
+ vmov q4, q9
+ vmov q5, q13
+
+.Lle160:
+ // nothing to do
+
+.Lfinalblock:
+ // Process the final block if processing less than 4 full blocks.
+ // Entered with 32 bytes of ChaCha cipher stream in q4-q5, and the
+ // previous 32 byte output block that still needs to be written at
+ // [r1] in q0-q1.
+ beq .Lfullblock
+
+.Lpartialblock:
+ adr lr, .Lpermute + 32
+ add r2, r2, r4
+ add lr, lr, r4
+ add r4, r4, r1
+
+ vld1.8 {q2-q3}, [lr]
+ vld1.8 {q6-q7}, [r2]
+
+ add r4, r4, #32
+
+ vtbl.8 d4, {q4-q5}, d4
+ vtbl.8 d5, {q4-q5}, d5
+ vtbl.8 d6, {q4-q5}, d6
+ vtbl.8 d7, {q4-q5}, d7
+
+ veor q6, q6, q2
+ veor q7, q7, q3
+
+ vst1.8 {q6-q7}, [r4] // overlapping stores
+ vst1.8 {q0-q1}, [r1]
+ pop {r4, pc}
+
+.Lfullblock:
+ vmov q11, q4
+ vmov q15, q5
+ b .Lout
+.Lle96:
+ vmov q4, q2
+ vmov q5, q6
+ b .Lfinalblock
+.Lle128:
+ vmov q4, q10
+ vmov q5, q14
+ b .Lfinalblock
+.Lle224:
+ vmov q4, q3
+ vmov q5, q7
+ b .Lfinalblock
+.Llt256:
+ vmov q4, q11
+ vmov q5, q15
+ b .Lpartialblock
ENDPROC(chacha_4block_xor_neon)
+
+ .align L1_CACHE_SHIFT
+.Lpermute:
+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+ .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+ .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
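
The .Lpartialblock path above uses the classic "overlapping stores" tail trick: instead of a byte loop, the final short chunk is recomputed as one full-width chunk aligned to the end of the buffer and stored over bytes that were already written correctly, with the .Lpermute table feeding vtbl to rotate the keystream into place. A portable sketch under those assumptions (xor_tail and ks_rotated are hypothetical names):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumes len >= 32 and that the first len-32 output bytes are already
 * correct; ks_rotated is the keystream pre-rotated the way .Lpermute
 * lines it up with the moved window. */
static void xor_tail(uint8_t *dst, const uint8_t *src, size_t len,
		     const uint8_t ks_rotated[32])
{
	size_t off = len - 32;
	uint8_t tmp[32];

	for (int i = 0; i < 32; i++)
		tmp[i] = src[off + i] ^ ks_rotated[i];
	memcpy(dst + off, tmp, 32);	/* the overlapping store */
}
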
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index e79b1fb4b4dc..de9100c67b37 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -7,7 +7,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
diff --git a/arch/arm/crypto/sha1.h b/arch/arm/crypto/sha1.h
index 758db3e9ff0a..b1b7e21da2c3 100644
--- a/arch/arm/crypto/sha1.h
+++ b/arch/arm/crypto/sha1.h
@@ -3,7 +3,7 @@
#define ASM_ARM_CRYPTO_SHA1_H
#include <linux/crypto.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len);
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 4e954b3f7ecd..6c2b849e459d 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 0071e5e4411a..cfe36ae0f3f5 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/neon.h>
#include <asm/simd.h>
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index 87f0b62386c6..c62ce89dd3e0 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -7,7 +7,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index b8a4f79020cf..433ee4ddce6c 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -17,7 +17,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/simd.h>
#include <asm/neon.h>
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 79820b9e2541..701706262ef3 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -13,7 +13,7 @@
#include <crypto/internal/simd.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/simd.h>
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 8775aa42bbbe..0635a65aa488 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -6,7 +6,7 @@
*/
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
#include <linux/module.h>
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 96cb94403540..c879ad32db51 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -7,7 +7,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
#include <linux/module.h>
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 383635b68763..4a0848aef207 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -4,7 +4,6 @@ generic-y += extable.h
generic-y += flat.h
generic-y += local64.h
generic-y += parport.h
-generic-y += seccomp.h
generated-y += mach-types.h
generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index b078d992414b..61941f369861 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -111,10 +111,6 @@ extern int elf_check_arch(const struct elf32_hdr *);
extern int arm_elf_read_implies_exec(int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk)
-struct task_struct;
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
-#define ELF_CORE_COPY_TASK_REGS dump_task_regs
-
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index fc56fc3e1931..c279a8a463a2 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -7,14 +7,14 @@
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
#include <linux/pgtable.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
__end_of_permanent_fixed_addresses,
FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
FIX_TEXT_POKE0,
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index b95848ed2bc7..706efafbf972 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -2,16 +2,11 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/cache.h>
-#include <linux/threads.h>
#include <asm/irq.h>
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 31811be38d78..b4b66220952d 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -2,7 +2,8 @@
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
-#include <asm/kmap_types.h>
+#include <asm/cachetype.h>
+#include <asm/fixmap.h>
#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
@@ -46,19 +47,32 @@ extern pte_t *pkmap_page_table;
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
-#else
+
+static inline void *arch_kmap_local_high_get(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
+ return NULL;
+ return kmap_high_get(page);
+}
+#define arch_kmap_local_high_get arch_kmap_local_high_get
+
+#else /* ARCH_NEEDS_KMAP_HIGH_GET */
static inline void *kmap_high_get(struct page *page)
{
return NULL;
}
-#endif
+#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
-/*
- * The following functions are already defined by <linux/highmem.h>
- * when CONFIG_HIGHMEM is not set.
- */
-#ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_pfn(unsigned long pfn);
-#endif
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_kernel_page(vaddr)
+
+#define arch_kmap_local_pre_unmap(vaddr) \
+do { \
+ if (cache_is_vivt()) \
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
+} while (0)
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_page(vaddr)
#endif
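
These arch_kmap_local_* macros are the hooks the generic kmap_local implementation invokes around mapping and teardown; on VIVT caches the dcache line must be flushed before the mapping goes away. A usage sketch of the machinery they plug into, assuming the kmap_local_page()/kunmap_local() API of this kernel generation (copy_out is a hypothetical caller):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_out(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_local_page(page);	/* post_map hook flushes the
						   local kernel TLB entry */
	memcpy(dst, vaddr, len);
	kunmap_local(vaddr);			/* pre/post_unmap hooks run:
						   dcache flush on VIVT, then
						   TLB flush */
}
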
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 46d41140df27..1cbcc462b07e 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -31,6 +31,8 @@ void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
deleted file mode 100644
index 5590940ee43d..000000000000
--- a/arch/arm/include/asm/kmap_types.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARM_KMAP_TYPES_H
-#define __ARM_KMAP_TYPES_H
-
-/*
- * This is the "bare minimum". AIO seems to require this.
- */
-#define KM_TYPE_NR 16
-
-#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 213607a1f45c..e26a278d301a 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
-extern __visible kprobe_opcode_t optprobe_template_add_sp;
-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
-extern __visible kprobe_opcode_t optprobe_template_restore_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
+extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
+extern __visible kprobe_opcode_t optprobe_template_add_sp[];
+extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
+extern __visible kprobe_opcode_t optprobe_template_restore_end[];
#define MAX_OPTIMIZED_LENGTH 4
#define MAX_OPTINSN_SIZE \
- ((unsigned long)&optprobe_template_end - \
- (unsigned long)&optprobe_template_entry)
+ ((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry)
#define RELATIVEJUMP_SIZE 4
struct arch_optimized_insn {
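
The switch from scalar externs to incomplete array types matters under fortified memcpy: my understanding is that with a scalar declaration the compiler could assume the source object was a single kprobe_opcode_t and abort the template copy, whereas an incomplete array has unknowable size. A minimal sketch (tmpl_entry/tmpl_end are hypothetical stand-ins for the optprobe markers, which the assembly must still define):

#include <string.h>

extern char tmpl_entry[];	/* incomplete array: object size unknowable */
extern char tmpl_end[];

static void copy_template(void *dst)
{
	/* __builtin_object_size() returns -1 for incomplete arrays, so a
	 * fortified memcpy of the whole template span is accepted. */
	memcpy(dst, tmpl_entry, (size_t)(tmpl_end - tmpl_entry));
}
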
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index d75d39280db7..5f522916ec99 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -7,8 +7,6 @@
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-extern void timer_tick(void);
-
typedef void (*clock_access_fn)(struct timespec64 *);
extern int register_persistent_clock(clock_access_fn read_persistent);
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index f99ed524fe41..84e58956fcab 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -26,6 +26,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -92,33 +94,11 @@ static inline void finish_arch_post_lock_switch(void)
#endif /* CONFIG_MMU */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- return 0;
-}
-
-
#endif /* CONFIG_CPU_HAS_ASID */
-#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm: describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
@@ -149,6 +129,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
-#define deactivate_mm(tsk,mm) do { } while (0)
+#include <asm-generic/mmu_context.h>
#endif
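
This conversion (like the hardirq.h one above) relies on the asm-generic override pattern: the generic header compiles a no-op default for each hook only if the arch has not claimed the name first, which is why the ASID path now defines init_new_context to itself before the include. A sketch of the pattern as the generic header applies it to another hook:

#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
#endif
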
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 3502c2f746ca..baf7d0204eb5 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -75,6 +75,8 @@
#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fbb6693c3352..2b85d175e999 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -25,6 +25,8 @@
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/
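
These MAX_POSSIBLE_PHYSMEM_BITS definitions (32 without LPAE, 40 with) give consumers a compile-time bound on physical address width even where MAX_PHYSMEM_BITS is dynamic. A sketch of one consumer, modelled on my reading of mm/zsmalloc.c of this era, which packs a PFN plus an object index into an unsigned long handle:

#define OBJ_TAG_BITS	1
#define _PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
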
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
new file mode 100644
index 000000000000..e9ad0f37d2ba
--- /dev/null
+++ b/arch/arm/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_ARM
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "arm"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index 65530a042009..430be7774402 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -17,6 +17,8 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
+#define __ARCH_UAPI_SA_FLAGS (SA_THIRTYTWO | SA_RESTORER)
+
#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
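
The new __ARCH_UAPI_SA_FLAGS hook lets the generic signal headers own the common SA_* definitions (removed from the uapi header in the next hunk) while the arch contributes its extras. A sketch of the consumer, simplified from my understanding of <linux/signal_types.h> in this kernel generation:

#ifndef __ARCH_UAPI_SA_FLAGS
#define __ARCH_UAPI_SA_FLAGS	0
#endif

/* The set of sa_flags bits the kernel accepts from userspace. */
#define UAPI_SA_FLAGS \
	(SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
	 SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS)
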
diff --git a/arch/arm/include/uapi/asm/signal.h b/arch/arm/include/uapi/asm/signal.h
index 9b4185ba4f8a..c9a3ea1d8d41 100644
--- a/arch/arm/include/uapi/asm/signal.h
+++ b/arch/arm/include/uapi/asm/signal.h
@@ -60,33 +60,12 @@ typedef unsigned long sigset_t;
#define SIGSWI 32
/*
- * SA_FLAGS values:
- *
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_SIGINFO deliver the signal with SIGINFO structs
- * SA_THIRTYTWO delivers the signal in 32-bit mode, even if the task
- * is running in 26-bit.
- * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)).
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NODEFER prevents the current signal from being masked in the handler.
- * SA_RESETHAND clears the handler when the signal is delivered.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
+ * SA_THIRTYTWO historically meant deliver the signal in 32-bit mode, even if
+ * the task is running in 26-bit. But since the kernel no longer supports
+ * 26-bit mode, the flag has no effect.
*/
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
#define SA_THIRTYTWO 0x02000000
#define SA_RESTORER 0x04000000
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 89e5d864e923..09e67cb02b20 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,7 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o reboot.o \
+ process.o ptrace.o reboot.o io.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
@@ -83,10 +83,6 @@ AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
obj-$(CONFIG_EFI) += efi.o
-
-ifneq ($(CONFIG_ARCH_EBSA110),y)
- obj-y += io.o
-endif
obj-$(CONFIG_PARAVIRT) += paravirt.o
head-y := head$(MMUEXT).o
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
index 05fe92aa7d98..0529f90395c9 100644
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 8e6ace03e960..ee3aee69e444 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -71,7 +71,7 @@ void arch_cpu_idle(void)
arm_pm_idle();
else
cpu_do_idle();
- local_irq_enable();
+ raw_local_irq_enable();
}
void arch_cpu_idle_prepare(void)
@@ -272,15 +272,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
return 0;
}
-/*
- * Fill in the task's elfregs structure for a core dump.
- */
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
-{
- elf_core_copy_regs(elfregs, task_pt_regs(t));
- return 1;
-}
-
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 09b149b09c43..b3836c94dc74 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -60,20 +60,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
-/*
- * Kernel system timer support.
- */
-void timer_tick(void)
-{
- profile_tick(CPU_PROFILING);
- xtime_update(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-}
-#endif
-
static void dummy_clock_access(struct timespec64 *ts)
{
ts->tv_sec = 0;
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index fddd08a6e063..3408269d19c7 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -50,15 +50,6 @@ static const struct vm_special_mapping vdso_data_mapping = {
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
- unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
- unsigned long vdso_size;
-
- /* without VVAR page */
- vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
-
- if (vdso_size != new_size)
- return -EINVAL;
-
current->mm->context.vdso = new_vma->vm_start;
return 0;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 5f4922e858d0..f7f4620d59c3 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -41,6 +41,10 @@ SECTIONS
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
+#ifndef CONFIG_ARM_UNWIND
+ *(.ARM.exidx) *(.ARM.exidx.*)
+ *(.ARM.extab) *(.ARM.extab.*)
+#endif
}
. = PAGE_OFFSET + TEXT_OFFSET;
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index ae790908fc74..9b594ae98153 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -211,7 +211,6 @@ config ARCH_BRCMSTB
select BCM7038_L1_IRQ
select BRCMSTB_L2_IRQ
select BCM7120_L2_IRQ
- select ARCH_HAS_HOLES_MEMORYMODEL
select ZONE_DMA if ARM_LPAE
select SOC_BRCMSTB
select SOC_BUS
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index f56ff8c24043..de11030748d0 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -5,7 +5,6 @@ menuconfig ARCH_DAVINCI
depends on ARCH_MULTI_V5
select DAVINCI_TIMER
select ZONE_DMA
- select ARCH_HAS_HOLES_MEMORYMODEL
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
diff --git a/arch/arm/mach-ebsa110/Makefile b/arch/arm/mach-ebsa110/Makefile
deleted file mode 100644
index 296541315d25..000000000000
--- a/arch/arm/mach-ebsa110/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y := core.o io.o leds.o
diff --git a/arch/arm/mach-ebsa110/Makefile.boot b/arch/arm/mach-ebsa110/Makefile.boot
deleted file mode 100644
index e7e98937c71b..000000000000
--- a/arch/arm/mach-ebsa110/Makefile.boot
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
- zreladdr-y += 0x00008000
-params_phys-y := 0x00000400
-initrd_phys-y := 0x00800000
-
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
deleted file mode 100644
index 5960e3dfd2bf..000000000000
--- a/arch/arm/mach-ebsa110/core.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-ebsa110/core.c
- *
- * Copyright (C) 1998-2001 Russell King
- *
- * Extra MM routines for the EBSA-110 architecture
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/serial_8250.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/page.h>
-#include <asm/system_misc.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-
-#include <asm/mach/time.h>
-
-#include "core.h"
-
-static void ebsa110_mask_irq(struct irq_data *d)
-{
- __raw_writeb(1 << d->irq, IRQ_MCLR);
-}
-
-static void ebsa110_unmask_irq(struct irq_data *d)
-{
- __raw_writeb(1 << d->irq, IRQ_MSET);
-}
-
-static struct irq_chip ebsa110_irq_chip = {
- .irq_ack = ebsa110_mask_irq,
- .irq_mask = ebsa110_mask_irq,
- .irq_unmask = ebsa110_unmask_irq,
-};
-
-static void __init ebsa110_init_irq(void)
-{
- unsigned long flags;
- unsigned int irq;
-
- local_irq_save(flags);
- __raw_writeb(0xff, IRQ_MCLR);
- __raw_writeb(0x55, IRQ_MSET);
- __raw_writeb(0x00, IRQ_MSET);
- if (__raw_readb(IRQ_MASK) != 0x55)
- while (1);
- __raw_writeb(0xff, IRQ_MCLR); /* clear all interrupt enables */
- local_irq_restore(flags);
-
- for (irq = 0; irq < NR_IRQS; irq++) {
- irq_set_chip_and_handler(irq, &ebsa110_irq_chip,
- handle_level_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-}
-
-static struct map_desc ebsa110_io_desc[] __initdata = {
- /*
- * sparse external-decode ISAIO space
- */
- { /* IRQ_STAT/IRQ_MCLR */
- .virtual = (unsigned long)IRQ_STAT,
- .pfn = __phys_to_pfn(TRICK4_PHYS),
- .length = TRICK4_SIZE,
- .type = MT_DEVICE
- }, { /* IRQ_MASK/IRQ_MSET */
- .virtual = (unsigned long)IRQ_MASK,
- .pfn = __phys_to_pfn(TRICK3_PHYS),
- .length = TRICK3_SIZE,
- .type = MT_DEVICE
- }, { /* SOFT_BASE */
- .virtual = (unsigned long)SOFT_BASE,
- .pfn = __phys_to_pfn(TRICK1_PHYS),
- .length = TRICK1_SIZE,
- .type = MT_DEVICE
- }, { /* PIT_BASE */
- .virtual = (unsigned long)PIT_BASE,
- .pfn = __phys_to_pfn(TRICK0_PHYS),
- .length = TRICK0_SIZE,
- .type = MT_DEVICE
- },
-
- /*
- * self-decode ISAIO space
- */
- {
- .virtual = ISAIO_BASE,
- .pfn = __phys_to_pfn(ISAIO_PHYS),
- .length = ISAIO_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = ISAMEM_BASE,
- .pfn = __phys_to_pfn(ISAMEM_PHYS),
- .length = ISAMEM_SIZE,
- .type = MT_DEVICE
- }
-};
-
-static void __init ebsa110_map_io(void)
-{
- iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc));
-}
-
-static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size,
- unsigned int flags, void *caller)
-{
- return (void __iomem *)cookie;
-}
-
-static void ebsa110_iounmap(volatile void __iomem *io_addr)
-{}
-
-static void __init ebsa110_init_early(void)
-{
- arch_ioremap_caller = ebsa110_ioremap_caller;
- arch_iounmap = ebsa110_iounmap;
-}
-
-#define PIT_CTRL (PIT_BASE + 0x0d)
-#define PIT_T2 (PIT_BASE + 0x09)
-#define PIT_T1 (PIT_BASE + 0x05)
-#define PIT_T0 (PIT_BASE + 0x01)
-
-/*
- * This is the rate at which your MCLK signal toggles (in Hz)
- * This was measured on a 10 digit frequency counter sampling
- * over 1 second.
- */
-#define MCLK 47894000
-
-/*
- * This is the rate at which the PIT timers get clocked
- */
-#define CLKBY7 (MCLK / 7)
-
-/*
- * This is the counter value. We tick at 200Hz on this platform.
- */
-#define COUNT ((CLKBY7 + (HZ / 2)) / HZ)
-
-/*
- * Get the time offset from the system PIT. Note that if we have missed an
- * interrupt, then the PIT counter will roll over (ie, be negative).
- * This actually works out to be convenient.
- */
-static u32 ebsa110_gettimeoffset(void)
-{
- unsigned long offset, count;
-
- __raw_writeb(0x40, PIT_CTRL);
- count = __raw_readb(PIT_T1);
- count |= __raw_readb(PIT_T1) << 8;
-
- /*
- * If count > COUNT, make the number negative.
- */
- if (count > COUNT)
- count |= 0xffff0000;
-
- offset = COUNT;
- offset -= count;
-
- /*
- * `offset' is in units of timer counts. Convert
- * offset to units of microseconds.
- */
- offset = offset * (1000000 / HZ) / COUNT;
-
- return offset * 1000;
-}
-
-static irqreturn_t
-ebsa110_timer_interrupt(int irq, void *dev_id)
-{
- u32 count;
-
- /* latch and read timer 1 */
- __raw_writeb(0x40, PIT_CTRL);
- count = __raw_readb(PIT_T1);
- count |= __raw_readb(PIT_T1) << 8;
-
- count += COUNT;
-
- __raw_writeb(count & 0xff, PIT_T1);
- __raw_writeb(count >> 8, PIT_T1);
-
- timer_tick();
-
- return IRQ_HANDLED;
-}
-
-/*
- * Set up timer interrupt.
- */
-void __init ebsa110_timer_init(void)
-{
- int irq = IRQ_EBSA110_TIMER0;
-
- arch_gettimeoffset = ebsa110_gettimeoffset;
-
- /*
- * Timer 1, mode 2, LSB/MSB
- */
- __raw_writeb(0x70, PIT_CTRL);
- __raw_writeb(COUNT & 0xff, PIT_T1);
- __raw_writeb(COUNT >> 8, PIT_T1);
-
- if (request_irq(irq, ebsa110_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "EBSA110 Timer Tick", NULL))
- pr_err("Failed to request irq %d (EBSA110 Timer Tick)\n", irq);
-}
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .iobase = 0x3f8,
- .irq = 1,
- .uartclk = 1843200,
- .regshift = 0,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
- },
- {
- .iobase = 0x2f8,
- .irq = 2,
- .uartclk = 1843200,
- .regshift = 0,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
- },
- { },
-};
-
-static struct platform_device serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
-};
-
-static struct resource am79c961_resources[] = {
- {
- .start = 0x220,
- .end = 0x238,
- .flags = IORESOURCE_IO,
- }, {
- .start = IRQ_EBSA110_ETHERNET,
- .end = IRQ_EBSA110_ETHERNET,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device am79c961_device = {
- .name = "am79c961",
- .id = -1,
- .num_resources = ARRAY_SIZE(am79c961_resources),
- .resource = am79c961_resources,
-};
-
-static struct platform_device *ebsa110_devices[] = {
- &serial_device,
- &am79c961_device,
-};
-
-/*
- * EBSA110 idling methodology:
- *
- * We can not execute the "wait for interrupt" instruction since that
- * will stop our MCLK signal (which provides the clock for the glue
- * logic, and therefore the timer interrupt).
- *
- * Instead, we spin, polling the IRQ_STAT register for the occurrence
- * of any interrupt with core clock down to the memory clock.
- */
-static void ebsa110_idle(void)
-{
- const char *irq_stat = (char *)0xff000000;
-
- /* disable clock switching */
- asm volatile ("mcr p15, 0, ip, c15, c2, 2" : : : "cc");
-
- /* wait for an interrupt to occur */
- while (!*irq_stat);
-
- /* enable clock switching */
- asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
-}
-
-static int __init ebsa110_init(void)
-{
- arm_pm_idle = ebsa110_idle;
- return platform_add_devices(ebsa110_devices, ARRAY_SIZE(ebsa110_devices));
-}
-
-arch_initcall(ebsa110_init);
-
-static void ebsa110_restart(enum reboot_mode mode, const char *cmd)
-{
- soft_restart(0x80000000);
-}
-
-MACHINE_START(EBSA110, "EBSA110")
- /* Maintainer: Russell King */
- .atag_offset = 0x400,
- .reserve_lp0 = 1,
- .reserve_lp2 = 1,
- .map_io = ebsa110_map_io,
- .init_early = ebsa110_init_early,
- .init_irq = ebsa110_init_irq,
- .init_time = ebsa110_timer_init,
- .restart = ebsa110_restart,
-MACHINE_END
diff --git a/arch/arm/mach-ebsa110/core.h b/arch/arm/mach-ebsa110/core.h
deleted file mode 100644
index 47acc610b6b4..000000000000
--- a/arch/arm/mach-ebsa110/core.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 1996-2000 Russell King.
- *
- * This file contains the core hardware definitions of the EBSA-110.
- */
-#ifndef CORE_H
-#define CORE_H
-
-/* Physical addresses/sizes */
-#define ISAMEM_PHYS 0xe0000000
-#define ISAMEM_SIZE 0x10000000
-
-#define ISAIO_PHYS 0xf0000000
-#define ISAIO_SIZE PGDIR_SIZE
-
-#define TRICK0_PHYS 0xf2000000
-#define TRICK0_SIZE PGDIR_SIZE
-#define TRICK1_PHYS 0xf2400000
-#define TRICK1_SIZE PGDIR_SIZE
-#define TRICK2_PHYS 0xf2800000
-#define TRICK3_PHYS 0xf2c00000
-#define TRICK3_SIZE PGDIR_SIZE
-#define TRICK4_PHYS 0xf3000000
-#define TRICK4_SIZE PGDIR_SIZE
-#define TRICK5_PHYS 0xf3400000
-#define TRICK6_PHYS 0xf3800000
-#define TRICK7_PHYS 0xf3c00000
-
-/* Virtual addresses */
-#define PIT_BASE IOMEM(0xfc000000) /* trick 0 */
-#define SOFT_BASE IOMEM(0xfd000000) /* trick 1 */
-#define IRQ_MASK IOMEM(0xfe000000) /* trick 3 - read */
-#define IRQ_MSET IOMEM(0xfe000000) /* trick 3 - write */
-#define IRQ_STAT IOMEM(0xff000000) /* trick 4 - read */
-#define IRQ_MCLR IOMEM(0xff000000) /* trick 4 - write */
-
-#endif
diff --git a/arch/arm/mach-ebsa110/include/mach/entry-macro.S b/arch/arm/mach-ebsa110/include/mach/entry-macro.S
deleted file mode 100644
index 14b110de78a9..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/entry-macro.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/mach-ebsa110/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for ebsa110 platform.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-
-
-#define IRQ_STAT 0xff000000 /* read */
-
- .macro get_irqnr_preamble, base, tmp
- mov \base, #IRQ_STAT
- .endm
-
- .macro get_irqnr_and_base, irqnr, stat, base, tmp
- ldrb \stat, [\base] @ get interrupts
- mov \irqnr, #0
- tst \stat, #15
- addeq \irqnr, \irqnr, #4
- moveq \stat, \stat, lsr #4
- tst \stat, #3
- addeq \irqnr, \irqnr, #2
- moveq \stat, \stat, lsr #2
- tst \stat, #1
- addeq \irqnr, \irqnr, #1
- moveq \stat, \stat, lsr #1
- tst \stat, #1 @ bit 0 should be set
- .endm
-
diff --git a/arch/arm/mach-ebsa110/include/mach/hardware.h b/arch/arm/mach-ebsa110/include/mach/hardware.h
deleted file mode 100644
index 81f6967683f6..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/hardware.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-ebsa110/include/mach/hardware.h
- *
- * Copyright (C) 1996-2000 Russell King.
- *
- * This file contains the hardware definitions of the EBSA-110.
- */
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#define ISAMEM_BASE 0xe0000000
-#define ISAIO_BASE 0xf0000000
-
-/*
- * RAM definitions
- */
-#define UNCACHEABLE_ADDR 0xff000000 /* IRQ_STAT */
-
-#endif
-
diff --git a/arch/arm/mach-ebsa110/include/mach/io.h b/arch/arm/mach-ebsa110/include/mach/io.h
deleted file mode 100644
index ad170886c9aa..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/io.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-ebsa110/include/mach/io.h
- *
- * Copyright (C) 1997,1998 Russell King
- *
- * Modifications:
- * 06-Dec-1997 RMK Created.
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-u8 __inb8(unsigned int port);
-void __outb8(u8 val, unsigned int port);
-
-u8 __inb16(unsigned int port);
-void __outb16(u8 val, unsigned int port);
-
-u16 __inw(unsigned int port);
-void __outw(u16 val, unsigned int port);
-
-u32 __inl(unsigned int port);
-void __outl(u32 val, unsigned int port);
-
-u8 __readb(const volatile void __iomem *addr);
-u16 __readw(const volatile void __iomem *addr);
-u32 __readl(const volatile void __iomem *addr);
-
-void __writeb(u8 val, volatile void __iomem *addr);
-void __writew(u16 val, volatile void __iomem *addr);
-void __writel(u32 val, volatile void __iomem *addr);
-
-/*
- * Argh, someone forgot the IOCS16 line. We therefore have to handle
- * the byte stearing by selecting the correct byte IO functions here.
- */
-#ifdef ISA_SIXTEEN_BIT_PERIPHERAL
-#define inb(p) __inb16(p)
-#define outb(v,p) __outb16(v,p)
-#else
-#define inb(p) __inb8(p)
-#define outb(v,p) __outb8(v,p)
-#endif
-
-#define inw(p) __inw(p)
-#define outw(v,p) __outw(v,p)
-
-#define inl(p) __inl(p)
-#define outl(v,p) __outl(v,p)
-
-#define readb(b) __readb(b)
-#define readw(b) __readw(b)
-#define readl(b) __readl(b)
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-
-#define writeb(v,b) __writeb(v,b)
-#define writew(v,b) __writew(v,b)
-#define writel(v,b) __writel(v,b)
-
-#define insb insb
-extern void insb(unsigned int port, void *buf, int sz);
-#define insw insw
-extern void insw(unsigned int port, void *buf, int sz);
-#define insl insl
-extern void insl(unsigned int port, void *buf, int sz);
-
-#define outsb outsb
-extern void outsb(unsigned int port, const void *buf, int sz);
-#define outsw outsw
-extern void outsw(unsigned int port, const void *buf, int sz);
-#define outsl outsl
-extern void outsl(unsigned int port, const void *buf, int sz);
-
-/* can't support writesb atm */
-#define writesw writesw
-extern void writesw(volatile void __iomem *addr, const void *data, int wordlen);
-#define writesl writesl
-extern void writesl(volatile void __iomem *addr, const void *data, int longlen);
-
-/* can't support readsb atm */
-#define readsw readsw
-extern void readsw(const volatile void __iomem *addr, void *data, int wordlen);
-
-#define readsl readsl
-extern void readsl(const volatile void __iomem *addr, void *data, int longlen);
-
-#endif
diff --git a/arch/arm/mach-ebsa110/include/mach/irqs.h b/arch/arm/mach-ebsa110/include/mach/irqs.h
deleted file mode 100644
index 29a8671fe849..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/irqs.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-ebsa110/include/mach/irqs.h
- *
- * Copyright (C) 1996 Russell King
- */
-
-#define NR_IRQS 8
-
-#define IRQ_EBSA110_PRINTER 0
-#define IRQ_EBSA110_COM1 1
-#define IRQ_EBSA110_COM2 2
-#define IRQ_EBSA110_ETHERNET 3
-#define IRQ_EBSA110_TIMER0 4
-#define IRQ_EBSA110_TIMER1 5
-#define IRQ_EBSA110_PCMCIA 6
-#define IRQ_EBSA110_IMMEDIATE 7
diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h
deleted file mode 100644
index f025f405de50..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/memory.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-ebsa110/include/mach/memory.h
- *
- * Copyright (C) 1996-1999 Russell King.
- *
- * Changelog:
- * 20-Oct-1996 RMK Created
- * 31-Dec-1997 RMK Fixed definitions to reduce warnings
- * 21-Mar-1999 RMK Renamed to memory.h
- * RMK Moved TASK_SIZE and PAGE_OFFSET here
- */
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-/*
- * Cache flushing area - SRAM
- */
-#define FLUSH_BASE_PHYS 0x40000000
-#define FLUSH_BASE 0xdf000000
-
-#endif
diff --git a/arch/arm/mach-ebsa110/include/mach/uncompress.h b/arch/arm/mach-ebsa110/include/mach/uncompress.h
deleted file mode 100644
index 3ec12efe98a6..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/uncompress.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-ebsa110/include/mach/uncompress.h
- *
- * Copyright (C) 1996,1997,1998 Russell King
- */
-
-#include <linux/serial_reg.h>
-
-#define SERIAL_BASE ((unsigned char *)0xf0000be0)
-
-/*
- * This does not append a newline
- */
-static inline void putc(int c)
-{
- unsigned char v, *base = SERIAL_BASE;
-
- do {
- v = base[UART_LSR << 2];
- barrier();
- } while (!(v & UART_LSR_THRE));
-
- base[UART_TX << 2] = c;
-}
-
-static inline void flush(void)
-{
- unsigned char v, *base = SERIAL_BASE;
-
- do {
- v = base[UART_LSR << 2];
- barrier();
- } while ((v & (UART_LSR_TEMT|UART_LSR_THRE)) !=
- (UART_LSR_TEMT|UART_LSR_THRE));
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
diff --git a/arch/arm/mach-ebsa110/io.c b/arch/arm/mach-ebsa110/io.c
deleted file mode 100644
index 3c44dd3596ea..000000000000
--- a/arch/arm/mach-ebsa110/io.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/arm/mach-ebsa110/isamem.c
- *
- * Copyright (C) 2001 Russell King
- *
- * Perform "ISA" memory and IO accesses. The EBSA110 has some "peculiarities"
- * in the way it handles accesses to odd IO ports on 16-bit devices. These
- * devices have their D0-D15 lines connected to the processors D0-D15 lines.
- * Since they expect all byte IO operations to be performed on D0-D7, and the
- * StrongARM expects to transfer the byte to these odd addresses on D8-D15,
- * we must use a trick to get the required behaviour.
- *
- * The trick employed here is to use long word stores to odd address -1. The
- * glue logic picks this up as a "trick" access, and asserts the LSB of the
- * peripherals address bus, thereby accessing the odd IO port. Meanwhile, the
- * StrongARM transfers its data on D0-D7 as expected.
- *
- * Things get more interesting on the pass-1 EBSA110 - the PCMCIA controller
- * wiring was screwed in such a way that it had limited memory space access.
- * Luckily, the work-around for this is not too horrible. See
- * __isamem_convert_addr for the details.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/page.h>
-
-static void __iomem *__isamem_convert_addr(const volatile void __iomem *addr)
-{
- u32 ret, a = (u32 __force) addr;
-
- /*
- * The PCMCIA controller is wired up as follows:
- * +---------+---------+---------+---------+---------+---------+
- * PCMCIA | 2 2 2 2 | 1 1 1 1 | 1 1 1 1 | 1 1 | | |
- * | 3 2 1 0 | 9 8 7 6 | 5 4 3 2 | 1 0 9 8 | 7 6 5 4 | 3 2 1 0 |
- * +---------+---------+---------+---------+---------+---------+
- * CPU | 2 2 2 2 | 2 1 1 1 | 1 1 1 1 | 1 1 1 | | |
- * | 4 3 2 1 | 0 9 9 8 | 7 6 5 4 | 3 2 0 9 | 8 7 6 5 | 4 3 2 x |
- * +---------+---------+---------+---------+---------+---------+
- *
- * This means that we can access PCMCIA regions as follows:
- * 0x*10000 -> 0x*1ffff
- * 0x*70000 -> 0x*7ffff
- * 0x*90000 -> 0x*9ffff
- * 0x*f0000 -> 0x*fffff
- */
- ret = (a & 0xf803fe) << 1;
- ret |= (a & 0x03fc00) << 2;
-
- ret += 0xe8000000;
-
- if ((a & 0x20000) == (a & 0x40000) >> 1)
- return (void __iomem *)ret;
-
- BUG();
- return NULL;
-}
-
-/*
- * read[bwl] and write[bwl]
- */
-u8 __readb(const volatile void __iomem *addr)
-{
- void __iomem *a = __isamem_convert_addr(addr);
- u32 ret;
-
- if ((unsigned long)addr & 1)
- ret = __raw_readl(a);
- else
- ret = __raw_readb(a);
- return ret;
-}
-
-u16 __readw(const volatile void __iomem *addr)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- if ((unsigned long)addr & 1)
- BUG();
-
- return __raw_readw(a);
-}
-
-u32 __readl(const volatile void __iomem *addr)
-{
- void __iomem *a = __isamem_convert_addr(addr);
- u32 ret;
-
- if ((unsigned long)addr & 3)
- BUG();
-
- ret = __raw_readw(a);
- ret |= __raw_readw(a + 4) << 16;
- return ret;
-}
-
-EXPORT_SYMBOL(__readb);
-EXPORT_SYMBOL(__readw);
-EXPORT_SYMBOL(__readl);
-
-void readsw(const volatile void __iomem *addr, void *data, int len)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- BUG_ON((unsigned long)addr & 1);
-
- __raw_readsw(a, data, len);
-}
-EXPORT_SYMBOL(readsw);
-
-void readsl(const volatile void __iomem *addr, void *data, int len)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- BUG_ON((unsigned long)addr & 3);
-
- __raw_readsl(a, data, len);
-}
-EXPORT_SYMBOL(readsl);
-
-void __writeb(u8 val, volatile void __iomem *addr)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- if ((unsigned long)addr & 1)
- __raw_writel(val, a);
- else
- __raw_writeb(val, a);
-}
-
-void __writew(u16 val, volatile void __iomem *addr)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- if ((unsigned long)addr & 1)
- BUG();
-
- __raw_writew(val, a);
-}
-
-void __writel(u32 val, volatile void __iomem *addr)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- if ((unsigned long)addr & 3)
- BUG();
-
- __raw_writew(val, a);
- __raw_writew(val >> 16, a + 4);
-}
-
-EXPORT_SYMBOL(__writeb);
-EXPORT_SYMBOL(__writew);
-EXPORT_SYMBOL(__writel);
-
-void writesw(volatile void __iomem *addr, const void *data, int len)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- BUG_ON((unsigned long)addr & 1);
-
- __raw_writesw(a, data, len);
-}
-EXPORT_SYMBOL(writesw);
-
-void writesl(volatile void __iomem *addr, const void *data, int len)
-{
- void __iomem *a = __isamem_convert_addr(addr);
-
- BUG_ON((unsigned long)addr & 3);
-
- __raw_writesl(a, data, len);
-}
-EXPORT_SYMBOL(writesl);
-
-/*
- * The EBSA110 has a weird "ISA IO" region:
- *
- * Region 0 (addr = 0xf0000000 + io << 2)
- * --------------------------------------------------------
- * Physical region IO region
- * f0000fe0 - f0000ffc 3f8 - 3ff ttyS0
- * f0000e60 - f0000e64 398 - 399
- * f0000de0 - f0000dfc 378 - 37f lp0
- * f0000be0 - f0000bfc 2f8 - 2ff ttyS1
- *
- * Region 1 (addr = 0xf0000000 + (io & ~1) << 1 + (io & 1))
- * --------------------------------------------------------
- * Physical region IO region
- * f00014f1 a79 pnp write data
- * f00007c0 - f00007c1 3e0 - 3e1 pcmcia
- * f00004f1 279 pnp address
- * f0000440 - f000046c 220 - 236 eth0
- * f0000405 203 pnp read data
- */
-#define SUPERIO_PORT(p) \
- (((p) >> 3) == (0x3f8 >> 3) || \
- ((p) >> 3) == (0x2f8 >> 3) || \
- ((p) >> 3) == (0x378 >> 3))
-
-/*
- * We're addressing an 8 or 16-bit peripheral which tranfers
- * odd addresses on the low ISA byte lane.
- */
-u8 __inb8(unsigned int port)
-{
- u32 ret;
-
- /*
- * The SuperIO registers use sane addressing techniques...
- */
- if (SUPERIO_PORT(port))
- ret = __raw_readb((void __iomem *)ISAIO_BASE + (port << 2));
- else {
- void __iomem *a = (void __iomem *)ISAIO_BASE + ((port & ~1) << 1);
-
- /*
- * Shame nothing else does
- */
- if (port & 1)
- ret = __raw_readl(a);
- else
- ret = __raw_readb(a);
- }
- return ret;
-}
-
-/*
- * We're addressing a 16-bit peripheral which transfers odd
- * addresses on the high ISA byte lane.
- */
-u8 __inb16(unsigned int port)
-{
- unsigned int offset;
-
- /*
- * The SuperIO registers use sane addressing techniques...
- */
- if (SUPERIO_PORT(port))
- offset = port << 2;
- else
- offset = (port & ~1) << 1 | (port & 1);
-
- return __raw_readb((void __iomem *)ISAIO_BASE + offset);
-}
-
-u16 __inw(unsigned int port)
-{
- unsigned int offset;
-
- /*
- * The SuperIO registers use sane addressing techniques...
- */
- if (SUPERIO_PORT(port))
- offset = port << 2;
- else {
- offset = port << 1;
- BUG_ON(port & 1);
- }
- return __raw_readw((void __iomem *)ISAIO_BASE + offset);
-}
-
-/*
- * Fake a 32-bit read with two 16-bit reads. Needed for 3c589.
- */
-u32 __inl(unsigned int port)
-{
- void __iomem *a;
-
- if (SUPERIO_PORT(port) || port & 3)
- BUG();
-
- a = (void __iomem *)ISAIO_BASE + ((port & ~1) << 1);
-
- return __raw_readw(a) | __raw_readw(a + 4) << 16;
-}
-
-EXPORT_SYMBOL(__inb8);
-EXPORT_SYMBOL(__inb16);
-EXPORT_SYMBOL(__inw);
-EXPORT_SYMBOL(__inl);
-
-void __outb8(u8 val, unsigned int port)
-{
- /*
- * The SuperIO registers use sane addressing techniques...
- */
- if (SUPERIO_PORT(port))
- __raw_writeb(val, (void __iomem *)ISAIO_BASE + (port << 2));
- else {
- void __iomem *a = (void __iomem *)ISAIO_BASE + ((port & ~1) << 1);
-
- /*
- * Shame nothing else does
- */
- if (port & 1)
- __raw_writel(val, a);
- else
- __raw_writeb(val, a);
- }
-}
-
-void __outb16(u8 val, unsigned int port)
-{
- unsigned int offset;
-
- /*
- * The SuperIO registers use sane addressing techniques...
- */
- if (SUPERIO_PORT(port))
- offset = port << 2;
- else
- offset = (port & ~1) << 1 | (port & 1);
-
- __raw_writeb(val, (void __iomem *)ISAIO_BASE + offset);
-}
-
-void __outw(u16 val, unsigned int port)
-{
- unsigned int offset;
-
- /*
- * The SuperIO registers use sane addressing techniques...
- */
- if (SUPERIO_PORT(port))
- offset = port << 2;
- else {
- offset = port << 1;
- BUG_ON(port & 1);
- }
- __raw_writew(val, (void __iomem *)ISAIO_BASE + offset);
-}
-
-void __outl(u32 val, unsigned int port)
-{
- BUG();
-}
-
-EXPORT_SYMBOL(__outb8);
-EXPORT_SYMBOL(__outb16);
-EXPORT_SYMBOL(__outw);
-EXPORT_SYMBOL(__outl);
-
-void outsb(unsigned int port, const void *from, int len)
-{
- u32 off;
-
- if (SUPERIO_PORT(port))
- off = port << 2;
- else {
- off = (port & ~1) << 1;
- if (port & 1)
- BUG();
- }
-
- __raw_writesb((void __iomem *)ISAIO_BASE + off, from, len);
-}
-
-void insb(unsigned int port, void *from, int len)
-{
- u32 off;
-
- if (SUPERIO_PORT(port))
- off = port << 2;
- else {
- off = (port & ~1) << 1;
- if (port & 1)
- BUG();
- }
-
- __raw_readsb((void __iomem *)ISAIO_BASE + off, from, len);
-}
-
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(insb);
-
-void outsw(unsigned int port, const void *from, int len)
-{
- u32 off;
-
- if (SUPERIO_PORT(port))
- off = port << 2;
- else {
- off = (port & ~1) << 1;
- if (port & 1)
- BUG();
- }
-
- __raw_writesw((void __iomem *)ISAIO_BASE + off, from, len);
-}
-
-void insw(unsigned int port, void *from, int len)
-{
- u32 off;
-
- if (SUPERIO_PORT(port))
- off = port << 2;
- else {
- off = (port & ~1) << 1;
- if (port & 1)
- BUG();
- }
-
- __raw_readsw((void __iomem *)ISAIO_BASE + off, from, len);
-}
-
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(insw);
-
-/*
- * We implement these as 16-bit insw/outsw, mainly for
- * 3c589 cards.
- */
-void outsl(unsigned int port, const void *from, int len)
-{
- u32 off = port << 1;
-
- if (SUPERIO_PORT(port) || port & 3)
- BUG();
-
- __raw_writesw((void __iomem *)ISAIO_BASE + off, from, len << 1);
-}
-
-void insl(unsigned int port, void *from, int len)
-{
- u32 off = port << 1;
-
- if (SUPERIO_PORT(port) || port & 3)
- BUG();
-
- __raw_readsw((void __iomem *)ISAIO_BASE + off, from, len << 1);
-}
-
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insl);
diff --git a/arch/arm/mach-ebsa110/leds.c b/arch/arm/mach-ebsa110/leds.c
deleted file mode 100644
index fd1474b66d31..000000000000
--- a/arch/arm/mach-ebsa110/leds.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for the LED found on the EBSA110 machine
- * Based on Versatile and RealView machine LED code
- *
- * Author: Bryan Wu <bryan.wu@canonical.com>
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/leds.h>
-
-#include <asm/mach-types.h>
-
-#include "core.h"
-
-#if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS)
-static void ebsa110_led_set(struct led_classdev *cdev,
- enum led_brightness b)
-{
- u8 reg = __raw_readb(SOFT_BASE);
-
- if (b != LED_OFF)
- reg |= 0x80;
- else
- reg &= ~0x80;
-
- __raw_writeb(reg, SOFT_BASE);
-}
-
-static enum led_brightness ebsa110_led_get(struct led_classdev *cdev)
-{
- u8 reg = __raw_readb(SOFT_BASE);
-
- return (reg & 0x80) ? LED_FULL : LED_OFF;
-}
-
-static int __init ebsa110_leds_init(void)
-{
-
- struct led_classdev *cdev;
- int ret;
-
- if (!machine_is_ebsa110())
- return -ENODEV;
-
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
- return -ENOMEM;
-
- cdev->name = "ebsa110:0";
- cdev->brightness_set = ebsa110_led_set;
- cdev->brightness_get = ebsa110_led_get;
- cdev->default_trigger = "heartbeat";
-
- ret = led_classdev_register(NULL, cdev);
- if (ret < 0) {
- kfree(cdev);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Since we may have triggers on any subsystem, defer registration
- * until after subsystem_init.
- */
-fs_initcall(ebsa110_leds_init);
-#endif
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index d2d249706ebb..56d272967fc0 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -8,7 +8,6 @@
menuconfig ARCH_EXYNOS
bool "Samsung Exynos"
depends on ARCH_MULTI_V7
- select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index 1bc68913d62c..9de38ce8124f 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -2,7 +2,6 @@
config ARCH_HIGHBANK
bool "Calxeda ECX-1000/2000 (Highbank/Midway)"
depends on ARCH_MULTI_V7
- select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_ERRATA_764369 if SMP
diff --git a/arch/arm/mach-imx/anatop.c b/arch/arm/mach-imx/anatop.c
index d841bed8664d..7bb47eb3fc07 100644
--- a/arch/arm/mach-imx/anatop.c
+++ b/arch/arm/mach-imx/anatop.c
@@ -136,7 +136,7 @@ void __init imx_init_revision_from_anatop(void)
src_np = of_find_compatible_node(NULL, NULL,
"fsl,imx6ul-src");
- src_base = of_iomap(np, 0);
+ src_base = of_iomap(src_np, 0);
of_node_put(src_np);
WARN_ON(!src_base);
sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
diff --git a/arch/arm/mach-keystone/memory.h b/arch/arm/mach-keystone/memory.h
index 9147565d0581..1b9ed1271e05 100644
--- a/arch/arm/mach-keystone/memory.h
+++ b/arch/arm/mach-keystone/memory.h
@@ -6,9 +6,6 @@
#ifndef __MEMORY_H
#define __MEMORY_H
-#define MAX_PHYSMEM_BITS 36
-#define SECTION_SIZE_BITS 34
-
#define KEYSTONE_LOW_PHYS_START 0x80000000ULL
#define KEYSTONE_LOW_PHYS_SIZE 0x80000000ULL /* 2G */
#define KEYSTONE_LOW_PHYS_END (KEYSTONE_LOW_PHYS_START + \
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 2d962fe48821..a3a64bf97250 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -35,13 +35,8 @@ ENTRY(ll_get_coherency_base)
/*
* MMU is disabled, use the physical address of the coherency
- * base address. However, if the coherency fabric isn't mapped
- * (i.e. its virtual address is zero), it means coherency is
- * not enabled, so we return 0.
+ * base address (or 0x0 if the coherency fabric is not mapped)
*/
- ldr r1, =coherency_base
- cmp r1, #0
- beq 2f
adr r1, 3f
ldr r3, [r1]
ldr r1, [r1, r3]
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index cb7ce627ffe8..c40cf5ef8607 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -16,6 +16,7 @@
* Copyright (C) 2004 Nokia Corporation by Imre Deak <imre.deak@nokia.com>
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -46,6 +47,9 @@
#include "common.h"
#include "board-h2.h"
+/* The first 16 SoC GPIO lines are on this GPIO chip */
+#define OMAP_GPIO_LABEL "gpio-0-15"
+
/* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
#define OMAP1610_ETHR_START 0x04000300
@@ -334,7 +338,19 @@ static struct i2c_board_info __initdata h2_i2c_board_info[] = {
I2C_BOARD_INFO("tps65010", 0x48),
.platform_data = &tps_board,
}, {
- I2C_BOARD_INFO("isp1301_omap", 0x2d),
+ .type = "isp1301_omap",
+ .addr = 0x2d,
+ .dev_name = "isp1301",
+ },
+};
+
+static struct gpiod_lookup_table isp1301_gpiod_table = {
+ .dev_id = "isp1301",
+ .table = {
+ /* Active low since the irq triggers on falling edge */
+ GPIO_LOOKUP(OMAP_GPIO_LABEL, 2,
+ NULL, GPIO_ACTIVE_LOW),
+ { },
},
};
@@ -406,8 +422,10 @@ static void __init h2_init(void)
h2_smc91x_resources[1].end = gpio_to_irq(0);
platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices));
omap_serial_init();
+
+ /* ISP1301 IRQ wired at M14 */
+ omap_cfg_reg(M14_1510_GPIO2);
h2_i2c_board_info[0].irq = gpio_to_irq(58);
- h2_i2c_board_info[1].irq = gpio_to_irq(2);
omap_register_i2c_bus(1, 100, h2_i2c_board_info,
ARRAY_SIZE(h2_i2c_board_info));
omap1_usb_init(&h2_usb_config);
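Aside on the gpiod_lookup_table added above: board code publishes the table once, and the consumer driver then resolves the line by the matching dev_id instead of being passed a raw GPIO number. A minimal sketch under that assumption — the registration call itself may sit in a hunk not shown here, and the helper names below are illustrative:

    #include <linux/gpio/machine.h>
    #include <linux/gpio/consumer.h>
    #include <linux/err.h>

    /* board side: make the lookup table visible to gpiolib */
    static int __init h2_isp1301_gpio_init(void)
    {
            gpiod_add_lookup_table(&isp1301_gpiod_table);
            return 0;
    }

    /* driver side ("isp1301"): the NULL con_id matches the table entry */
    static int isp1301_get_irq(struct device *dev)
    {
            struct gpio_desc *gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);

            return IS_ERR(gpiod) ? PTR_ERR(gpiod) : gpiod_to_irq(gpiod);
    }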
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 144b9caa935c..a720259099ed 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -288,7 +288,7 @@ static struct gpiod_lookup_table osk_usb_gpio_table = {
.dev_id = "ohci",
.table = {
/* Power GPIO on the I2C-attached TPS65010 */
- GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
GPIO_ACTIVE_HIGH),
},
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 3ee7bdff86b2..164985505f9e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -7,7 +7,6 @@ config ARCH_OMAP2
depends on ARCH_MULTI_V6
select ARCH_OMAP2PLUS
select CPU_V6
- select PM_GENERIC_DOMAINS if PM
select SOC_HAS_OMAP2_SDRC
config ARCH_OMAP3
@@ -94,7 +93,6 @@ config SOC_DRA7XX
config ARCH_OMAP2PLUS
bool
select ARCH_HAS_BANDGAP
- select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_HAS_RESET_CONTROLLER
select ARCH_OMAP
select CLKSRC_MMIO
@@ -106,6 +104,8 @@ config ARCH_OMAP2PLUS
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM
select RESET_CONTROLLER
select SOC_BUS
select TI_SYSC
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a92d277f81a0..c8d317fafe2e 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (mpuss_can_lose_context) {
error = cpu_cluster_pm_enter();
if (error) {
- omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
- goto cpu_cluster_pm_out;
+ index = 0;
+ cx = state_ptr + index;
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ mpuss_can_lose_context = 0;
}
}
}
@@ -184,7 +187,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;
-cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
diff --git a/arch/arm/mach-rpc/time.c b/arch/arm/mach-rpc/time.c
index da85cac761ba..9f8edcfe9357 100644
--- a/arch/arm/mach-rpc/time.c
+++ b/arch/arm/mach-rpc/time.c
@@ -81,7 +81,7 @@ static irqreturn_t
ioc_timer_interrupt(int irq, void *dev_id)
{
ioc_time += RPC_LATCH;
- timer_tick();
+ legacy_timer_tick(1);
return IRQ_HANDLED;
}
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 95d4e8284866..d644b45bc29d 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -8,7 +8,6 @@
config ARCH_S5PV210
bool "Samsung S5PV210/S5PC110"
depends on ARCH_MULTI_V7
- select ARCH_HAS_HOLES_MEMORYMODEL
select ARM_VIC
select CLKSRC_SAMSUNG_PWM
select COMMON_CLK_SAMSUNG
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 06da2747a90b..19635721013d 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = {
"allwinner,sun8i-h2-plus",
"allwinner,sun8i-h3",
"allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
"allwinner,sun8i-v3s",
NULL,
};
diff --git a/arch/arm/mach-tango/Kconfig b/arch/arm/mach-tango/Kconfig
index 25b2fd434861..a9eeda36aeb1 100644
--- a/arch/arm/mach-tango/Kconfig
+++ b/arch/arm/mach-tango/Kconfig
@@ -3,7 +3,6 @@ config ARCH_TANGO
bool "Sigma Designs Tango4 (SMP87xx)"
depends on ARCH_MULTI_V7
# Cortex-A9 MPCore r3p0, PL310 r3p2
- select ARCH_HAS_HOLES_MEMORYMODEL
select ARM_ERRATA_754322
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_775420
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 7cb1699fbfc4..c4ce477c5261 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
deleted file mode 100644
index 187fab227b50..000000000000
--- a/arch/arm/mm/highmem.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mm/highmem.c -- ARM highmem support
- *
- * Author: Nicolas Pitre
- * Created: September 8, 2008
- * Copyright: Marvell Semiconductors Inc.
- */
-
-#include <linux/module.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
-
-static inline void set_fixmap_pte(int idx, pte_t pte)
-{
- unsigned long vaddr = __fix_to_virt(idx);
- pte_t *ptep = virt_to_kpte(vaddr);
-
- set_pte_ext(ptep, pte, 0);
- local_flush_tlb_kernel_page(vaddr);
-}
-
-static inline pte_t get_fixmap_pte(unsigned long vaddr)
-{
- pte_t *ptep = virt_to_kpte(vaddr);
-
- return *ptep;
-}
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned int idx;
- unsigned long vaddr;
- void *kmap;
- int type;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- /*
- * There is no cache coherency issue with a non-VIVT cache, so force the
- * dedicated kmap usage for better debugging purposes in that case.
- */
- if (!cache_is_vivt())
- kmap = NULL;
- else
-#endif
- kmap = kmap_high_get(page);
- if (kmap)
- return kmap;
-
- type = kmap_atomic_idx_push();
-
- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- /*
- * With debugging enabled, kunmap_atomic forces that entry to 0.
- * Make sure it was indeed properly unmapped.
- */
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
-#endif
- /*
- * When debugging is off, kunmap_atomic leaves the previous mapping
- * in place, so the contained TLB flush ensures the TLB is updated
- * with the new mapping.
- */
- set_fixmap_pte(idx, mk_pte(page, prot));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int idx, type;
-
- if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx();
- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-
- if (cache_is_vivt())
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(idx));
- set_fixmap_pte(idx, __pte(0));
-#else
- (void) idx; /* to kill a warning */
-#endif
- kmap_atomic_idx_pop();
- } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
- /* this address was obtained through kmap_high_get() */
- kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
- }
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- unsigned long vaddr;
- int idx, type;
- struct page *page = pfn_to_page(pfn);
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
-#endif
- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
-
- return (void *)vaddr;
-}
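With the arch-specific atomic kmap code above gone, short-lived highmem mappings go through the generic kmap_local layer instead. A minimal sketch of the replacement usage, assuming the generic kmap_local_page()/kunmap_local() pair this series migrates to:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy out of a (possibly highmem) page through a short-lived,
     * CPU-local mapping; illustration only, not part of this patch. */
    static void copy_from_page(struct page *page, void *dst, size_t len)
    {
            void *src = kmap_local_page(page);

            memcpy(dst, src, len);
            kunmap_local(src);      /* unmap in LIFO order */
    }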
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index d57112a276f5..db623d7c30de 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -267,83 +267,6 @@ static inline void poison_init_mem(void *s, size_t count)
*p++ = 0xe7fddef0;
}
-static inline void __init
-free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
- struct page *start_pg, *end_pg;
- phys_addr_t pg, pgend;
-
- /*
- * Convert start_pfn/end_pfn to a struct page pointer.
- */
- start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn - 1) + 1;
-
- /*
- * Convert to physical addresses, and
- * round start upwards and end downwards.
- */
- pg = PAGE_ALIGN(__pa(start_pg));
- pgend = __pa(end_pg) & PAGE_MASK;
-
- /*
- * If there are free pages between these,
- * free the section of the memmap array.
- */
- if (pg < pgend)
- memblock_free_early(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
- unsigned long start, end, prev_end = 0;
- int i;
-
- /*
- * This relies on each bank being in address order.
- * The banks are sorted previously in bootmem_init().
- */
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-#ifdef CONFIG_SPARSEMEM
- /*
- * Take care not to free memmap entries that don't exist
- * due to SPARSEMEM sections which aren't present.
- */
- start = min(start,
- ALIGN(prev_end, PAGES_PER_SECTION));
-#else
- /*
- * Align down here since the VM subsystem insists that the
- * memmap entries are valid from the bank start aligned to
- * MAX_ORDER_NR_PAGES.
- */
- start = round_down(start, MAX_ORDER_NR_PAGES);
-#endif
- /*
- * If we had a previous bank, and there is a space
- * between the current bank and the previous, free it.
- */
- if (prev_end && prev_end < start)
- free_memmap(prev_end, start);
-
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
- }
-
-#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
- free_memmap(prev_end,
- ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-}
-
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
@@ -354,8 +277,8 @@ static void __init free_highpages(void)
/* set highmem page free */
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
&range_start, &range_end, NULL) {
- unsigned long start = PHYS_PFN(range_start);
- unsigned long end = PHYS_PFN(range_end);
+ unsigned long start = PFN_UP(range_start);
+ unsigned long end = PFN_DOWN(range_end);
/* Ignore complete lowmem entries */
if (end <= max_low)
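The PFN_UP()/PFN_DOWN() switch above rounds the free range inward so a partially covered page frame is never handed to the allocator. A worked example with made-up addresses and 4 KiB pages:

    /* PFN_UP rounds a byte address up to the next whole page frame,
     * PFN_DOWN truncates to the containing frame (PAGE_SHIFT = 12). */
    #define PFN_UP(x)       (((x) + (1UL << 12) - 1) >> 12)
    #define PFN_DOWN(x)     ((x) >> 12)

    /* range_start = 0x1100, range_end = 0x3f00:
     *   PFN_UP(0x1100) = 2 and PFN_DOWN(0x3f00) = 3, so only frame 2
     * is freed; the old PHYS_PFN(0x1100) = 1 would also have freed
     * frame 1, which the range only partially covers. */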
@@ -385,7 +308,6 @@ void __init mem_init(void)
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
/* this will put all unused low memory onto the freelists */
- free_unused_memmap();
memblock_free_all();
#ifdef CONFIG_SA1111
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 7a449df0b359..c78180172120 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -85,21 +85,21 @@ asm (
"optprobe_template_end:\n");
#define TMPL_VAL_IDX \
- ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
#define TMPL_CALL_IDX \
- ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
#define TMPL_END_IDX \
- ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
#define TMPL_ADD_SP \
- ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_SUB_SP \
- ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
- ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_ORIGN_INSN \
- ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
#define TMPL_RESTORE_END \
- ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
+ ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
/*
* ARM can always optimize an instruction when using ARM ISA, except
@@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
- memcpy(code, (unsigned long *)&optprobe_template_entry,
+ memcpy(code, (unsigned long *)optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
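The '&' removals above rely on the template labels now being declared as arrays: the bare name already denotes the address of the first element, so the extra address-of only changed the expression's type, not its value. A small illustration under that assumption, with hypothetical identifiers:

    /* assumed array declaration of an assembler-provided label */
    extern unsigned long tmpl_entry[];

    static long words_from_entry(void *label)
    {
            /* offset of an embedded label, counted in longs; the array
             * name decays to &tmpl_entry[0] without a leading '&' */
            return (unsigned long *)label - tmpl_entry;
    }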
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f858c352f72a..9f0139ba8a1d 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -71,6 +71,7 @@ config ARM64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_USE_SYM_ANNOTATIONS
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
select ARCH_SUPPORTS_ATOMIC_RMW
@@ -81,6 +82,7 @@ config ARM64
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
@@ -101,7 +103,6 @@ config ARM64
select FRAME_POINTER
select GENERIC_ALLOCATOR
select GENERIC_ARCH_TOPOLOGY
- select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
@@ -124,6 +125,7 @@ config ARM64
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
select HAVE_MOVE_PMD
+ select HAVE_MOVE_PUD
select HAVE_PCI
select HAVE_ACPI_APEI if (ACPI && EFI)
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -138,6 +140,7 @@ config ARM64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+ select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_STACKLEAK
@@ -170,6 +173,8 @@ config ARM64
select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
+ select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -195,7 +200,6 @@ config ARM64
select PCI_SYSCALL if PCI
select POWER_RESET
select POWER_SUPPLY
- select SET_FS
select SPARSE_IRQ
select SWIOTLB
select SYSCTL_EXCEPTION_TRACE
@@ -331,16 +335,16 @@ config BROKEN_GAS_INST
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
- default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
- default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
- default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
- default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
- default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
- default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
- default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
- default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
- default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
+ default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
+ default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
+ default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
+ default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
+ default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
+ default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
+ default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
+ default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
+ default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
+ default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
default 0xffffffffffffffff
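These constants feed the generic KASAN address translation, where the shadow byte for an address is the address shifted right by the scale and biased by the offset. A sketch of the arithmetic, assuming the usual scale shift of 3 for generic KASAN (software tags use 4, hence the second set of defaults):

    /* shadow = (addr >> scale) + offset; values for 48-bit VA, !KASAN_SW_TAGS */
    #define KASAN_SHADOW_SCALE_SHIFT    3
    #define KASAN_SHADOW_OFFSET         0xdfff800000000000UL

    static inline unsigned long mem_to_shadow(unsigned long addr)
    {
            return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }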
source "arch/arm64/Kconfig.platforms"
@@ -636,6 +640,26 @@ config ARM64_ERRATUM_1542419
If unsure, say Y.
+config ARM64_ERRATUM_1508412
+ bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
+ default y
+ help
+ This option adds a workaround for Arm Cortex-A77 erratum 1508412.
+
+ Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
+ of a store-exclusive or read of PAR_EL1 and a load with device or
+ non-cacheable memory attributes. The workaround depends on a firmware
+ counterpart.
+
+ KVM guests must also have the workaround implemented or they can
+ deadlock the system.
+
+ Work around the issue by inserting DMB SY barriers around PAR_EL1
+ register reads and warning KVM users. The DMB barrier is sufficient
+ to prevent a speculative PAR_EL1 read.
+
+ If unsure, say Y.
+
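The barrier placement the help text describes can be sketched as plain inline assembly; the kernel itself patches the barriers in through the alternatives framework, so take this as the shape of the workaround rather than the real implementation:

    static inline unsigned long read_par_el1_with_workaround(void)
    {
            unsigned long par;

            asm volatile("dmb sy");         /* fence before the read... */
            asm volatile("mrs %0, par_el1" : "=r" (par));
            asm volatile("dmb sy");         /* ...and after it */

            return par;
    }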
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -982,7 +1006,7 @@ config NUMA
config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
- default "2"
+ default "4"
depends on NEED_MULTIPLE_NODES
help
Specify the maximum number of NUMA Nodes available on the target
@@ -1005,9 +1029,6 @@ config HOLES_IN_ZONE
source "kernel/Kconfig.hz"
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
@@ -1021,9 +1042,6 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_FLATMEM_ENABLE
def_bool !NUMA
-config HAVE_ARCH_PFN_VALID
- def_bool y
-
config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
@@ -1368,6 +1386,9 @@ config ARM64_PAN
The feature is detected at runtime, and will remain as a 'nop'
instruction if the cpu does not implement the feature.
+config AS_HAS_LDAPR
+ def_bool $(as-instr,.arch_extension rcpc)
+
config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
@@ -1405,27 +1426,6 @@ endmenu
menu "ARMv8.2 architectural features"
-config ARM64_UAO
- bool "Enable support for User Access Override (UAO)"
- default y
- help
- User Access Override (UAO; part of the ARMv8.2 Extensions)
- causes the 'unprivileged' variant of the load/store instructions to
- be overridden to be privileged.
-
- This option changes get_user() and friends to use the 'unprivileged'
- variant of the load/store instructions. This ensures that user-space
- really did have access to the supplied memory. When addr_limit is
- set to kernel memory the UAO bit will be set, allowing privileged
- access to kernel memory.
-
- Choosing this option will cause copy_to_user() et al to use user-space
- memory permissions.
-
- The feature is detected at runtime, the kernel will use the
- regular load/store instructions if the cpu does not implement the
- feature.
-
config ARM64_PMEM
bool "Enable support for persistent memory"
select ARCH_HAS_PMEM_API
@@ -1826,15 +1826,36 @@ config CMDLINE
entering them here. As a minimum, you should specify the
root device (e.g. root=/dev/nfs).
+choice
+ prompt "Kernel command line type" if CMDLINE != ""
+ default CMDLINE_FROM_BOOTLOADER
+ help
+ Choose how the kernel will handle the provided default kernel
+ command line string.
+
+config CMDLINE_FROM_BOOTLOADER
+ bool "Use bootloader kernel arguments if available"
+ help
+ Uses the command-line options passed by the boot loader. If
+ the boot loader doesn't provide any, the default kernel command
+ string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+ bool "Extend bootloader kernel arguments"
+ help
+ The command-line arguments provided by the boot loader will be
+ appended to the default kernel command string.
+
config CMDLINE_FORCE
bool "Always use the default kernel command string"
- depends on CMDLINE != ""
help
Always use the default kernel command string, even if the boot
loader passes other arguments to the kernel.
This is useful if you cannot or don't want to change the
command-line options your boot loader passes to the kernel.
+endchoice
+
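How the three choices combine the built-in string with whatever the boot loader hands over can be sketched as follows; the function and parameter names are invented, and the real logic lives in the FDT/ACPI boot path:

    #include <linux/kconfig.h>
    #include <linux/string.h>

    static void build_cmdline(char *dst, size_t len,
                              const char *from_loader, const char *builtin)
    {
            if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !from_loader[0]) {
                    strscpy(dst, builtin, len);     /* forced, or none passed */
            } else if (IS_ENABLED(CONFIG_CMDLINE_EXTEND)) {
                    strscpy(dst, builtin, len);     /* built-in first... */
                    strlcat(dst, " ", len);
                    strlcat(dst, from_loader, len); /* ...loader args appended */
            } else {
                    strscpy(dst, from_loader, len); /* CMDLINE_FROM_BOOTLOADER */
            }
    }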
config EFI_STUB
bool
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6f2494dd6d60..4991aef92be0 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -54,6 +54,7 @@ config ARCH_BCM_IPROC
config ARCH_BERLIN
bool "Marvell Berlin SoC Family"
select DW_APB_ICTL
+ select DW_APB_TIMER_OF
select GPIOLIB
select PINCTRL
help
@@ -256,7 +257,6 @@ config ARCH_TEGRA
select ARM_GIC_PM
select CLKSRC_MMIO
select TIMER_OF
- select GENERIC_CLOCKEVENTS
select GPIOLIB
select PINCTRL
select PM
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 5789c2d18d43..6a87d592bd00 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -28,10 +28,6 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419
endif
endif
-# We never want expected sections to be placed heuristically by the
-# linker. All sections should be explicitly named in the linker script.
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
-
ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 3ea5182ca489..e5e840b9fbb4 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -105,7 +105,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_dc1sw>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index d894ec5fa8a1..70e31743f0ba 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -120,7 +120,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_gmac_3v3>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index b26181cf9095..b54099b654c8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -13,7 +13,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-txid";
phy-handle = <&ext_rgmii_phy>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
index 3ab0f0347bc9..0494bfaf2ffa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
@@ -122,9 +122,6 @@
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
csi_ep: endpoint {
remote-endpoint = <&ov5640_ep>;
bus-width = <8>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 9ebb9e07fae3..d4069749d721 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -79,7 +79,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_dc1sw>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
index df1b9263ad0e..6e30a564c87f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
@@ -36,7 +36,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
/delete-property/ allwinner,leds-active-low;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index 4f9ba53ffaae..9d93fe153689 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -96,7 +96,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index 7d7aad18f078..8bf2db9dcbda 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -123,7 +123,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index cb44bfa5981f..33ab44072e6d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -124,7 +124,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 3f7ceeb1a767..7c9dbde645b5 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -97,7 +97,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_aldo2>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
index fceb298bfd53..29a081e72a9b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
@@ -27,7 +27,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_gmac_3v3>;
allwinner,rx-delay-ps = <200>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index af85b2074867..961732c52aa0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -100,7 +100,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <&reg_gmac_3v3>;
allwinner,rx-delay-ps = <200>;
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index feadd21bc0dc..46e558ab7729 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -159,7 +159,7 @@
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00a";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
index c07966740e14..f9b4a39683cf 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
@@ -192,7 +192,7 @@
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00a";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
index cb1360ae1211..7740f97c240f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
@@ -584,3 +584,9 @@
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
+
+&usb {
+ status = "okay";
+ dr_mode = "otg";
+ vbus-supply = <&usb_pwr>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index b9efc8469265..724ee179b316 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -171,6 +171,46 @@
#size-cells = <2>;
ranges;
+ usb: usb@ffe09080 {
+ compatible = "amlogic,meson-axg-usb-ctrl";
+ reg = <0x0 0xffe09080 0x0 0x20>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB1_DDR_BRIDGE>;
+ clock-names = "usb_ctrl", "ddr";
+ resets = <&reset RESET_USB_OTG>;
+
+ dr_mode = "otg";
+
+ phys = <&usb2_phy1>;
+ phy-names = "usb2-phy1";
+
+ dwc2: usb@ff400000 {
+ compatible = "amlogic,meson-g12a-usb", "snps,dwc2";
+ reg = <0x0 0xff400000 0x0 0x40000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_USB1>;
+ clock-names = "otg";
+ phys = <&usb2_phy1>;
+ dr_mode = "peripheral";
+ g-rx-fifo-size = <192>;
+ g-np-tx-fifo-size = <128>;
+ g-tx-fifo-size = <128 128 16 16 16>;
+ };
+
+ dwc3: usb@ff500000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xff500000 0x0 0x100000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ maximum-speed = "high-speed";
+ snps,dis_u2_susphy_quirk;
+ };
+ };
+
ethmac: ethernet@ff3f0000 {
compatible = "amlogic,meson-axg-dwmac",
"snps,dwmac-3.70a",
@@ -187,6 +227,8 @@
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
+ resets = <&reset RESET_ETHERNET>;
+ reset-names = "stmmaceth";
status = "disabled";
};
@@ -1734,6 +1776,16 @@
clock-names = "core", "clkin0", "clkin1";
resets = <&reset RESET_SD_EMMC_C>;
};
+
+ usb2_phy1: phy@9020 {
+ compatible = "amlogic,meson-gxl-usb2-phy";
+ #phy-cells = <0>;
+ reg = <0x0 0x9020 0x0 0x20>;
+ clocks = <&clkc CLKID_USB>;
+ clock-names = "phy";
+ resets = <&reset RESET_USB_OTG>;
+ reset-names = "phy";
+ };
};
sram: sram@fffc0000 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 1e83ec5b8c91..8514fe6a275a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -209,7 +209,7 @@
};
ethmac: ethernet@ff3f0000 {
- compatible = "amlogic,meson-axg-dwmac",
+ compatible = "amlogic,meson-g12a-dwmac",
"snps,dwmac-3.70a",
"snps,dwmac";
reg = <0x0 0xff3f0000 0x0 0x10000>,
@@ -224,6 +224,8 @@
"timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
+ resets = <&reset RESET_ETHERNET>;
+ reset-names = "stmmaceth";
status = "disabled";
mdio0: mdio {
@@ -282,6 +284,8 @@
hwrng: rng@218 {
compatible = "amlogic,meson-rng";
reg = <0x0 0x218 0x0 0x4>;
+ clocks = <&clkc CLKID_RNG0>;
+ clock-names = "core";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2-plus.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2-plus.dts
index 5de2815ba99d..ce1198ad34e4 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2-plus.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2-plus.dts
@@ -19,7 +19,7 @@
regulator-min-microvolt = <680000>;
regulator-max-microvolt = <1040000>;
- pwms = <&pwm_AO_cd 1 1500 0>;
+ pwms = <&pwm_ab 0 1500 0>;
};
&vddcpu_b {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
index 9b8548e5f6e5..ee8fcae9f9f0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
@@ -135,3 +135,7 @@
};
};
};
+
+&mali {
+ dma-coherent;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 0edd137151f8..726b91d3a905 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -13,6 +13,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/meson-gxbb-power.h>
+#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
#include <dt-bindings/thermal/thermal.h>
/ {
@@ -575,6 +576,8 @@
interrupt-names = "macirq";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
+ resets = <&reset RESET_ETHERNET>;
+ reset-names = "stmmaceth";
power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
index 55259f973b5a..aef8f2b00778 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
@@ -5,20 +5,20 @@
usb {
compatible = "simple-bus";
dma-ranges;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x68500000 0x00400000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
usbphy0: usb-phy@0 {
compatible = "brcm,sr-usb-combo-phy";
- reg = <0x00000000 0x100>;
+ reg = <0x0 0x00000000 0x0 0x100>;
#phy-cells = <1>;
status = "disabled";
};
xhci0: usb@1000 {
compatible = "generic-xhci";
- reg = <0x00001000 0x1000>;
+ reg = <0x0 0x00001000 0x0 0x1000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0 1>, <&usbphy0 0>;
phy-names = "phy0", "phy1";
@@ -28,7 +28,7 @@
bdc0: usb@2000 {
compatible = "brcm,bdc-v0.16";
- reg = <0x00002000 0x1000>;
+ reg = <0x0 0x00002000 0x0 0x1000>;
interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0 0>, <&usbphy0 1>;
phy-names = "phy0", "phy1";
@@ -38,21 +38,21 @@
usbphy1: usb-phy@10000 {
compatible = "brcm,sr-usb-combo-phy";
- reg = <0x00010000 0x100>;
+ reg = <0x0 0x00010000 0x0 0x100>;
#phy-cells = <1>;
status = "disabled";
};
usbphy2: usb-phy@20000 {
compatible = "brcm,sr-usb-hs-phy";
- reg = <0x00020000 0x100>;
+ reg = <0x0 0x00020000 0x0 0x100>;
#phy-cells = <0>;
status = "disabled";
};
xhci1: usb@11000 {
compatible = "generic-xhci";
- reg = <0x00011000 0x1000>;
+ reg = <0x0 0x00011000 0x0 0x1000>;
interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
phy-names = "phy0", "phy1", "phy2";
@@ -62,7 +62,7 @@
bdc1: usb@21000 {
compatible = "brcm,bdc-v0.16";
- reg = <0x00021000 0x1000>;
+ reg = <0x0 0x00021000 0x0 0x1000>;
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy2>;
phy-names = "phy0";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
index f46eb47cfa4d..8161dd237971 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -75,6 +75,7 @@
&enetc_port0 {
phy-handle = <&phy0>;
phy-connection-type = "sgmii";
+ managed = "in-band-status";
status = "okay";
mdio {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 73e4f9466887..7a6fb7e1fb82 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -1012,6 +1012,7 @@
compatible = "fsl,ls1028a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x1c>;
#fsl,rcpm-wakeup-cells = <7>;
+ little-endian;
};
ftm_alarm0: timer@2800000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index ff5805206a28..692d8f4a206d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -805,6 +805,7 @@
compatible = "fsl,ls1088a-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x18>;
#fsl,rcpm-wakeup-cells = <6>;
+ little-endian;
};
ftm_alarm0: timer@2800000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index bf72918fe545..e7abb74bd816 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -892,6 +892,7 @@
compatible = "fsl,ls208xa-rcpm", "fsl,qoriq-rcpm-2.1+";
reg = <0x0 0x1e34040 0x0 0x18>;
#fsl,rcpm-wakeup-cells = <6>;
+ little-endian;
};
ftm_alarm0: timer@2800000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
index 6de86a4f0ec4..b88c3c99b007 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
@@ -72,6 +72,7 @@
pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio1>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
@@ -210,6 +211,7 @@
host-wakeup-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
device-wakeup-gpios = <&gpio2 7 GPIO_ACTIVE_HIGH>;
clocks = <&osc_32k>;
+ max-speed = <4000000>;
clock-names = "extclk";
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index f305a530ff6f..521eb3a5a12e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -121,6 +121,7 @@
pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio1>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
index 4107fe914d08..49082529764f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
@@ -135,13 +135,10 @@
pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio2>;
- /*
- * The interrupt is not correct. It should be level low,
- * however with internal pull up this causes IRQ storm.
- */
- interrupts = <8 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
rohm,reset-snvs-powered;
#clock-cells = <0>;
@@ -398,7 +395,7 @@
pinctrl_pmic: pmicirqgrp {
fsl,pins = <
- MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x41
+ MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x141
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index b83f400def8b..05ee062548e4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -129,7 +129,7 @@
opp-1600000000 {
opp-hz = /bits/ 64 <1600000000>;
- opp-microvolt = <900000>;
+ opp-microvolt = <950000>;
opp-supported-hw = <0xc>, <0x7>;
clock-latency-ns = <150000>;
opp-suspend;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index 46e76cf32b2f..7dfee715a2c4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -53,6 +53,7 @@
pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio1>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-evk.dts
index 707d8486b4d8..8311b95dee49 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dts
@@ -18,6 +18,7 @@
pmic: pmic@25 {
compatible = "nxp,pca9450b";
reg = <0x25>;
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio1>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
index a2d0190921e4..7f356edf9f91 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
@@ -116,13 +116,10 @@
pmic@4b {
compatible = "rohm,bd71847";
reg = <0x4b>;
+ pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio2>;
- /*
- * The interrupt is not correct. It should be level low,
- * however with internal pull up this causes IRQ storm.
- */
- interrupts = <8 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
rohm,reset-snvs-powered;
regulators {
@@ -388,7 +385,7 @@
pinctrl_pmic: pmicirqgrp {
fsl,pins = <
- MX8MN_IOMUXC_SD1_DATA6_GPIO2_IO8 0x101
+ MX8MN_IOMUXC_SD1_DATA6_GPIO2_IO8 0x141
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 746faf1cf2fb..16c7202885d7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -790,28 +790,6 @@
#index-cells = <1>;
reg = <0x32e40200 0x200>;
};
-
- usbotg2: usb@32e50000 {
- compatible = "fsl,imx8mn-usb", "fsl,imx7d-usb";
- reg = <0x32e50000 0x200>;
- interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MN_CLK_USB1_CTRL_ROOT>;
- clock-names = "usb1_ctrl_root_clk";
- assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>,
- <&clk IMX8MN_CLK_USB_CORE_REF>;
- assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>,
- <&clk IMX8MN_SYS_PLL1_100M>;
- fsl,usbphy = <&usbphynop2>;
- fsl,usbmisc = <&usbmisc2 0>;
- status = "disabled";
- };
-
- usbmisc2: usbmisc@32e50200 {
- compatible = "fsl,imx8mn-usbmisc", "fsl,imx7d-usbmisc";
- #index-cells = <1>;
- reg = <0x32e50200 0x200>;
- };
-
};
dma_apbh: dma-controller@33000000 {
@@ -876,12 +854,4 @@
assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
clock-names = "main_clk";
};
-
- usbphynop2: usbphynop2 {
- compatible = "usb-nop-xceiv";
- clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
- assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
- assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
- clock-names = "main_clk";
- };
};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
index 8bc6caa9167d..4338db14c5da 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -19,6 +19,7 @@ fman0: fman@1a00000 {
clock-names = "fmanclk";
fsl,qman-channel-range = <0x800 0x10>;
ptimer-handle = <&ptp_timer0>;
+ dma-coherent;
muram@0 {
compatible = "fsl,fman-muram";
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
index 96c50d48289d..a7a83f29f00b 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
@@ -110,7 +110,7 @@
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "mt25qu02g";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
index 03733fd92732..215d2f702623 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
@@ -20,17 +20,23 @@
compatible = "globalscale,espressobin-v7-emmc", "globalscale,espressobin-v7",
"globalscale,espressobin", "marvell,armada3720",
"marvell,armada3710";
+
+ aliases {
+ /* ethernet1 is the wan port */
+ ethernet1 = &switch0port3;
+ ethernet3 = &switch0port1;
+ };
};
&switch0 {
ports {
- port@1 {
+ switch0port1: port@1 {
reg = <1>;
label = "lan1";
phy-handle = <&switch0phy0>;
};
- port@3 {
+ switch0port3: port@3 {
reg = <3>;
label = "wan";
phy-handle = <&switch0phy2>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
index 8570c5f47d7d..b6f4af8ebafb 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
@@ -19,17 +19,23 @@
model = "Globalscale Marvell ESPRESSOBin Board V7";
compatible = "globalscale,espressobin-v7", "globalscale,espressobin",
"marvell,armada3720", "marvell,armada3710";
+
+ aliases {
+ /* ethernet1 is the wan port */
+ ethernet1 = &switch0port3;
+ ethernet3 = &switch0port1;
+ };
};
&switch0 {
ports {
- port@1 {
+ switch0port1: port@1 {
reg = <1>;
label = "lan1";
phy-handle = <&switch0phy0>;
};
- port@3 {
+ switch0port3: port@3 {
reg = <3>;
label = "wan";
phy-handle = <&switch0phy2>;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
index b97218c72727..0775c16e0ec8 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
@@ -13,6 +13,10 @@
/ {
aliases {
ethernet0 = &eth0;
+ /* for DSA slave devices */
+ ethernet1 = &switch0port1;
+ ethernet2 = &switch0port2;
+ ethernet3 = &switch0port3;
serial0 = &uart0;
serial1 = &uart1;
};
@@ -120,7 +124,7 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
+ switch0port0: port@0 {
reg = <0>;
label = "cpu";
ethernet = <&eth0>;
@@ -131,19 +135,19 @@
};
};
- port@1 {
+ switch0port1: port@1 {
reg = <1>;
label = "wan";
phy-handle = <&switch0phy0>;
};
- port@2 {
+ switch0port2: port@2 {
reg = <2>;
label = "lan0";
phy-handle = <&switch0phy1>;
};
- port@3 {
+ switch0port3: port@3 {
reg = <3>;
label = "lan1";
phy-handle = <&switch0phy2>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 381a84912ba8..c28d51cc5797 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -10,18 +10,6 @@
model = "NVIDIA Jetson TX2 Developer Kit";
compatible = "nvidia,p2771-0000", "nvidia,tegra186";
- aconnect {
- status = "okay";
-
- dma-controller@2930000 {
- status = "okay";
- };
-
- interrupt-controller@2a40000 {
- status = "okay";
- };
- };
-
i2c@3160000 {
power-monitor@42 {
compatible = "ti,ina3221";
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
index a2893be80507..0dc8304a2edd 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
@@ -54,7 +54,7 @@
status = "okay";
};
- serial@c280000 {
+ serial@3100000 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index e9c90f0f44ff..93438d2b9469 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -1161,7 +1161,7 @@
hsp_aon: hsp@c150000 {
compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
- reg = <0x0c150000 0xa0000>;
+ reg = <0x0c150000 0x90000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index e18e1a9a3011..a9caaf7c0d67 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1663,16 +1663,6 @@
vin-supply = <&vdd_5v0_sys>;
};
- vdd_usb_vbus_otg: regulator@11 {
- compatible = "regulator-fixed";
- regulator-name = "USB_VBUS_EN0";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_5v0_sys>;
- };
-
vdd_hdmi: regulator@10 {
compatible = "regulator-fixed";
regulator-name = "VDD_HDMI_5V0";
@@ -1712,4 +1702,14 @@
enable-active-high;
vin-supply = <&vdd_3v3_sys>;
};
+
+ vdd_usb_vbus_otg: regulator@14 {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_VBUS_EN0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts b/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts
index f6e6a24829af..b5d9a5526272 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts
@@ -8,7 +8,7 @@
compatible = "nvidia,tegra234-vdk", "nvidia,tegra234";
aliases {
- sdhci3 = "/cbb@0/sdhci@3460000";
+ mmc3 = "/bus@0/mmc@3460000";
serial0 = &uarta;
};
@@ -17,12 +17,12 @@
stdout-path = "serial0:115200n8";
};
- cbb@0 {
+ bus@0 {
serial@3100000 {
status = "okay";
};
- sdhci@3460000 {
+ mmc@3460000 {
status = "okay";
bus-width = <8>;
non-removable;
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index a94dac76bf3f..59e0cbfa2214 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -179,22 +179,22 @@
};
soc: soc {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0xffffffff>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x0 0xffffffff>;
dma-ranges;
compatible = "simple-bus";
prng: qrng@e1000 {
compatible = "qcom,prng-ee";
- reg = <0xe3000 0x1000>;
+ reg = <0x0 0xe3000 0x0 0x1000>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
};
cryptobam: dma@704000 {
compatible = "qcom,bam-v1.7.0";
- reg = <0x00704000 0x20000>;
+ reg = <0x0 0x00704000 0x0 0x20000>;
interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_CRYPTO_AHB_CLK>;
clock-names = "bam_clk";
@@ -206,7 +206,7 @@
crypto: crypto@73a000 {
compatible = "qcom,crypto-v5.1";
- reg = <0x0073a000 0x6000>;
+ reg = <0x0 0x0073a000 0x0 0x6000>;
clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
<&gcc GCC_CRYPTO_AXI_CLK>,
<&gcc GCC_CRYPTO_CLK>;
@@ -217,7 +217,7 @@
tlmm: pinctrl@1000000 {
compatible = "qcom,ipq6018-pinctrl";
- reg = <0x01000000 0x300000>;
+ reg = <0x0 0x01000000 0x0 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
@@ -235,7 +235,7 @@
gcc: gcc@1800000 {
compatible = "qcom,gcc-ipq6018";
- reg = <0x01800000 0x80000>;
+ reg = <0x0 0x01800000 0x0 0x80000>;
clocks = <&xo>, <&sleep_clk>;
clock-names = "xo", "sleep_clk";
#clock-cells = <1>;
@@ -244,17 +244,17 @@
tcsr_mutex_regs: syscon@1905000 {
compatible = "syscon";
- reg = <0x01905000 0x8000>;
+ reg = <0x0 0x01905000 0x0 0x8000>;
};
tcsr_q6: syscon@1945000 {
compatible = "syscon";
- reg = <0x01945000 0xe000>;
+ reg = <0x0 0x01945000 0x0 0xe000>;
};
blsp_dma: dma@7884000 {
compatible = "qcom,bam-v1.7.0";
- reg = <0x07884000 0x2b000>;
+ reg = <0x0 0x07884000 0x0 0x2b000>;
interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "bam_clk";
@@ -264,7 +264,7 @@
blsp1_uart3: serial@78b1000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x078b1000 0x200>;
+ reg = <0x0 0x078b1000 0x0 0x200>;
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
@@ -276,7 +276,7 @@
compatible = "qcom,spi-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b5000 0x600>;
+ reg = <0x0 0x078b5000 0x0 0x600>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
spi-max-frequency = <50000000>;
clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
@@ -291,7 +291,7 @@
compatible = "qcom,spi-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b6000 0x600>;
+ reg = <0x0 0x078b6000 0x0 0x600>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
spi-max-frequency = <50000000>;
clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
@@ -306,7 +306,7 @@
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b6000 0x600>;
+ reg = <0x0 0x078b6000 0x0 0x600>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
@@ -321,7 +321,7 @@
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b7000 0x600>;
+ reg = <0x0 0x078b7000 0x0 0x600>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
@@ -336,24 +336,24 @@
compatible = "qcom,msm-qgic2";
interrupt-controller;
#interrupt-cells = <0x3>;
- reg = <0x0b000000 0x1000>, /*GICD*/
- <0x0b002000 0x1000>, /*GICC*/
- <0x0b001000 0x1000>, /*GICH*/
- <0x0b004000 0x1000>; /*GICV*/
+ reg = <0x0 0x0b000000 0x0 0x1000>, /*GICD*/
+ <0x0 0x0b002000 0x0 0x1000>, /*GICC*/
+ <0x0 0x0b001000 0x0 0x1000>, /*GICH*/
+ <0x0 0x0b004000 0x0 0x1000>; /*GICV*/
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
watchdog@b017000 {
compatible = "qcom,kpss-wdt";
interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
- reg = <0x0b017000 0x40>;
+ reg = <0x0 0x0b017000 0x0 0x40>;
clocks = <&sleep_clk>;
timeout-sec = <10>;
};
apcs_glb: mailbox@b111000 {
compatible = "qcom,ipq6018-apcs-apps-global";
- reg = <0x0b111000 0x1000>;
+ reg = <0x0 0x0b111000 0x0 0x1000>;
#clock-cells = <1>;
clocks = <&a53pll>, <&xo>;
clock-names = "pll", "xo";
@@ -362,7 +362,7 @@
a53pll: clock@b116000 {
compatible = "qcom,ipq6018-a53pll";
- reg = <0x0b116000 0x40>;
+ reg = <0x0 0x0b116000 0x0 0x40>;
#clock-cells = <0>;
clocks = <&xo>;
clock-names = "xo";
@@ -377,68 +377,68 @@
};
timer@b120000 {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
compatible = "arm,armv7-timer-mem";
- reg = <0x0b120000 0x1000>;
+ reg = <0x0 0x0b120000 0x0 0x1000>;
clock-frequency = <19200000>;
frame@b120000 {
frame-number = <0>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b121000 0x1000>,
- <0x0b122000 0x1000>;
+ reg = <0x0 0x0b121000 0x0 0x1000>,
+ <0x0 0x0b122000 0x0 0x1000>;
};
frame@b123000 {
frame-number = <1>;
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb123000 0x1000>;
+ reg = <0x0 0xb123000 0x0 0x1000>;
status = "disabled";
};
frame@b124000 {
frame-number = <2>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b124000 0x1000>;
+ reg = <0x0 0x0b124000 0x0 0x1000>;
status = "disabled";
};
frame@b125000 {
frame-number = <3>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b125000 0x1000>;
+ reg = <0x0 0x0b125000 0x0 0x1000>;
status = "disabled";
};
frame@b126000 {
frame-number = <4>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b126000 0x1000>;
+ reg = <0x0 0x0b126000 0x0 0x1000>;
status = "disabled";
};
frame@b127000 {
frame-number = <5>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b127000 0x1000>;
+ reg = <0x0 0x0b127000 0x0 0x1000>;
status = "disabled";
};
frame@b128000 {
frame-number = <6>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b128000 0x1000>;
+ reg = <0x0 0x0b128000 0x0 0x1000>;
status = "disabled";
};
};
q6v5_wcss: remoteproc@cd00000 {
compatible = "qcom,ipq8074-wcss-pil";
- reg = <0x0cd00000 0x4040>,
- <0x004ab000 0x20>;
+ reg = <0x0 0x0cd00000 0x0 0x4040>,
+ <0x0 0x004ab000 0x0 0x20>;
reg-names = "qdsp6",
"rmb";
interrupts-extended = <&intc GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
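
With #address-cells and #size-cells raised to <2> in the soc node above, every reg property now carries a 64-bit address and a 64-bit size split into high/low 32-bit cells, which is why each entry gained two leading 0x0 cells. A minimal C sketch of how a consumer combines such a cell pair (the helper name is illustrative, not a kernel API):

    #include <stdint.h>

    /* Combine two 32-bit DT cells into one 64-bit value, e.g.
     * reg = <0x0 0x078b1000 0x0 0x200> -> address 0x078b1000, size 0x200. */
    static uint64_t dt_cells_to_u64(uint32_t hi, uint32_t lo)
    {
            return ((uint64_t)hi << 32) | lo;
    }
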
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 9cbf963aa068..c29643442e91 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -28,6 +28,12 @@
clock-frequency = <0>;
};
+ audio_clk_b: audio_clk_b {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
audio_clk_c: audio_clk_c {
compatible = "fixed-clock";
#clock-cells = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
index 35bd6b904b9c..337681038519 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
@@ -243,7 +243,6 @@
interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pmic_int>;
- rockchip,system-power-controller;
wakeup-source;
#clock-cells = <1>;
clock-output-names = "rk808-clkout1", "xin32k";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
index be7a31d81632..2ee07d15a6e3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
@@ -20,7 +20,7 @@
gmac_clk: gmac-clock {
compatible = "fixed-clock";
clock-frequency = <125000000>;
- clock-output-names = "gmac_clk";
+ clock-output-names = "gmac_clkin";
#clock-cells = <0>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index e7a459fa4322..20309076dbac 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -74,14 +74,14 @@
label = "red:diy";
gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
default-state = "off";
- linux,default-trigger = "mmc1";
+ linux,default-trigger = "mmc2";
};
yellow_led: led-2 {
label = "yellow:yellow-led";
gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
default-state = "off";
- linux,default-trigger = "mmc0";
+ linux,default-trigger = "mmc1";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index ada724b12f01..7a9a7aca86c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -29,6 +29,9 @@
i2c6 = &i2c6;
i2c7 = &i2c7;
i2c8 = &i2c8;
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 17a2df6a263e..5e7d86cf5dfa 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -500,6 +500,7 @@ CONFIG_GPIO_ALTERA=m
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_MB86S7X=y
CONFIG_GPIO_MPC8XXX=y
+CONFIG_GPIO_MXC=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_UNIPHIER=y
@@ -1081,6 +1082,7 @@ CONFIG_CRYPTO_DEV_CCREE=m
CONFIG_CRYPTO_DEV_HISI_SEC2=m
CONFIG_CRYPTO_DEV_HISI_ZIP=m
CONFIG_CRYPTO_DEV_HISI_HPRE=m
+CONFIG_CRYPTO_DEV_HISI_TRNG=m
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 395bbf64b2ab..34b8a89197be 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -10,7 +10,7 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index e90386a7db8e..b70ac76f2610 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -195,7 +195,6 @@ SYM_FUNC_START(chacha_4block_xor_neon)
adr_l x10, .Lpermute
and x5, x4, #63
add x10, x10, x5
- add x11, x10, #64
//
// This function encrypts four consecutive ChaCha blocks by loading
@@ -645,11 +644,11 @@ CPU_BE( rev a15, a15 )
zip2 v31.4s, v14.4s, v15.4s
eor a15, a15, w9
- mov x3, #64
+ add x3, x2, x4
+ sub x3, x3, #128 // start of last block
+
subs x5, x4, #128
- add x6, x5, x2
- csel x3, x3, xzr, ge
- csel x2, x2, x6, ge
+ csel x2, x2, x3, ge
// interleave 64-bit words in state n, n+2
zip1 v0.2d, v16.2d, v18.2d
@@ -658,13 +657,10 @@ CPU_BE( rev a15, a15 )
zip1 v8.2d, v17.2d, v19.2d
zip2 v12.2d, v17.2d, v19.2d
stp a2, a3, [x1, #-56]
- ld1 {v16.16b-v19.16b}, [x2], x3
subs x6, x4, #192
- ccmp x3, xzr, #4, lt
- add x7, x6, x2
- csel x3, x3, xzr, eq
- csel x2, x2, x7, eq
+ ld1 {v16.16b-v19.16b}, [x2], #64
+ csel x2, x2, x3, ge
zip1 v1.2d, v20.2d, v22.2d
zip2 v5.2d, v20.2d, v22.2d
@@ -672,13 +668,10 @@ CPU_BE( rev a15, a15 )
zip1 v9.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
stp a6, a7, [x1, #-40]
- ld1 {v20.16b-v23.16b}, [x2], x3
subs x7, x4, #256
- ccmp x3, xzr, #4, lt
- add x8, x7, x2
- csel x3, x3, xzr, eq
- csel x2, x2, x8, eq
+ ld1 {v20.16b-v23.16b}, [x2], #64
+ csel x2, x2, x3, ge
zip1 v2.2d, v24.2d, v26.2d
zip2 v6.2d, v24.2d, v26.2d
@@ -686,12 +679,10 @@ CPU_BE( rev a15, a15 )
zip1 v10.2d, v25.2d, v27.2d
zip2 v14.2d, v25.2d, v27.2d
stp a10, a11, [x1, #-24]
- ld1 {v24.16b-v27.16b}, [x2], x3
subs x8, x4, #320
- ccmp x3, xzr, #4, lt
- add x9, x8, x2
- csel x2, x2, x9, eq
+ ld1 {v24.16b-v27.16b}, [x2], #64
+ csel x2, x2, x3, ge
zip1 v3.2d, v28.2d, v30.2d
zip2 v7.2d, v28.2d, v30.2d
@@ -699,151 +690,105 @@ CPU_BE( rev a15, a15 )
zip1 v11.2d, v29.2d, v31.2d
zip2 v15.2d, v29.2d, v31.2d
stp a14, a15, [x1, #-8]
+
+ tbnz x5, #63, .Lt128
ld1 {v28.16b-v31.16b}, [x2]
// xor with corresponding input, write to output
- tbnz x5, #63, 0f
eor v16.16b, v16.16b, v0.16b
eor v17.16b, v17.16b, v1.16b
eor v18.16b, v18.16b, v2.16b
eor v19.16b, v19.16b, v3.16b
- st1 {v16.16b-v19.16b}, [x1], #64
- cbz x5, .Lout
- tbnz x6, #63, 1f
+ tbnz x6, #63, .Lt192
+
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v6.16b
eor v23.16b, v23.16b, v7.16b
- st1 {v20.16b-v23.16b}, [x1], #64
- cbz x6, .Lout
- tbnz x7, #63, 2f
+ st1 {v16.16b-v19.16b}, [x1], #64
+ tbnz x7, #63, .Lt256
+
eor v24.16b, v24.16b, v8.16b
eor v25.16b, v25.16b, v9.16b
eor v26.16b, v26.16b, v10.16b
eor v27.16b, v27.16b, v11.16b
- st1 {v24.16b-v27.16b}, [x1], #64
- cbz x7, .Lout
- tbnz x8, #63, 3f
+ st1 {v20.16b-v23.16b}, [x1], #64
+ tbnz x8, #63, .Lt320
+
eor v28.16b, v28.16b, v12.16b
eor v29.16b, v29.16b, v13.16b
eor v30.16b, v30.16b, v14.16b
eor v31.16b, v31.16b, v15.16b
+
+ st1 {v24.16b-v27.16b}, [x1], #64
st1 {v28.16b-v31.16b}, [x1]
.Lout: frame_pop
ret
- // fewer than 128 bytes of in/output
-0: ld1 {v8.16b}, [x10]
- ld1 {v9.16b}, [x11]
- movi v10.16b, #16
- sub x2, x1, #64
- add x1, x1, x5
- ld1 {v16.16b-v19.16b}, [x2]
- tbl v4.16b, {v0.16b-v3.16b}, v8.16b
- tbx v20.16b, {v16.16b-v19.16b}, v9.16b
- add v8.16b, v8.16b, v10.16b
- add v9.16b, v9.16b, v10.16b
- tbl v5.16b, {v0.16b-v3.16b}, v8.16b
- tbx v21.16b, {v16.16b-v19.16b}, v9.16b
- add v8.16b, v8.16b, v10.16b
- add v9.16b, v9.16b, v10.16b
- tbl v6.16b, {v0.16b-v3.16b}, v8.16b
- tbx v22.16b, {v16.16b-v19.16b}, v9.16b
- add v8.16b, v8.16b, v10.16b
- add v9.16b, v9.16b, v10.16b
- tbl v7.16b, {v0.16b-v3.16b}, v8.16b
- tbx v23.16b, {v16.16b-v19.16b}, v9.16b
-
- eor v20.16b, v20.16b, v4.16b
- eor v21.16b, v21.16b, v5.16b
- eor v22.16b, v22.16b, v6.16b
- eor v23.16b, v23.16b, v7.16b
- st1 {v20.16b-v23.16b}, [x1]
- b .Lout
-
// fewer than 192 bytes of in/output
-1: ld1 {v8.16b}, [x10]
- ld1 {v9.16b}, [x11]
- movi v10.16b, #16
- add x1, x1, x6
- tbl v0.16b, {v4.16b-v7.16b}, v8.16b
- tbx v20.16b, {v16.16b-v19.16b}, v9.16b
- add v8.16b, v8.16b, v10.16b
- add v9.16b, v9.16b, v10.16b
- tbl v1.16b, {v4.16b-v7.16b}, v8.16b
- tbx v21.16b, {v16.16b-v19.16b}, v9.16b
- add v8.16b, v8.16b, v10.16b
- add v9.16b, v9.16b, v10.16b
- tbl v2.16b, {v4.16b-v7.16b}, v8.16b
- tbx v22.16b, {v16.16b-v19.16b}, v9.16b
- add v8.16b, v8.16b, v10.16b
- add v9.16b, v9.16b, v10.16b
- tbl v3.16b, {v4.16b-v7.16b}, v8.16b
- tbx v23.16b, {v16.16b-v19.16b}, v9.16b
-
- eor v20.16b, v20.16b, v0.16b
- eor v21.16b, v21.16b, v1.16b
- eor v22.16b, v22.16b, v2.16b
- eor v23.16b, v23.16b, v3.16b
- st1 {v20.16b-v23.16b}, [x1]
+.Lt192: cbz x5, 1f // exactly 128 bytes?
+ ld1 {v28.16b-v31.16b}, [x10]
+ add x5, x5, x1
+ tbl v28.16b, {v4.16b-v7.16b}, v28.16b
+ tbl v29.16b, {v4.16b-v7.16b}, v29.16b
+ tbl v30.16b, {v4.16b-v7.16b}, v30.16b
+ tbl v31.16b, {v4.16b-v7.16b}, v31.16b
+
+0: eor v20.16b, v20.16b, v28.16b
+ eor v21.16b, v21.16b, v29.16b
+ eor v22.16b, v22.16b, v30.16b
+ eor v23.16b, v23.16b, v31.16b
+ st1 {v20.16b-v23.16b}, [x5] // overlapping stores
+1: st1 {v16.16b-v19.16b}, [x1]
b .Lout
+ // fewer than 128 bytes of in/output
+.Lt128: ld1 {v28.16b-v31.16b}, [x10]
+ add x5, x5, x1
+ sub x1, x1, #64
+ tbl v28.16b, {v0.16b-v3.16b}, v28.16b
+ tbl v29.16b, {v0.16b-v3.16b}, v29.16b
+ tbl v30.16b, {v0.16b-v3.16b}, v30.16b
+ tbl v31.16b, {v0.16b-v3.16b}, v31.16b
+ ld1 {v16.16b-v19.16b}, [x1] // reload first output block
+ b 0b
+
// fewer than 256 bytes of in/output
-2: ld1 {v4.16b}, [x10]
- ld1 {v5.16b}, [x11]
- movi v6.16b, #16
- add x1, x1, x7
+.Lt256: cbz x6, 2f // exactly 192 bytes?
+ ld1 {v4.16b-v7.16b}, [x10]
+ add x6, x6, x1
tbl v0.16b, {v8.16b-v11.16b}, v4.16b
- tbx v24.16b, {v20.16b-v23.16b}, v5.16b
- add v4.16b, v4.16b, v6.16b
- add v5.16b, v5.16b, v6.16b
- tbl v1.16b, {v8.16b-v11.16b}, v4.16b
- tbx v25.16b, {v20.16b-v23.16b}, v5.16b
- add v4.16b, v4.16b, v6.16b
- add v5.16b, v5.16b, v6.16b
- tbl v2.16b, {v8.16b-v11.16b}, v4.16b
- tbx v26.16b, {v20.16b-v23.16b}, v5.16b
- add v4.16b, v4.16b, v6.16b
- add v5.16b, v5.16b, v6.16b
- tbl v3.16b, {v8.16b-v11.16b}, v4.16b
- tbx v27.16b, {v20.16b-v23.16b}, v5.16b
-
- eor v24.16b, v24.16b, v0.16b
- eor v25.16b, v25.16b, v1.16b
- eor v26.16b, v26.16b, v2.16b
- eor v27.16b, v27.16b, v3.16b
- st1 {v24.16b-v27.16b}, [x1]
+ tbl v1.16b, {v8.16b-v11.16b}, v5.16b
+ tbl v2.16b, {v8.16b-v11.16b}, v6.16b
+ tbl v3.16b, {v8.16b-v11.16b}, v7.16b
+
+ eor v28.16b, v28.16b, v0.16b
+ eor v29.16b, v29.16b, v1.16b
+ eor v30.16b, v30.16b, v2.16b
+ eor v31.16b, v31.16b, v3.16b
+ st1 {v28.16b-v31.16b}, [x6] // overlapping stores
+2: st1 {v20.16b-v23.16b}, [x1]
b .Lout
// fewer than 320 bytes of in/output
-3: ld1 {v4.16b}, [x10]
- ld1 {v5.16b}, [x11]
- movi v6.16b, #16
- add x1, x1, x8
+.Lt320: cbz x7, 3f // exactly 256 bytes?
+ ld1 {v4.16b-v7.16b}, [x10]
+ add x7, x7, x1
tbl v0.16b, {v12.16b-v15.16b}, v4.16b
- tbx v28.16b, {v24.16b-v27.16b}, v5.16b
- add v4.16b, v4.16b, v6.16b
- add v5.16b, v5.16b, v6.16b
- tbl v1.16b, {v12.16b-v15.16b}, v4.16b
- tbx v29.16b, {v24.16b-v27.16b}, v5.16b
- add v4.16b, v4.16b, v6.16b
- add v5.16b, v5.16b, v6.16b
- tbl v2.16b, {v12.16b-v15.16b}, v4.16b
- tbx v30.16b, {v24.16b-v27.16b}, v5.16b
- add v4.16b, v4.16b, v6.16b
- add v5.16b, v5.16b, v6.16b
- tbl v3.16b, {v12.16b-v15.16b}, v4.16b
- tbx v31.16b, {v24.16b-v27.16b}, v5.16b
+ tbl v1.16b, {v12.16b-v15.16b}, v5.16b
+ tbl v2.16b, {v12.16b-v15.16b}, v6.16b
+ tbl v3.16b, {v12.16b-v15.16b}, v7.16b
eor v28.16b, v28.16b, v0.16b
eor v29.16b, v29.16b, v1.16b
eor v30.16b, v30.16b, v2.16b
eor v31.16b, v31.16b, v3.16b
- st1 {v28.16b-v31.16b}, [x1]
+ st1 {v28.16b-v31.16b}, [x7] // overlapping stores
+3: st1 {v24.16b-v27.16b}, [x1]
b .Lout
SYM_FUNC_END(chacha_4block_xor_neon)
@@ -851,7 +796,7 @@ SYM_FUNC_END(chacha_4block_xor_neon)
.align L1_CACHE_SHIFT
.Lpermute:
.set .Li, 0
- .rept 192
+ .rept 128
.byte (.Li - 64)
.set .Li, .Li + 1
.endr
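
The rewritten ChaCha tail handling above drops the per-lane tbl/tbx shuffles in favour of overlapping loads and stores: the final partial block is processed over a full 64-byte window that ends exactly at the buffer's end, with the .Lpermute table (now only 128 bytes) rotating register contents so the bytes line up. A rough C analogue of the overlapping-store idea, assuming ks already holds at least len contiguous keystream bytes (a sketch of the concept, not the kernel code):

    #include <stddef.h>

    typedef unsigned char u8;

    /* Requires len >= 64: XOR full 64-byte blocks, then redo a full
     * 64-byte window ending exactly at len. The overlap rewrites
     * already-produced bytes with identical values, so no dedicated
     * partial-block path is needed. */
    static void xor_with_overlap(u8 *dst, const u8 *src, const u8 *ks,
                                 size_t len)
    {
            size_t i, j;

            for (i = 0; i + 64 <= len; i += 64)
                    for (j = 0; j < 64; j++)
                            dst[i + j] = src[i + j] ^ ks[i + j];

            if (i < len) {
                    size_t last = len - 64; /* overlaps previous block */

                    for (j = 0; j < 64; j++)
                            dst[last + j] = src[last + j] ^ ks[last + j];
            }
    }
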
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index 6b958dcdf136..7868330dd54e 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -544,7 +544,22 @@ CPU_LE( rev w8, w8 )
ext XL.16b, XL.16b, XL.16b, #8
rev64 XL.16b, XL.16b
eor XL.16b, XL.16b, KS0.16b
+
+ .if \enc == 1
st1 {XL.16b}, [x10] // store tag
+ .else
+ ldp x11, x12, [sp, #40] // load tag pointer and authsize
+ adr_l x17, .Lpermute_table
+ ld1 {KS0.16b}, [x11] // load supplied tag
+ add x17, x17, x12
+ ld1 {KS1.16b}, [x17] // load permute vector
+
+ cmeq XL.16b, XL.16b, KS0.16b // compare tags
+ mvn XL.16b, XL.16b // -1 for fail, 0 for pass
+ tbl XL.16b, {XL.16b}, KS1.16b // keep authsize bytes only
+ sminv b0, XL.16b // signed minimum across XL
+ smov w0, v0.b[0] // return b0
+ .endif
4: ldp x29, x30, [sp], #32
ret
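
The .else branch added above moves GCM tag verification into the NEON code: cmeq marks matching bytes as 0xff, mvn flips that to 0x00 for match and 0xff for mismatch, tbl masks everything beyond authsize, and sminv collapses the vector to a single 0 (pass) or -1 (fail) return value. A byte-wise C equivalent of that constant-time comparison, in the spirit of crypto_memneq() (sketch only):

    typedef unsigned char u8;

    /* Returns 0 when the first authsize bytes match, nonzero otherwise,
     * touching every byte so timing does not leak the mismatch position. */
    static int tag_mismatch(const u8 *calc, const u8 *given, int authsize)
    {
            u8 diff = 0;
            int i;

            for (i = 0; i < authsize; i++)
                    diff |= calc[i] ^ given[i];

            return diff;
    }
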
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 8536008e3e35..720cd3a58da3 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -55,10 +55,10 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
u64 const h[][2], u64 dg[], u8 ctr[],
u32 const rk[], int rounds, u8 tag[]);
-
-asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
- u64 const h[][2], u64 dg[], u8 ctr[],
- u32 const rk[], int rounds, u8 tag[]);
+asmlinkage int pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
+ u64 const h[][2], u64 dg[], u8 ctr[],
+ u32 const rk[], int rounds, const u8 l[],
+ const u8 tag[], u64 authsize);
static int ghash_init(struct shash_desc *desc)
{
@@ -168,7 +168,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
put_unaligned_be64(ctx->digest[1], dst);
put_unaligned_be64(ctx->digest[0], dst + 8);
- *ctx = (struct ghash_desc_ctx){};
+ memzero_explicit(ctx, sizeof(*ctx));
return 0;
}
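
The switch from a zeroing struct assignment to memzero_explicit() here (and in the poly1305 and sha3 glue code below) is not cosmetic: a compiler may delete a plain zeroing of an object that is never read again, leaving hash or key state in memory. memzero_explicit() adds a barrier so the wipe survives optimization. A minimal sketch of the pattern, with an inline barrier standing in for the kernel's barrier_data():

    #include <string.h>

    static void explicit_zero(void *p, size_t n)
    {
            memset(p, 0, n);
            /* Stop the compiler from proving the memset dead and
             * removing it. */
            __asm__ __volatile__("" : : "r"(p) : "memory");
    }
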
@@ -458,6 +458,7 @@ static int gcm_decrypt(struct aead_request *req)
unsigned int authsize = crypto_aead_authsize(aead);
int nrounds = num_rounds(&ctx->aes_key);
struct skcipher_walk walk;
+ u8 otag[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u8 iv[AES_BLOCK_SIZE];
u64 dg[2] = {};
@@ -474,9 +475,15 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
+ scatterwalk_map_and_copy(otag, req->src,
+ req->assoclen + req->cryptlen - authsize,
+ authsize, 0);
+
err = skcipher_walk_aead_decrypt(&walk, req, false);
if (likely(crypto_simd_usable())) {
+ int ret;
+
do {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
@@ -493,9 +500,10 @@ static int gcm_decrypt(struct aead_request *req)
}
kernel_neon_begin();
- pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h,
- dg, iv, ctx->aes_key.key_enc, nrounds,
- tag);
+ ret = pmull_gcm_decrypt(nbytes, dst, src,
+ ctx->ghash_key.h,
+ dg, iv, ctx->aes_key.key_enc,
+ nrounds, tag, otag, authsize);
kernel_neon_end();
if (unlikely(!nbytes))
@@ -507,6 +515,11 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
} while (walk.nbytes);
+
+ if (err)
+ return err;
+ if (ret)
+ return -EBADMSG;
} else {
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -548,23 +561,20 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_done(&walk, 0);
}
+ if (err)
+ return err;
+
put_unaligned_be64(dg[1], tag);
put_unaligned_be64(dg[0], tag + 8);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
aes_encrypt(&ctx->aes_key, iv, iv);
crypto_xor(tag, iv, AES_BLOCK_SIZE);
- }
-
- if (err)
- return err;
- /* compare calculated auth tag with the stored one */
- scatterwalk_map_and_copy(buf, req->src,
- req->assoclen + req->cryptlen - authsize,
- authsize, 0);
-
- if (crypto_memneq(tag, buf, authsize))
- return -EBADMSG;
+ if (crypto_memneq(tag, otag, authsize)) {
+ memzero_explicit(tag, AES_BLOCK_SIZE);
+ return -EBADMSG;
+ }
+ }
return 0;
}
diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl
index 6e5576d19af8..cbc980fb02e3 100644
--- a/arch/arm64/crypto/poly1305-armv8.pl
+++ b/arch/arm64/crypto/poly1305-armv8.pl
@@ -840,7 +840,6 @@ poly1305_blocks_neon:
ldp d14,d15,[sp,#64]
addp $ACC2,$ACC2,$ACC2
ldr x30,[sp,#8]
- .inst 0xd50323bf // autiasp
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
@@ -882,6 +881,7 @@ poly1305_blocks_neon:
str x4,[$ctx,#8] // set is_base2_26
ldr x29,[sp],#80
+ .inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
diff --git a/arch/arm64/crypto/poly1305-core.S_shipped b/arch/arm64/crypto/poly1305-core.S_shipped
index 8d1c4e420ccd..fb2822abf63a 100644
--- a/arch/arm64/crypto/poly1305-core.S_shipped
+++ b/arch/arm64/crypto/poly1305-core.S_shipped
@@ -779,7 +779,6 @@ poly1305_blocks_neon:
ldp d14,d15,[sp,#64]
addp v21.2d,v21.2d,v21.2d
ldr x30,[sp,#8]
- .inst 0xd50323bf // autiasp
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
@@ -821,6 +820,7 @@ poly1305_blocks_neon:
str x4,[x0,#8] // set is_base2_26
ldr x29,[sp],#80
+ .inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
index f33ada70c4ed..683de671741a 100644
--- a/arch/arm64/crypto/poly1305-glue.c
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -177,7 +177,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
}
poly1305_emit(&dctx->h, dst, dctx->s);
- *dctx = (struct poly1305_desc_ctx){};
+ memzero_explicit(dctx, sizeof(*dctx));
}
EXPORT_SYMBOL(poly1305_final_arch);
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index c63b99211db3..c93121bcfdeb 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -10,7 +10,7 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 5e956d7582a5..31ba3da5e61b 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -10,7 +10,7 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 77bc6e72abae..9462f6088b3f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -10,7 +10,7 @@
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index 9a4bbfc45f40..e5a2936f0886 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -94,7 +94,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
if (digest_size & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
- *sctx = (struct sha3_state){};
+ memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index dc890a719f54..faa83f6cf376 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -14,7 +14,7 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 370ccb29602f..2acff1c7df5d 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -8,7 +8,7 @@
#include <crypto/internal/hash.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <asm/neon.h>
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
new file mode 100644
index 000000000000..5df500dcc627
--- /dev/null
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ALTERNATIVE_MACROS_H
+#define __ASM_ALTERNATIVE_MACROS_H
+
+#include <asm/cpucaps.h>
+
+#define ARM64_CB_PATCH ARM64_NCAPS
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+#define ALTINSTR_ENTRY(feature) \
+ " .word 661b - .\n" /* label */ \
+ " .word 663f - .\n" /* new instruction */ \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+
+#define ALTINSTR_ENTRY_CB(feature, cb) \
+ " .word 661b - .\n" /* label */ \
+ " .word " __stringify(cb) "- .\n" /* callback */ \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+
+/*
+ * alternative assembly primitive:
+ *
+ * If any of these .org directives fails, it means that insn1 and insn2
+ * don't have the same length. This used to be written as
+ *
+ * .if ((664b-663b) != (662b-661b))
+ * .error "Alternatives instruction length mismatch"
+ * .endif
+ *
+ * but most assemblers die if insn1 or insn2 has a .inst. This should
+ * be fixed in a binutils release later than 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ *
+ * Alternatives with callbacks do not generate replacement instructions.
+ */
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature) \
+ ".popsection\n" \
+ ".subsection 1\n" \
+ "663:\n\t" \
+ newinstr "\n" \
+ "664:\n\t" \
+ ".org . - (664b-663b) + (662b-661b)\n\t" \
+ ".org . - (662b-661b) + (664b-663b)\n\t" \
+ ".previous\n" \
+ ".endif\n"
+
+#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY_CB(feature, cb) \
+ ".popsection\n" \
+ "663:\n\t" \
+ "664:\n\t" \
+ ".endif\n"
+
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
+ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+
+#define ALTERNATIVE_CB(oldinstr, cb) \
+ __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
+#else
+
+#include <asm/assembler.h>
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+ .word \orig_offset - .
+ .word \alt_offset - .
+ .hword \feature
+ .byte \orig_len
+ .byte \alt_len
+.endm
+
+.macro alternative_insn insn1, insn2, cap, enable = 1
+ .if \enable
+661: \insn1
+662: .pushsection .altinstructions, "a"
+ altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+ .popsection
+ .subsection 1
+663: \insn2
+664: .previous
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
+ .endif
+.endm
+
+/*
+ * Alternative sequences
+ *
+ * The code for the case where the capability is not present will be
+ * assembled and linked as normal. There are no restrictions on this
+ * code.
+ *
+ * The code for the case where the capability is present will be
+ * assembled into a special section to be used for dynamic patching.
+ * Code for that case must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ * sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ * alternative sequence it is defined in (branches into an
+ * alternative sequence are not fixed up).
+ */
+
+/*
+ * Begin an alternative code sequence.
+ */
+.macro alternative_if_not cap
+ .set .Lasm_alt_mode, 0
+ .pushsection .altinstructions, "a"
+ altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+ .popsection
+661:
+.endm
+
+.macro alternative_if cap
+ .set .Lasm_alt_mode, 1
+ .pushsection .altinstructions, "a"
+ altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
+ .popsection
+ .subsection 1
+ .align 2 /* So GAS knows label 661 is suitably aligned */
+661:
+.endm
+
+.macro alternative_cb cb
+ .set .Lasm_alt_mode, 0
+ .pushsection .altinstructions, "a"
+ altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+ .popsection
+661:
+.endm
+
+/*
+ * Provide the other half of the alternative code sequence.
+ */
+.macro alternative_else
+662:
+ .if .Lasm_alt_mode==0
+ .subsection 1
+ .else
+ .previous
+ .endif
+663:
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif
+664:
+ .if .Lasm_alt_mode==0
+ .previous
+ .endif
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
+.endm
+
+/*
+ * Callback-based alternative epilogue
+ */
+.macro alternative_cb_end
+662:
+.endm
+
+/*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+ */
+.macro alternative_else_nop_endif
+alternative_else
+ nops (662b-661b) / AARCH64_INSN_SIZE
+alternative_endif
+.endm
+
+#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
+ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+
+.macro user_alt, label, oldinstr, newinstr, cond
+9999: alternative_insn "\oldinstr", "\newinstr", \cond
+ _asm_extable 9999b, \label
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ *
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * N.B. If CONFIG_FOO is specified, but not selected, the whole block
+ * will be omitted, including oldinstr.
+ */
+#define ALTERNATIVE(oldinstr, newinstr, ...) \
+ _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
+
+#endif /* __ASM_ALTERNATIVE_MACROS_H */
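
The usage comment closing the new header is terse; a concrete call site looks like the following, where the first instruction is emitted at build time and patched to the second at boot if the capability is detected (capability and instruction choice here are illustrative):

    /* Both sequences are 4 bytes, satisfying the equal-length rule
     * documented in the header above. */
    asm(ALTERNATIVE("nop", "isb", ARM64_HAS_VIRT_HOST_EXTN));
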
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 619db9b4c9d5..a38b92e11811 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -2,17 +2,13 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpucaps.h>
-#include <asm/insn.h>
-
-#define ARM64_CB_PATCH ARM64_NCAPS
+#include <asm/alternative-macros.h>
#ifndef __ASSEMBLY__
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
-#include <linux/stringify.h>
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
@@ -35,264 +31,5 @@ void apply_alternatives_module(void *start, size_t length);
static inline void apply_alternatives_module(void *start, size_t length) { }
#endif
-#define ALTINSTR_ENTRY(feature) \
- " .word 661b - .\n" /* label */ \
- " .word 663f - .\n" /* new instruction */ \
- " .hword " __stringify(feature) "\n" /* feature bit */ \
- " .byte 662b-661b\n" /* source len */ \
- " .byte 664f-663f\n" /* replacement len */
-
-#define ALTINSTR_ENTRY_CB(feature, cb) \
- " .word 661b - .\n" /* label */ \
- " .word " __stringify(cb) "- .\n" /* callback */ \
- " .hword " __stringify(feature) "\n" /* feature bit */ \
- " .byte 662b-661b\n" /* source len */ \
- " .byte 664f-663f\n" /* replacement len */
-
-/*
- * alternative assembly primitive:
- *
- * If any of these .org directive fail, it means that insn1 and insn2
- * don't have the same length. This used to be written as
- *
- * .if ((664b-663b) != (662b-661b))
- * .error "Alternatives instruction length mismatch"
- * .endif
- *
- * but most assemblers die if insn1 or insn2 have a .inst. This should
- * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
- * containing commit 4e4d08cf7399b606 or c1baaddf8861).
- *
- * Alternatives with callbacks do not generate replacement instructions.
- */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
- ".if "__stringify(cfg_enabled)" == 1\n" \
- "661:\n\t" \
- oldinstr "\n" \
- "662:\n" \
- ".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(feature) \
- ".popsection\n" \
- ".subsection 1\n" \
- "663:\n\t" \
- newinstr "\n" \
- "664:\n\t" \
- ".org . - (664b-663b) + (662b-661b)\n\t" \
- ".org . - (662b-661b) + (664b-663b)\n\t" \
- ".previous\n" \
- ".endif\n"
-
-#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
- ".if "__stringify(cfg_enabled)" == 1\n" \
- "661:\n\t" \
- oldinstr "\n" \
- "662:\n" \
- ".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY_CB(feature, cb) \
- ".popsection\n" \
- "663:\n\t" \
- "664:\n\t" \
- ".endif\n"
-
-#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
- __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
-
-#define ALTERNATIVE_CB(oldinstr, cb) \
- __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
-#else
-
-#include <asm/assembler.h>
-
-.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
- .word \orig_offset - .
- .word \alt_offset - .
- .hword \feature
- .byte \orig_len
- .byte \alt_len
-.endm
-
-.macro alternative_insn insn1, insn2, cap, enable = 1
- .if \enable
-661: \insn1
-662: .pushsection .altinstructions, "a"
- altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
- .popsection
- .subsection 1
-663: \insn2
-664: .previous
- .org . - (664b-663b) + (662b-661b)
- .org . - (662b-661b) + (664b-663b)
- .endif
-.endm
-
-/*
- * Alternative sequences
- *
- * The code for the case where the capability is not present will be
- * assembled and linked as normal. There are no restrictions on this
- * code.
- *
- * The code for the case where the capability is present will be
- * assembled into a special section to be used for dynamic patching.
- * Code for that case must:
- *
- * 1. Be exactly the same length (in bytes) as the default code
- * sequence.
- *
- * 2. Not contain a branch target that is used outside of the
- * alternative sequence it is defined in (branches into an
- * alternative sequence are not fixed up).
- */
-
-/*
- * Begin an alternative code sequence.
- */
-.macro alternative_if_not cap
- .set .Lasm_alt_mode, 0
- .pushsection .altinstructions, "a"
- altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
- .popsection
-661:
-.endm
-
-.macro alternative_if cap
- .set .Lasm_alt_mode, 1
- .pushsection .altinstructions, "a"
- altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
- .popsection
- .subsection 1
- .align 2 /* So GAS knows label 661 is suitably aligned */
-661:
-.endm
-
-.macro alternative_cb cb
- .set .Lasm_alt_mode, 0
- .pushsection .altinstructions, "a"
- altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
- .popsection
-661:
-.endm
-
-/*
- * Provide the other half of the alternative code sequence.
- */
-.macro alternative_else
-662:
- .if .Lasm_alt_mode==0
- .subsection 1
- .else
- .previous
- .endif
-663:
-.endm
-
-/*
- * Complete an alternative code sequence.
- */
-.macro alternative_endif
-664:
- .if .Lasm_alt_mode==0
- .previous
- .endif
- .org . - (664b-663b) + (662b-661b)
- .org . - (662b-661b) + (664b-663b)
-.endm
-
-/*
- * Callback-based alternative epilogue
- */
-.macro alternative_cb_end
-662:
-.endm
-
-/*
- * Provides a trivial alternative or default sequence consisting solely
- * of NOPs. The number of NOPs is chosen automatically to match the
- * previous case.
- */
-.macro alternative_else_nop_endif
-alternative_else
- nops (662b-661b) / AARCH64_INSN_SIZE
-alternative_endif
-.endm
-
-#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
- alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-
-.macro user_alt, label, oldinstr, newinstr, cond
-9999: alternative_insn "\oldinstr", "\newinstr", \cond
- _asm_extable 9999b, \label
-.endm
-
-/*
- * Generate the assembly for UAO alternatives with exception table entries.
- * This is complicated as there is no post-increment or pair versions of the
- * unprivileged instructions, and USER() only works for single instructions.
- */
-#ifdef CONFIG_ARM64_UAO
- .macro uao_ldp l, reg1, reg2, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: ldp \reg1, \reg2, [\addr], \post_inc;
-8889: nop;
- nop;
- alternative_else
- ldtr \reg1, [\addr];
- ldtr \reg2, [\addr, #8];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
- .endm
-
- .macro uao_stp l, reg1, reg2, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: stp \reg1, \reg2, [\addr], \post_inc;
-8889: nop;
- nop;
- alternative_else
- sttr \reg1, [\addr];
- sttr \reg2, [\addr, #8];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
- .endm
-
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
- alternative_if_not ARM64_HAS_UAO
-8888: \inst \reg, [\addr], \post_inc;
- nop;
- alternative_else
- \alt_inst \reg, [\addr];
- add \addr, \addr, \post_inc;
- alternative_endif
-
- _asm_extable 8888b,\l;
- .endm
-#else
- .macro uao_ldp l, reg1, reg2, addr, post_inc
- USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
- .endm
- .macro uao_stp l, reg1, reg2, addr, post_inc
- USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
- .endm
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
- USER(\l, \inst \reg, [\addr], \post_inc)
- .endm
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
- *
- * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
- * N.B. If CONFIG_FOO is specified, but not selected, the whole block
- * will be omitted, including oldinstr.
- */
-#define ALTERNATIVE(oldinstr, newinstr, ...) \
- _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
-
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_ALTERNATIVE_H */
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f68a0e64482a..9990059be106 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -2,7 +2,7 @@
#ifndef __ASM_ASM_UACCESS_H
#define __ASM_ASM_UACCESS_H
-#include <asm/alternative.h>
+#include <asm/alternative-macros.h>
#include <asm/kernel-pgtable.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
@@ -15,10 +15,10 @@
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
bic \tmp1, \tmp1, #TTBR_ASID_MASK
- sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
+ sub \tmp1, \tmp1, #PAGE_SIZE // reserved_pg_dir just before swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
- add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+ add \tmp1, \tmp1, #PAGE_SIZE
msr ttbr1_el1, \tmp1 // set reserved ASID
isb
.endm
@@ -58,4 +58,33 @@ alternative_else_nop_endif
.endm
#endif
+/*
+ * Generate the assembly for LDTR/STTR with exception table entries.
+ * This is complicated as there are no post-increment or pair versions of the
+ * unprivileged instructions, and USER() only works for single instructions.
+ */
+ .macro user_ldp l, reg1, reg2, addr, post_inc
+8888: ldtr \reg1, [\addr];
+8889: ldtr \reg2, [\addr, #8];
+ add \addr, \addr, \post_inc;
+
+ _asm_extable 8888b,\l;
+ _asm_extable 8889b,\l;
+ .endm
+
+ .macro user_stp l, reg1, reg2, addr, post_inc
+8888: sttr \reg1, [\addr];
+8889: sttr \reg2, [\addr, #8];
+ add \addr, \addr, \post_inc;
+
+ _asm_extable 8888b,\l;
+ _asm_extable 8889b,\l;
+ .endm
+
+ .macro user_ldst l, inst, reg, addr, post_inc
+8888: \inst \reg, [\addr];
+ add \addr, \addr, \post_inc;
+
+ _asm_extable 8888b,\l;
+ .endm
#endif
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index e3d47b52161d..ec7720dbe2c8 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -10,6 +10,7 @@
* #imm16 values used for BRK instruction generation
* 0x004: for installing kprobes
* 0x005: for installing uprobes
+ * 0x006: for kprobe software single-step
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
@@ -19,6 +20,7 @@
*/
#define KPROBES_BRK_IMM 0x004
#define UPROBES_BRK_IMM 0x005
+#define KPROBES_BRK_SS_IMM 0x006
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 0ac3e06a2118..63d43b5f82f6 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -24,6 +24,7 @@
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
#define ICACHE_POLICY_VPIPT 0
+#define ICACHE_POLICY_RESERVED 1
#define ICACHE_POLICY_VIPT 2
#define ICACHE_POLICY_PIPT 3
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 9384fd8fc13c..45217f21f1fe 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -140,6 +140,7 @@ int set_memory_valid(unsigned long addr, int numpages, int enable);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
#include <asm-generic/cacheflush.h>
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 42868dbd29fd..a7242ef2a2cd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -16,8 +16,6 @@
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
#define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
@@ -65,7 +63,9 @@
#define ARM64_HAS_ARMv8_4_TTL 55
#define ARM64_HAS_TLB_RANGE 56
#define ARM64_MTE 57
+#define ARM64_WORKAROUND_1508412 58
+#define ARM64_HAS_LDAPR 59
-#define ARM64_NCAPS 58
+#define ARM64_NCAPS 60
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f7e7144af174..1c406e8ae27e 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -268,6 +268,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
* CPU feature detected at boot time based on feature of one or more CPUs.
* All possible conflicts for a late CPU are ignored.
+ * NOTE: this means that a late CPU with the feature will *not* cause the
+ * capability to be advertised by cpus_have_*cap()!
*/
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
@@ -375,6 +377,23 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
return false;
}
+static __always_inline bool is_vhe_hyp_code(void)
+{
+ /* Only defined for code run in VHE hyp context */
+ return __is_defined(__KVM_VHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_nvhe_hyp_code(void)
+{
+ /* Only defined for code run in NVHE hyp context */
+ return __is_defined(__KVM_NVHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_hyp_code(void)
+{
+ return is_vhe_hyp_code() || is_nvhe_hyp_code();
+}
+
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
@@ -428,35 +447,40 @@ static __always_inline bool __cpus_have_const_cap(int num)
}
/*
- * Test for a capability, possibly with a runtime check.
+ * Test for a capability without a runtime check.
*
- * Before capabilities are finalized, this behaves as cpus_have_cap().
+ * Before capabilities are finalized, this will BUG().
* After capabilities are finalized, this is patched to avoid a runtime check.
*
* @num must be a compile-time constant.
*/
-static __always_inline bool cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_final_cap(int num)
{
if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
else
- return cpus_have_cap(num);
+ BUG();
}
/*
- * Test for a capability without a runtime check.
+ * Test for a capability, possibly with a runtime check for non-hyp code.
*
- * Before capabilities are finalized, this will BUG().
+ * For hyp code, this behaves the same as cpus_have_final_cap().
+ *
+ * For non-hyp code:
+ * Before capabilities are finalized, this behaves as cpus_have_cap().
* After capabilities are finalized, this is patched to avoid a runtime check.
*
* @num must be a compile-time constant.
*/
-static __always_inline bool cpus_have_final_cap(int num)
+static __always_inline bool cpus_have_const_cap(int num)
{
- if (system_capabilities_finalized())
+ if (is_hyp_code())
+ return cpus_have_final_cap(num);
+ else if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
else
- BUG();
+ return cpus_have_cap(num);
}
static inline void cpus_set_cap(unsigned int num)
@@ -645,10 +669,16 @@ static __always_inline bool system_supports_fpsimd(void)
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}
+static inline bool system_uses_hw_pan(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PAN) &&
+ cpus_have_const_cap(ARM64_HAS_PAN);
+}
+
static inline bool system_uses_ttbr0_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
- !cpus_have_const_cap(ARM64_HAS_PAN);
+ !system_uses_hw_pan();
}
static __always_inline bool system_supports_sve(void)
@@ -740,11 +770,26 @@ static inline bool cpu_has_hw_af(void)
ID_AA64MMFR1_HADBS_SHIFT);
}
+static inline bool cpu_has_pan(void)
+{
+ u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ return cpuid_feature_extract_unsigned_field(mmfr1,
+ ID_AA64MMFR1_PAN_SHIFT);
+}
+
#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
+#else
+static inline bool cpu_has_amu_feat(int cpu)
+{
+ return false;
+}
#endif
+/* Get a cpu that supports the Activity Monitors Unit (AMU) */
+extern int get_cpu_with_amu_feat(void);
+
static inline unsigned int get_vmid_bits(u64 mmfr1)
{
int vmid_bits;
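
The cpus_have_const_cap()/cpus_have_final_cap() rework above makes hyp code always take the finalized, patched check, while ordinary kernel code keeps its pre-finalization fallback. A hedged sketch of when each is appropriate (the two helpers called are hypothetical; ARM64_HAS_PAN is a real capability):

    /* Code that may run before capabilities are finalized: falls back
     * to the cpu_hwcaps bitmap early, gets patched later. */
    if (cpus_have_const_cap(ARM64_HAS_PAN))
            pan_setup();            /* hypothetical helper */

    /* Code known to run only after finalization (e.g. hyp code): no
     * fallback, and calling it too early BUG()s. */
    if (cpus_have_final_cap(ARM64_HAS_PAN))
            pan_fast_path();        /* hypothetical helper */
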
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7219cddeba66..ef5b040dee44 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -71,6 +71,7 @@
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
+#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define APM_CPU_PART_POTENZA 0x000
@@ -85,6 +86,8 @@
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
+#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
+#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@@ -105,6 +108,7 @@
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -114,6 +118,8 @@
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
+#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index ec213b4a1650..1c26d7baa67f 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -128,6 +128,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
{
unsigned long flags = regs->pstate & DAIF_MASK;
+ if (interrupts_enabled(regs))
+ trace_hardirqs_on();
+
/*
* We can't use local_daif_restore(regs->pstate) here as
* system_has_prio_mask_debugging() won't restore the I bit if it can
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 0b298f48f5bf..657c921fd784 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -53,6 +53,7 @@
/* kprobes BRK opcodes with ESR encoding */
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
+#define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
/* uprobes BRK opcodes with ESR encoding */
#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
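
The new single-step opcode follows the same encoding rule as its neighbours: a BRK instruction is the AARCH64_BREAK_MON base value with the 16-bit immediate placed in bits [20:5]. A small sketch of the arithmetic:

    #include <stdint.h>

    #define AARCH64_BREAK_MON 0xd4200000u   /* BRK #0 */

    /* Encode BRK #imm16; the immediate occupies bits [20:5]. */
    static uint32_t aarch64_brk(uint16_t imm16)
    {
            return AARCH64_BREAK_MON | ((uint32_t)imm16 << 5);
    }

    /* aarch64_brk(0x006) == 0xd42000c0: the kprobe single-step BRK. */
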
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 22c81f1edda2..85a3e49f92f4 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -104,6 +104,7 @@
/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
+#define ESR_ELx_FSC_LEVEL (0x03)
#define ESR_ELx_FSC_EXTABT (0x10)
#define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 99b9383cd036..78537393b650 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,8 +31,13 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void enter_from_user_mode(void);
-void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
+asmlinkage void exit_to_user_mode(void);
+void arm64_enter_nmi(struct pt_regs *regs);
+void arm64_exit_nmi(struct pt_regs *regs);
+void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index 1aae6f9962fc..9a1c22ce664b 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -10,6 +10,5 @@
#include <linux/sched.h>
extern unsigned long arch_align_stack(unsigned long sp);
-void uao_thread_switch(struct task_struct *next);
#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 97f6a63810ec..8e41faa37c69 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -16,7 +16,7 @@
do { \
unsigned int loops = FUTEX_MAX_LOOPS; \
\
- uaccess_enable(); \
+ uaccess_enable_privileged(); \
asm volatile( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
@@ -39,7 +39,7 @@ do { \
"+r" (loops) \
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
: "memory"); \
- uaccess_disable(); \
+ uaccess_disable_privileged(); \
} while (0)
static inline int
@@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
return -EFAULT;
uaddr = __uaccess_mask_ptr(_uaddr);
- uaccess_enable();
+ uaccess_enable_privileged();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
@@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
: "memory");
- uaccess_disable();
+ uaccess_disable_privileged();
if (!ret)
*uval = val;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 5ffa4bacdad3..cbfa7b6f2e09 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -13,11 +13,8 @@
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#define ack_bad_irq ack_bad_irq
+#include <asm-generic/hardirq.h>
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 4b39293d0f72..4ebb9c054ccc 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -10,8 +10,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
-/* A64 instructions are always 32 bits. */
-#define AARCH64_INSN_SIZE 4
+#include <asm/alternative.h>
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 19ca76ea60d9..587c504a4c8b 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -89,12 +89,6 @@
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-#define RESERVED_TTBR0_SIZE (PAGE_SIZE)
-#else
-#define RESERVED_TTBR0_SIZE (0)
-#endif
-
/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 97e511d645a2..5d38ff4a4806 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -16,7 +16,7 @@
#include <linux/percpu.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE 1
+#define MAX_INSN_SIZE 2
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
@@ -28,18 +28,11 @@ struct prev_kprobe {
unsigned int status;
};
-/* Single step context for kprobe */
-struct kprobe_step_ctx {
- unsigned long ss_pending;
- unsigned long match_addr;
-};
-
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
unsigned long saved_irqflag;
struct prev_kprobe prev_kprobe;
- struct kprobe_step_ctx ss_ctx;
};
void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5ef2669ccd6c..00bc6f1234ba 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -350,6 +350,11 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vc
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}
+static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+}
+
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0aecbab6a7fb..0cd9f0f75c13 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -118,6 +118,8 @@ struct kvm_arch {
*/
unsigned long *pmu_filter;
unsigned int pmuver;
+
+ u8 pfr0_csv2;
};
struct kvm_vcpu_fault_info {
@@ -239,6 +241,7 @@ enum vcpu_sysreg {
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+#define cp14_DBGVCR (DBGVCR32_EL2 * 2)
#define NR_COPRO_REGS (NR_SYS_REGS * 2)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index cd61239bae8c..556cb2d62b5b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -30,8 +30,8 @@
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
* of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
- >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SHIFT (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
/*
* PAGE_OFFSET - the virtual address of the start of the linear map, at the
@@ -44,17 +44,17 @@
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
-#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
+#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN))
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M)
-#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
+#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
-#define PCI_IO_END (VMEMMAP_START - SZ_2M)
+#define PCI_IO_END (VMEMMAP_START - SZ_8M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
-#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
+#define FIXADDR_TOP (VMEMMAP_START - SZ_32M)
#if VA_BITS > 48
#define VA_BITS_MIN (48)
@@ -76,10 +76,11 @@
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+ KASAN_SHADOW_OFFSET)
+#define PAGE_END (KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_THREAD_SHIFT 0
-#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN))
+#define PAGE_END (_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
@@ -167,7 +168,6 @@
#include <asm/bug.h>
extern u64 vabits_actual;
-#define PAGE_END (_PAGE_END(vabits_actual))
extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
@@ -238,11 +238,9 @@ static inline const void *__tag_set(const void *addr, u8 tag)
/*
- * The linear kernel range starts at the bottom of the virtual address
- * space. Testing the top bit for the start of the region is a
- * sufficient check and avoids having to worry about the tag.
+ * The linear kernel range starts at the bottom of the virtual address space.
*/
-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
+#define __is_lm_address(addr) (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
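
The __is_lm_address() change is the subtle part of this hunk: the old test assumed the linear map spans exactly the bottom half of the kernel VA range and checked only the top bit, while the new form measures the offset from PAGE_OFFSET against the span of the region, which stays correct when the linear region is extended for 52-bit configurations. A minimal user-space model of the new check, with made-up constants standing in for PAGE_OFFSET/PAGE_END:

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_OFFSET 0xffff000000000000ULL /* start of linear map */
#define FAKE_PAGE_END    0xffff800000000000ULL /* first address past it */

static int is_lm_address(uint64_t addr)
{
	/* Offset into the linear region, compared against its size;
	 * the AND also strips any tag bits above PAGE_OFFSET. */
	return (addr & ~FAKE_PAGE_OFFSET) < (FAKE_PAGE_END - FAKE_PAGE_OFFSET);
}

int main(void)
{
	printf("%d\n", is_lm_address(0xffff000000001000ULL)); /* 1 */
	printf("%d\n", is_lm_address(0xffff900000000000ULL)); /* 0 */
	return 0;
}
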
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0672236e1aea..0b3079fd28eb 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next)
}
/*
- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
+ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
+ unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
write_sysreg(ttbr, ttbr0_el1);
isb();
@@ -174,9 +174,9 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
* Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
* take CPU migration into account.
*/
-#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm);
+#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -195,7 +195,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
return;
if (mm == &init_mm)
- ttbr = __pa_symbol(empty_zero_page);
+ ttbr = __pa_symbol(reserved_pg_dir);
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
@@ -208,6 +208,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
}
#endif
+#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -248,15 +249,14 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
update_saved_ttbr0(tsk, next);
}
-#define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, current)
-
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);
+#include <asm-generic/mmu_context.h>
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_CONTEXT_H */
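
This hunk converts arm64 to <asm-generic/mmu_context.h>: hooks like destroy_context(), deactivate_mm() and activate_mm() now come from the generic header, and the self-referential defines (#define enter_lazy_tlb enter_lazy_tlb) are the signal that an arch override exists. The generic side is shaped roughly like this, simplified to void pointers so the fragment stands alone:

/* Fallback compiled in only when the arch has not defined the hook. */
#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(void *mm, void *tsk)
{
	/* default: nothing to do */
}
#endif
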
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 01a96d07ae74..42442a0ae2ab 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -260,6 +260,7 @@
#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+#define TCR_TBID1 (UL(1) << 52)
#define TCR_NFD0 (UL(1) << 53)
#define TCR_NFD1 (UL(1) << 54)
#define TCR_E0PD0 (UL(1) << 55)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4ff12a7adcfd..501562793ce2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -22,7 +22,7 @@
* and fixed mappings
*/
#define VMALLOC_START (MODULES_END)
-#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+#define VMALLOC_END (VMEMMAP_START - SZ_256M)
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
@@ -115,8 +115,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
@@ -124,9 +122,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
+ * Note that we can't make any assumptions based on the state of the access
+ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
+ * TLB.
*/
#define pte_accessible(mm, pte) \
- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
* p??_access_permitted() is true for valid user mappings (subject to the
@@ -164,13 +165,6 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
return pmd;
}
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
- return pte;
-}
-
static inline pte_t pte_mkwrite(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
@@ -196,6 +190,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
return pte;
}
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ /*
+ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+ * clear), set the PTE_DIRTY bit.
+ */
+ if (pte_hw_dirty(pte))
+ pte = pte_mkdirty(pte);
+
+ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ return pte;
+}
+
static inline pte_t pte_mkold(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_AF));
@@ -407,6 +415,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
+#define pmd_cont(pmd) pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
@@ -454,6 +463,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+#define set_pud_at(mm, addr, pudp, pud) set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
@@ -503,6 +513,9 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
+#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
+
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
@@ -519,6 +532,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
@@ -845,12 +859,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
pte = READ_ONCE(*ptep);
do {
old_pte = pte;
- /*
- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
- * clear), set the PTE_DIRTY bit.
- */
- if (pte_hw_dirty(pte))
- pte = pte_mkdirty(pte);
pte = pte_wrprotect(pte);
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
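
Relocating the pte_hw_dirty() transfer from ptep_set_wrprotect() into pte_wrprotect() itself means every write-protect path, including pmd_wrprotect() for THP, now latches hardware (DBM) dirtiness into the software PTE_DIRTY bit before write permission is removed, instead of only the one call site that used to do it. A compilable model of the bit manipulation, with illustrative bit positions rather than the real arm64 layout:

#include <assert.h>
#include <stdint.h>

#define PTE_WRITE	(1U << 0)	/* DBM: page is writable */
#define PTE_RDONLY	(1U << 1)
#define PTE_DIRTY	(1U << 2)	/* software dirty bit */

/* Hardware-dirty: writable and not (yet) marked read-only. */
static int pte_hw_dirty(uint32_t pte)
{
	return (pte & (PTE_WRITE | PTE_RDONLY)) == PTE_WRITE;
}

static uint32_t pte_wrprotect(uint32_t pte)
{
	if (pte_hw_dirty(pte))
		pte |= PTE_DIRTY;	/* latch dirtiness before protecting */
	pte &= ~PTE_WRITE;
	pte |= PTE_RDONLY;
	return pte;
}

int main(void)
{
	/* A hardware-dirty PTE must stay dirty after write protection. */
	assert(pte_wrprotect(PTE_WRITE) & PTE_DIRTY);
	return 0;
}
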
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 4266262101fe..006946745352 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -7,6 +7,8 @@
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H
+#include <asm/insn.h>
+
typedef u32 probe_opcode_t;
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fce8cbecd6bc..724249f37af5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -8,9 +8,6 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
-#define KERNEL_DS UL(-1)
-#define USER_DS ((UL(1) << VA_BITS) - 1)
-
/*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
* no point in shifting all network buffers by 2 bytes just to make some IP
@@ -48,6 +45,7 @@
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64 (UL(1) << vabits_actual)
+#define TASK_SIZE_MAX (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 997cf8c8cd52..e58bca832dff 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -16,6 +16,11 @@
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
+#define INIT_PSTATE_EL1 \
+ (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
+#define INIT_PSTATE_EL2 \
+ (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
+
/*
* PMR values used to mask/unmask interrupts.
*
@@ -188,11 +193,14 @@ struct pt_regs {
s32 syscallno;
u32 unused2;
#endif
-
- u64 orig_addr_limit;
+ u64 sdei_ttbr1;
/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
u64 pmr_save;
u64 stackframe[2];
+
+ /* Only valid for some EL1 exceptions. */
+ u64 lockdep_hardirqs;
+ u64 exit_rcu;
};
static inline bool in_syscall(struct pt_regs const *regs)
diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h
new file mode 100644
index 000000000000..1bce62fa908a
--- /dev/null
+++ b/arch/arm64/include/asm/rwonce.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+#ifndef __ASM_RWONCE_H
+#define __ASM_RWONCE_H
+
+#ifdef CONFIG_LTO
+
+#include <linux/compiler_types.h>
+#include <asm/alternative-macros.h>
+
+#ifndef BUILD_VDSO
+
+#ifdef CONFIG_AS_HAS_LDAPR
+#define __LOAD_RCPC(sfx, regs...) \
+ ALTERNATIVE( \
+ "ldar" #sfx "\t" #regs, \
+ ".arch_extension rcpc\n" \
+ "ldapr" #sfx "\t" #regs, \
+ ARM64_HAS_LDAPR)
+#else
+#define __LOAD_RCPC(sfx, regs...) "ldar" #sfx "\t" #regs
+#endif /* CONFIG_AS_HAS_LDAPR */
+
+/*
+ * When building with LTO, there is an increased risk of the compiler
+ * converting an address dependency headed by a READ_ONCE() invocation
+ * into a control dependency and consequently allowing for harmful
+ * reordering by the CPU.
+ *
+ * Ensure that such transformations are harmless by overriding the generic
+ * READ_ONCE() definition with one that provides RCpc acquire semantics
+ * when building with LTO.
+ */
+#define __READ_ONCE(x) \
+({ \
+ typeof(&(x)) __x = &(x); \
+ int atomic = 1; \
+ union { __unqual_scalar_typeof(*__x) __val; char __c[1]; } __u; \
+ switch (sizeof(x)) { \
+ case 1: \
+ asm volatile(__LOAD_RCPC(b, %w0, %1) \
+ : "=r" (*(__u8 *)__u.__c) \
+ : "Q" (*__x) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile(__LOAD_RCPC(h, %w0, %1) \
+ : "=r" (*(__u16 *)__u.__c) \
+ : "Q" (*__x) : "memory"); \
+ break; \
+ case 4: \
+ asm volatile(__LOAD_RCPC(, %w0, %1) \
+ : "=r" (*(__u32 *)__u.__c) \
+ : "Q" (*__x) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile(__LOAD_RCPC(, %0, %1) \
+ : "=r" (*(__u64 *)__u.__c) \
+ : "Q" (*__x) : "memory"); \
+ break; \
+ default: \
+ atomic = 0; \
+ } \
+ atomic ? (typeof(*__x))__u.__val : (*(volatile typeof(__x))__x);\
+})
+
+#endif /* !BUILD_VDSO */
+#endif /* CONFIG_LTO */
+
+#include <asm-generic/rwonce.h>
+
+#endif /* __ASM_RWONCE_H */
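
The new rwonce.h is the LTO hardening piece: a link-time optimizer is more likely to turn an address dependency that begins at a READ_ONCE() into a control dependency, after which the CPU may reorder, so under CONFIG_LTO the load is promoted to an RCpc acquire (LDAPR where the assembler supports it, LDAR otherwise). A plain C11 analogue of the guarantee being bought; memory_order_acquire stands in for LDAPR/LDAR and the names are illustrative:

#include <stdatomic.h>

struct data {
	int payload;
};

static _Atomic(struct data *) shared_ptr;

int reader(void)
{
	/*
	 * Acquire load: the p->payload access below cannot be reordered
	 * before it, even if the compiler has rewritten away the address
	 * dependency the code appears to carry.
	 */
	struct data *p = atomic_load_explicit(&shared_ptr,
					      memory_order_acquire);
	return p ? p->payload : -1;
}
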
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
index c36387170936..30256233788b 100644
--- a/arch/arm64/include/asm/seccomp.h
+++ b/arch/arm64/include/asm/seccomp.h
@@ -19,4 +19,13 @@
#include <asm-generic/seccomp.h>
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_AARCH64
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "aarch64"
+#ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_ARM
+# define SECCOMP_ARCH_COMPAT_NR __NR_compat_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "arm"
+#endif
+
#endif /* _ASM_SECCOMP_H */
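
The SECCOMP_ARCH_NATIVE*/SECCOMP_ARCH_COMPAT* constants opt arm64 into seccomp's constant-action bitmaps: for each audit arch the core can precompute which syscall numbers a filter always allows and skip filter execution for them. A rough sketch of the lookup shape only; the size and names are placeholders, not the kernel's data structures:

#include <stdbool.h>
#include <stdint.h>

#define NATIVE_NR	512	/* placeholder for NR_syscalls */

static uint8_t allow_bitmap[NATIVE_NR / 8];	/* filled at filter attach */

bool syscall_always_allowed(unsigned int nr)
{
	if (nr >= NATIVE_NR)
		return false;
	return allow_bitmap[nr / 8] & (1U << (nr % 8));
}
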
diff --git a/arch/arm64/include/asm/signal.h b/arch/arm64/include/asm/signal.h
new file mode 100644
index 000000000000..ef449f5f4ba8
--- /dev/null
+++ b/arch/arm64/include/asm/signal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM64_ASM_SIGNAL_H
+#define __ARM64_ASM_SIGNAL_H
+
+#include <asm/memory.h>
+#include <uapi/asm/signal.h>
+#include <uapi/asm/siginfo.h>
+
+static inline void __user *arch_untagged_si_addr(void __user *addr,
+ unsigned long sig,
+ unsigned long si_code)
+{
+ /*
+ * For historical reasons, all bits of the fault address are exposed as
+ * address bits for watchpoint exceptions. New architectures should
+ * handle the tag bits consistently.
+ */
+ if (sig == SIGTRAP && si_code == TRAP_BRKPT)
+ return addr;
+
+ return untagged_addr(addr);
+}
+#define arch_untagged_si_addr arch_untagged_si_addr
+
+#endif
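
arch_untagged_si_addr() strips the TBI/MTE tag from fault addresses before they land in siginfo, with the documented exception of TRAP_BRKPT, where exposing all address bits is existing ABI for breakpoint/watchpoint reporting. The untagged_addr() it relies on is a sign extension from bit 55, which clears a user pointer's tag byte while leaving kernel addresses intact; a standalone model:

#include <stdint.h>
#include <stdio.h>

/* Model of untagged_addr(): sign-extend from bit 55. */
static uint64_t untag(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

int main(void)
{
	/* The 0x5a tag byte disappears; the low 56 bits survive. */
	printf("%#llx\n", (unsigned long long)untag(0x5a00ffff12345678ULL));
	return 0;
}
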
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d52c1b3ce589..cf7922f23808 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -98,6 +98,10 @@
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
+#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
+#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
+#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
+
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
@@ -372,6 +376,8 @@
#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
+#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
+
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
@@ -404,6 +410,8 @@
#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
+#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7)
+
/* Definitions for system register interface to AMU for ARMv8.4 onwards */
#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2))
#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0)
@@ -578,6 +586,9 @@
#define ENDIAN_SET_EL2 0
#endif
+#define INIT_SCTLR_EL2_MMU_OFF \
+ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_ATA0 (BIT(42))
@@ -611,12 +622,15 @@
#define ENDIAN_SET_EL1 0
#endif
-#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
- SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
- SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
- SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
- SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\
- ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
+#define INIT_SCTLR_EL1_MMU_OFF \
+ (ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
+
+#define INIT_SCTLR_EL1_MMU_ON \
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
+ SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
+ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
+ SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
+ SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
@@ -983,7 +997,7 @@
#define SYS_TFSR_EL1_TF0_SHIFT 0
#define SYS_TFSR_EL1_TF1_SHIFT 1
#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
-#define SYS_TFSR_EL1_TF1 (UK(2) << SYS_TFSR_EL1_TF1_SHIFT)
+#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
@@ -1007,6 +1021,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
+#include <asm/alternative.h>
#define __DEFINE_MRS_MSR_S_REGNUM \
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
@@ -1095,6 +1110,14 @@
write_sysreg_s(__scs_new, sysreg); \
} while (0)
+#define read_sysreg_par() ({ \
+ u64 par; \
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
+ par = read_sysreg(par_el1); \
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
+ par; \
+})
+
#endif
#endif /* __ASM_SYSREG_H */
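
read_sysreg_par() packages the erratum 1508412 workaround: on affected Cortex-A77 parts the PAR_EL1 read is bracketed by DMBs, patched in via ALTERNATIVE() so unaffected CPUs pay only two NOPs. The same shape in portable C, where atomic_thread_fence() stands in for "dmb sy" and both helper functions are assumptions for the example:

#include <stdatomic.h>
#include <stdint.h>

int has_erratum_1508412(void);	/* assumed: filled in by CPU detection */
uint64_t raw_read_par(void);	/* assumed: the unadorned register read */

uint64_t read_par_safe(void)
{
	uint64_t par;

	if (has_erratum_1508412())
		atomic_thread_fence(memory_order_seq_cst);	/* "dmb sy" */
	par = raw_read_par();
	if (has_erratum_1508412())
		atomic_thread_fence(memory_order_seq_cst);
	return par;
}
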
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 1ab63cfbbaf1..673be2d1263c 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -22,7 +22,7 @@ void die(const char *msg, struct pt_regs *regs, int err);
struct siginfo;
void arm64_notify_die(const char *str, struct pt_regs *regs,
- int signo, int sicode, void __user *addr,
+ int signo, int sicode, unsigned long far,
int err);
void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1fbab854a51b..015beafe58f5 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -18,14 +18,11 @@ struct task_struct;
#include <asm/stack_pointer.h>
#include <asm/types.h>
-typedef unsigned long mm_segment_t;
-
/*
* low level task data that entry.S needs immediate access to.
*/
struct thread_info {
unsigned long flags; /* low level flags */
- mm_segment_t addr_limit; /* address limit */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
@@ -66,8 +63,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
-#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
-#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
+#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
@@ -93,7 +89,6 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_UPROBE (1 << TIF_UPROBE)
-#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
@@ -101,7 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
+ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
@@ -119,7 +114,6 @@ void arch_release_task_struct(struct task_struct *tsk);
{ \
.flags = _TIF_FOREIGN_FPSTATE, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
INIT_SCS \
}
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 11a465243f66..3b8dca4eb08d 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -16,12 +16,14 @@ int pcibus_to_node(struct pci_bus *bus);
#include <linux/arch_topology.h>
+void update_freq_counters_refs(void);
+void topology_scale_freq_tick(void);
+
#ifdef CONFIG_ARM64_AMU_EXTN
/*
* Replace task scheduler's default counter-based
* frequency-invariance scale factor setting.
*/
-void topology_scale_freq_tick(void);
#define arch_scale_freq_tick topology_scale_freq_tick
#endif /* CONFIG_ARM64_AMU_EXTN */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d96dc2c7c09d..54f32a0675df 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -26,9 +26,9 @@ void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
void force_signal_inject(int signal, int code, unsigned long address, unsigned int err);
void arm64_notify_segfault(unsigned long addr);
-void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str);
-void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str);
-void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr, const char *str);
+void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str);
+void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
+void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
/*
* Move regs->pc to next instruction and do necessary setup before it
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 991dd5f031e4..abb31aa1f8ca 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,44 +24,18 @@
#include <asm/memory.h>
#include <asm/extable.h>
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
-{
- current_thread_info()->addr_limit = fs;
-
- /*
- * Prevent a mispredicted conditional call to set_fs from forwarding
- * the wrong address limit to access_ok under speculation.
- */
- spec_bar();
-
- /* On user-mode return, check fs is correct */
- set_thread_flag(TIF_FSCHECK);
-
- /*
- * Enable/disable UAO so that copy_to_user() etc can access
- * kernel memory with the unprivileged instructions.
- */
- if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
- CONFIG_ARM64_UAO));
-}
-
-#define uaccess_kernel() (get_fs() == KERNEL_DS)
+#define HAVE_GET_KERNEL_NOFAULT
/*
* Test whether a block of memory is a valid user space address.
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
+ * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
*/
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
- unsigned long ret, limit = current_thread_info()->addr_limit;
+ unsigned long ret, limit = TASK_SIZE_MAX - 1;
/*
* Asynchronous I/O running in a kernel thread does not have the
@@ -94,7 +68,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
}
#define access_ok(addr, size) __range_ok(addr, size)
-#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
" .pushsection __ex_table, \"a\"\n" \
@@ -113,8 +86,8 @@ static inline void __uaccess_ttbr0_disable(void)
local_irq_save(flags);
ttbr = read_sysreg(ttbr1_el1);
ttbr &= ~TTBR_ASID_MASK;
- /* reserved_ttbr0 placed before swapper_pg_dir */
- write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+ /* reserved_pg_dir placed before swapper_pg_dir */
+ write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
isb();
/* Set reserved ASID */
write_sysreg(ttbr, ttbr1_el1);
@@ -186,47 +159,26 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
-#define __uaccess_disable(alt) \
-do { \
- if (!uaccess_ttbr0_disable()) \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
- CONFIG_ARM64_PAN)); \
-} while (0)
-
-#define __uaccess_enable(alt) \
-do { \
- if (!uaccess_ttbr0_enable()) \
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
- CONFIG_ARM64_PAN)); \
-} while (0)
-
-static inline void uaccess_disable(void)
+static inline void uaccess_disable_privileged(void)
{
- __uaccess_disable(ARM64_HAS_PAN);
-}
+ if (uaccess_ttbr0_disable())
+ return;
-static inline void uaccess_enable(void)
-{
- __uaccess_enable(ARM64_HAS_PAN);
+ __uaccess_enable_hw_pan();
}
-/*
- * These functions are no-ops when UAO is present.
- */
-static inline void uaccess_disable_not_uao(void)
+static inline void uaccess_enable_privileged(void)
{
- __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
-}
+ if (uaccess_ttbr0_enable())
+ return;
-static inline void uaccess_enable_not_uao(void)
-{
- __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+ __uaccess_disable_hw_pan();
}
/*
- * Sanitise a uaccess pointer such that it becomes NULL if above the
- * current addr_limit. In case the pointer is tagged (has the top byte set),
- * untag the pointer before checking.
+ * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
+ * user address. In case the pointer is tagged (has the top byte set), untag
+ * the pointer before checking.
*/
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -237,7 +189,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
" bics xzr, %3, %2\n"
" csel %0, %1, xzr, eq\n"
: "=&r" (safe_ptr)
- : "r" (ptr), "r" (current_thread_info()->addr_limit),
+ : "r" (ptr), "r" (TASK_SIZE_MAX - 1),
"r" (untagged_addr(ptr))
: "cc");
@@ -253,10 +205,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_mem_asm(load, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " load " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup, \"ax\"\n" \
" .align 2\n" \
@@ -268,35 +219,36 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
-#define __raw_get_user(x, ptr, err) \
+#define __raw_get_mem(ldr, x, ptr, err) \
do { \
unsigned long __gu_val; \
- __chk_user_ptr(ptr); \
- uaccess_enable_not_uao(); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
break; \
case 2: \
- __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
break; \
case 4: \
- __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
} \
- uaccess_disable_not_uao(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
+#define __raw_get_user(x, ptr, err) \
+do { \
+ __chk_user_ptr(ptr); \
+ uaccess_ttbr0_enable(); \
+ __raw_get_mem("ldtr", x, ptr, err); \
+ uaccess_ttbr0_disable(); \
+} while (0)
+
#define __get_user_error(x, ptr, err) \
do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
@@ -318,10 +270,19 @@ do { \
#define get_user __get_user
-#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __gkn_err = 0; \
+ \
+ __raw_get_mem("ldr", *((type *)(dst)), \
+ (__force type *)(src), __gkn_err); \
+ if (unlikely(__gkn_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_mem_asm(store, reg, x, addr, err) \
asm volatile( \
- "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
- alt_instr " " reg "1, [%2]\n", feature) \
+ "1: " store " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -332,32 +293,33 @@ do { \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
-#define __raw_put_user(x, ptr, err) \
+#define __raw_put_mem(str, x, ptr, err) \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
- uaccess_enable_not_uao(); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
break; \
case 2: \
- __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
break; \
case 4: \
- __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
- (err), ARM64_HAS_UAO); \
+ __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
} \
- uaccess_disable_not_uao(); \
+} while (0)
+
+#define __raw_put_user(x, ptr, err) \
+do { \
+ __chk_user_ptr(ptr); \
+ uaccess_ttbr0_enable(); \
+ __raw_put_mem("sttr", x, ptr, err); \
+ uaccess_ttbr0_disable(); \
} while (0)
#define __put_user_error(x, ptr, err) \
@@ -381,14 +343,24 @@ do { \
#define put_user __put_user
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __pkn_err = 0; \
+ \
+ __raw_put_mem("str", *((type *)(src)), \
+ (__force type *)(dst), __pkn_err); \
+ if (unlikely(__pkn_err)) \
+ goto err_label; \
+} while(0)
+
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \
unsigned long __acfu_ret; \
- uaccess_enable_not_uao(); \
+ uaccess_ttbr0_enable(); \
__acfu_ret = __arch_copy_from_user((to), \
__uaccess_mask_ptr(from), (n)); \
- uaccess_disable_not_uao(); \
+ uaccess_ttbr0_disable(); \
__acfu_ret; \
})
@@ -396,10 +368,10 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
#define raw_copy_to_user(to, from, n) \
({ \
unsigned long __actu_ret; \
- uaccess_enable_not_uao(); \
+ uaccess_ttbr0_enable(); \
__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
(from), (n)); \
- uaccess_disable_not_uao(); \
+ uaccess_ttbr0_disable(); \
__actu_ret; \
})
@@ -407,10 +379,10 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
#define raw_copy_in_user(to, from, n) \
({ \
unsigned long __aciu_ret; \
- uaccess_enable_not_uao(); \
+ uaccess_ttbr0_enable(); \
__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
__uaccess_mask_ptr(from), (n)); \
- uaccess_disable_not_uao(); \
+ uaccess_ttbr0_disable(); \
__aciu_ret; \
})
@@ -421,9 +393,9 @@ extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned lo
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
if (access_ok(to, n)) {
- uaccess_enable_not_uao();
+ uaccess_ttbr0_enable();
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
- uaccess_disable_not_uao();
+ uaccess_ttbr0_disable();
}
return n;
}
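
With set_fs() gone, the uaccess story simplifies to two regimes: routines touching user memory always use the unprivileged LDTR/STTR forms and only toggle the SW-PAN TTBR0 window around the access, while uaccess_{enable,disable}_privileged() pair the TTBR0 toggle with hardware PAN for the remaining privileged accessors (e.g. the SWP emulation). The bracketing pattern, reduced to a sketch with assumed helper names:

#include <stddef.h>

void ttbr0_window_open(void);	/* like uaccess_ttbr0_enable(); assumed */
void ttbr0_window_close(void);	/* like uaccess_ttbr0_disable(); assumed */
int unprivileged_copy(void *dst, const void *usrc, size_t n); /* LDTR loop */

int get_user_model(void *dst, const void *usrc, size_t n)
{
	int err;

	ttbr0_window_open();
	err = unprivileged_copy(dst, usrc, n);
	ttbr0_window_close();
	return err;
}
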
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 09977acc007d..6069be50baf9 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -86,13 +86,12 @@ static inline bool is_kernel_in_hyp_mode(void)
static __always_inline bool has_vhe(void)
{
/*
- * The following macros are defined for code specic to VHE/nVHE.
- * If has_vhe() is inlined into those compilation units, it can
- * be determined statically. Otherwise fall back to caps.
+ * Code only run in VHE/NVHE hyp context can assume VHE is present or
+ * absent. Otherwise fall back to caps.
*/
- if (__is_defined(__KVM_VHE_HYPERVISOR__))
+ if (is_vhe_hyp_code())
return true;
- else if (__is_defined(__KVM_NVHE_HYPERVISOR__))
+ else if (is_nvhe_hyp_code())
return false;
else
return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bbaf0bc4ad60..86364ab6f13f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -58,7 +58,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
-obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso/ probes/
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 73039949b5ce..a57cffb752e8 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -21,7 +21,8 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
-static int all_alternatives_applied;
+/* Volatile, as we may be patching the guts of READ_ONCE() */
+static volatile int all_alternatives_applied;
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
@@ -205,7 +206,7 @@ static int __apply_alternatives_multi_stop(void *unused)
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(all_alternatives_applied))
+ while (!all_alternatives_applied)
cpu_relax();
isb();
} else {
@@ -217,7 +218,7 @@ static int __apply_alternatives_multi_stop(void *unused)
BUG_ON(all_alternatives_applied);
__apply_alternatives(&region, false, remaining_capabilities);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(all_alternatives_applied, 1);
+ all_alternatives_applied = 1;
}
return 0;
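
Demoting all_alternatives_applied from READ_ONCE()/WRITE_ONCE() to a volatile int is deliberate, as the comment notes: with the LTO series, READ_ONCE() itself can expand to an ALTERNATIVE() site, so it cannot be used while alternatives are the thing being patched. A volatile access gives the compiler-level guarantee without going near patchable code; the handshake looks like:

/* Secondary CPUs spin; CPU 0 patches, then sets the flag. Ordering is
 * supplied by the cache maintenance in the patching path, so a plain
 * volatile int is enough here. */
static volatile int all_done;

void secondary_wait(void)
{
	while (!all_done)
		;	/* cpu_relax() in the kernel */
}

void boot_cpu_finish(void)
{
	all_done = 1;
}
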
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7364de008bab..0e86e8b9cedd 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void)
#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
do { \
- uaccess_enable(); \
+ uaccess_enable_privileged(); \
__asm__ __volatile__( \
" mov %w3, %w7\n" \
"0: ldxr"B" %w2, [%4]\n" \
@@ -302,7 +302,7 @@ do { \
"i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \
: "memory"); \
- uaccess_disable(); \
+ uaccess_disable_privileged(); \
} while (0)
#define __user_swp_asm(data, addr, res, temp, temp2) \
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 7d32fc959b1a..679b19b8a7ff 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -30,7 +30,6 @@ int main(void)
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
- DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
#endif
@@ -70,7 +69,7 @@ int main(void)
DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate));
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
- DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24d75af344b1..cafaf0da05b7 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -299,6 +299,8 @@ static const struct midr_range erratum_845719_list[] = {
MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
/* Brahma-B53 r0p[0] */
MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+ /* Kryo2XX Silver rAp4 */
+ MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
{},
};
#endif
@@ -523,6 +525,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.cpu_enable = cpu_enable_trap_ctr_access,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1508412
+ {
+ /* we depend on the firmware portion for correctness */
+ .desc = "ARM erratum 1508412 (kernel portion)",
+ .capability = ARM64_WORKAROUND_1508412,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
+ 0, 0,
+ 1, 0),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index dcc165b3fc04..39138f6d3ba2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
.width = 0, \
}
-/* meta feature for alternatives */
-static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
-
static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
static bool __system_matches_cap(unsigned int n);
@@ -1337,6 +1333,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
@@ -1526,8 +1524,10 @@ bool cpu_has_amu_feat(int cpu)
return cpumask_test_cpu(cpu, &amu_cpus);
}
-/* Initialize the use of AMU counters for frequency invariance */
-extern void init_cpu_freq_invariance_counters(void);
+int get_cpu_with_amu_feat(void)
+{
+ return cpumask_any(&amu_cpus);
+}
static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
{
@@ -1535,7 +1535,7 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
smp_processor_id());
cpumask_set_cpu(smp_processor_id(), &amu_cpus);
- init_cpu_freq_invariance_counters();
+ update_freq_counters_refs();
}
}
@@ -1557,6 +1557,11 @@ static bool has_amu(const struct arm64_cpu_capabilities *cap,
return true;
}
+#else
+int get_cpu_with_amu_feat(void)
+{
+ return nr_cpu_ids;
+}
#endif
#ifdef CONFIG_ARM64_VHE
@@ -1598,7 +1603,7 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
WARN_ON_ONCE(in_interrupt());
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
- asm(SET_PSTATE_PAN(1));
+ set_pstate_pan(1);
}
#endif /* CONFIG_ARM64_PAN */
@@ -1768,28 +1773,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_no_hw_prefetch,
},
-#ifdef CONFIG_ARM64_UAO
- {
- .desc = "User Access Override",
- .capability = ARM64_HAS_UAO,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .field_pos = ID_AA64MMFR2_UAO_SHIFT,
- .min_field_value = 1,
- /*
- * We rely on stop_machine() calling uao_thread_switch() to set
- * UAO immediately after patching.
- */
- },
-#endif /* CONFIG_ARM64_UAO */
-#ifdef CONFIG_ARM64_PAN
- {
- .capability = ARM64_ALT_PAN_NOT_UAO,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .matches = cpufeature_pan_not_uao,
- },
-#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_VHE
{
.desc = "Virtualization Host Extensions",
@@ -2136,6 +2119,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_mte,
},
#endif /* CONFIG_ARM64_MTE */
+ {
+ .desc = "RCpc load-acquire (LDAPR)",
+ .capability = ARM64_HAS_LDAPR,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_LRCPC_SHIFT,
+ .matches = has_cpuid_feature,
+ .min_field_value = 1,
+ },
{},
};
@@ -2650,7 +2643,7 @@ bool this_cpu_has_cap(unsigned int n)
* - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
* In all other cases cpus_have_{const_}cap() should be used.
*/
-static bool __system_matches_cap(unsigned int n)
+static bool __maybe_unused __system_matches_cap(unsigned int n)
{
if (n < ARM64_NCAPS) {
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
@@ -2730,12 +2723,6 @@ void __init setup_cpu_features(void)
ARCH_DMA_MINALIGN);
}
-static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
-{
- return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
-}
-
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
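
get_cpu_with_amu_feat() decouples the topology code from cpufeature internals: cpufeature.c records which CPUs have AMU counters and exports a lookup that returns nr_cpu_ids when no CPU qualifies, the usual cpumask "not found" convention (the stub for kernels without CONFIG_ARM64_AMU_EXTN returns nr_cpu_ids unconditionally). A plain-array model of that contract:

#define NR_CPUS_MODEL	8	/* placeholder for nr_cpu_ids */

static int amu_present[NR_CPUS_MODEL];	/* set during feature detection */

int get_cpu_with_amu_model(void)
{
	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		if (amu_present[cpu])
			return cpu;
	return NR_CPUS_MODEL;	/* none found */
}
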
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 6a7bb3729d60..77605aec25fe 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -34,10 +34,10 @@ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
static const char *icache_policy_str[] = {
- [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN",
+ [ICACHE_POLICY_VPIPT] = "VPIPT",
+ [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
[ICACHE_POLICY_VIPT] = "VIPT",
[ICACHE_POLICY_PIPT] = "PIPT",
- [ICACHE_POLICY_VPIPT] = "VPIPT",
};
unsigned long __icache_flags;
@@ -334,10 +334,11 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
case ICACHE_POLICY_VPIPT:
set_bit(ICACHEF_VPIPT, &__icache_flags);
break;
- default:
+ case ICACHE_POLICY_RESERVED:
case ICACHE_POLICY_VIPT:
/* Assume aliasing */
set_bit(ICACHEF_ALIASING, &__icache_flags);
+ break;
}
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index fa76151de6ff..4f3661eeb7ec 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -234,9 +234,8 @@ static void send_user_sigtrap(int si_code)
if (interrupts_enabled(regs))
local_irq_enable();
- arm64_force_sig_fault(SIGTRAP, si_code,
- (void __user *)instruction_pointer(regs),
- "User debug trap");
+ arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),
+ "User debug trap");
}
static int single_step_handler(unsigned long unused, unsigned int esr,
diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
index df67c0f2a077..28d8a5dca5f1 100644
--- a/arch/arm64/kernel/efi-header.S
+++ b/arch/arm64/kernel/efi-header.S
@@ -7,30 +7,48 @@
#include <linux/pe.h>
#include <linux/sizes.h>
+ .macro efi_signature_nop
+#ifdef CONFIG_EFI
+.L_head:
+ /*
+ * This ccmp instruction has no meaningful effect except that
+ * its opcode forms the magic "MZ" signature required by UEFI.
+ */
+ ccmp x18, #0, #0xd, pl
+#else
+ /*
+ * Bootloaders may inspect the opcode at the start of the kernel
+ * image to decide if the kernel is capable of booting via UEFI.
+ * So put an ordinary NOP here, not the "MZ.." pseudo-nop above.
+ */
+ nop
+#endif
+ .endm
+
.macro __EFI_PE_HEADER
+#ifdef CONFIG_EFI
+ .set .Lpe_header_offset, . - .L_head
.long PE_MAGIC
-coff_header:
.short IMAGE_FILE_MACHINE_ARM64 // Machine
- .short section_count // NumberOfSections
+ .short .Lsection_count // NumberOfSections
.long 0 // TimeDateStamp
.long 0 // PointerToSymbolTable
.long 0 // NumberOfSymbols
- .short section_table - optional_header // SizeOfOptionalHeader
+ .short .Lsection_table - .Loptional_header // SizeOfOptionalHeader
.short IMAGE_FILE_DEBUG_STRIPPED | \
IMAGE_FILE_EXECUTABLE_IMAGE | \
IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics
-optional_header:
+.Loptional_header:
.short PE_OPT_MAGIC_PE32PLUS // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
- .long __initdata_begin - efi_header_end // SizeOfCode
+ .long __initdata_begin - .Lefi_header_end // SizeOfCode
.long __pecoff_data_size // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
- .long __efistub_efi_pe_entry - _head // AddressOfEntryPoint
- .long efi_header_end - _head // BaseOfCode
+ .long __efistub_efi_pe_entry - .L_head // AddressOfEntryPoint
+ .long .Lefi_header_end - .L_head // BaseOfCode
-extra_header_fields:
.quad 0 // ImageBase
.long SEGMENT_ALIGN // SectionAlignment
.long PECOFF_FILE_ALIGNMENT // FileAlignment
@@ -42,10 +60,10 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
- .long _end - _head // SizeOfImage
+ .long _end - .L_head // SizeOfImage
// Everything before the kernel image is considered part of the header
- .long efi_header_end - _head // SizeOfHeaders
+ .long .Lefi_header_end - .L_head // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
.short 0 // DllCharacteristics
@@ -54,7 +72,7 @@ extra_header_fields:
.quad 0 // SizeOfHeapReserve
.quad 0 // SizeOfHeapCommit
.long 0 // LoaderFlags
- .long (section_table - .) / 8 // NumberOfRvaAndSizes
+ .long (.Lsection_table - .) / 8 // NumberOfRvaAndSizes
.quad 0 // ExportTable
.quad 0 // ImportTable
@@ -64,17 +82,17 @@ extra_header_fields:
.quad 0 // BaseRelocationTable
#ifdef CONFIG_DEBUG_EFI
- .long efi_debug_table - _head // DebugTable
- .long efi_debug_table_size
+ .long .Lefi_debug_table - .L_head // DebugTable
+ .long .Lefi_debug_table_size
#endif
// Section table
-section_table:
+.Lsection_table:
.ascii ".text\0\0\0"
- .long __initdata_begin - efi_header_end // VirtualSize
- .long efi_header_end - _head // VirtualAddress
- .long __initdata_begin - efi_header_end // SizeOfRawData
- .long efi_header_end - _head // PointerToRawData
+ .long __initdata_begin - .Lefi_header_end // VirtualSize
+ .long .Lefi_header_end - .L_head // VirtualAddress
+ .long __initdata_begin - .Lefi_header_end // SizeOfRawData
+ .long .Lefi_header_end - .L_head // PointerToRawData
.long 0 // PointerToRelocations
.long 0 // PointerToLineNumbers
@@ -86,9 +104,9 @@ section_table:
.ascii ".data\0\0\0"
.long __pecoff_data_size // VirtualSize
- .long __initdata_begin - _head // VirtualAddress
+ .long __initdata_begin - .L_head // VirtualAddress
.long __pecoff_data_rawsize // SizeOfRawData
- .long __initdata_begin - _head // PointerToRawData
+ .long __initdata_begin - .L_head // PointerToRawData
.long 0 // PointerToRelocations
.long 0 // PointerToLineNumbers
@@ -98,7 +116,7 @@ section_table:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_WRITE // Characteristics
- .set section_count, (. - section_table) / 40
+ .set .Lsection_count, (. - .Lsection_table) / 40
#ifdef CONFIG_DEBUG_EFI
/*
@@ -114,21 +132,21 @@ section_table:
__INITRODATA
.align 2
-efi_debug_table:
+.Lefi_debug_table:
// EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
.long 0 // Characteristics
.long 0 // TimeDateStamp
.short 0 // MajorVersion
.short 0 // MinorVersion
.long IMAGE_DEBUG_TYPE_CODEVIEW // Type
- .long efi_debug_entry_size // SizeOfData
+ .long .Lefi_debug_entry_size // SizeOfData
.long 0 // RVA
- .long efi_debug_entry - _head // FileOffset
+ .long .Lefi_debug_entry - .L_head // FileOffset
- .set efi_debug_table_size, . - efi_debug_table
+ .set .Lefi_debug_table_size, . - .Lefi_debug_table
.previous
-efi_debug_entry:
+.Lefi_debug_entry:
// EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
.ascii "NB10" // Signature
.long 0 // Unknown
@@ -137,16 +155,12 @@ efi_debug_entry:
.asciz VMLINUX_PATH
- .set efi_debug_entry_size, . - efi_debug_entry
+ .set .Lefi_debug_entry_size, . - .Lefi_debug_entry
#endif
- /*
- * EFI will load .text onwards at the 4k section alignment
- * described in the PE/COFF header. To ensure that instruction
- * sequences using an adrp and a :lo12: immediate will function
- * correctly at this alignment, we must ensure that .text is
- * placed at a 4k boundary in the Image to begin with.
- */
- .align 12
-efi_header_end:
+ .balign SEGMENT_ALIGN
+.Lefi_header_end:
+#else
+ .set .Lpe_header_offset, 0x0
+#endif
.endm
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 43d4c329775f..5346953e4382 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -17,40 +17,163 @@
#include <asm/mmu.h>
#include <asm/sysreg.h>
-static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
+/*
+ * This is intended to match the logic in irqentry_enter(), handling the kernel
+ * mode transitions only.
+ */
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+ regs->exit_rcu = false;
+
+ if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter();
+ trace_hardirqs_off_finish();
+
+ regs->exit_rcu = true;
+ return;
+ }
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter_check_tick();
+ trace_hardirqs_off_finish();
+}
+
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (interrupts_enabled(regs)) {
+ if (regs->exit_rcu) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ rcu_irq_exit();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ return;
+ }
+
+ trace_hardirqs_on();
+ } else {
+ if (regs->exit_rcu)
+ rcu_irq_exit();
+ }
+}
+
+void noinstr arm64_enter_nmi(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
+}
+
+void noinstr arm64_exit_nmi(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ ftrace_nmi_exit();
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
+}
+
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ arm64_enter_nmi(regs);
+ else
+ enter_from_kernel_mode(regs);
+}
+
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ arm64_exit_nmi(regs);
+ else
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
- far = untagged_addr(far);
do_mem_abort(far, esr, regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_abort);
-static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_pc);
-static void notrace el1_undef(struct pt_regs *regs)
+static void noinstr el1_undef(struct pt_regs *regs)
{
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_undefinstr(regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_undef);
-static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
bad_mode(regs, 0, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_inv);
-static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+}
+
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -62,18 +185,21 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ arm64_enter_el1_dbg(regs);
do_debug_exception(far, esr, regs);
+ arm64_exit_el1_dbg(regs);
}
-NOKPROBE_SYMBOL(el1_dbg);
-static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_ptrauth_fault(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_fpac);
-asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -106,20 +232,33 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
el1_inv(regs, esr);
}
}
-NOKPROBE_SYMBOL(el1_sync_handler);
-static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
+asmlinkage void noinstr enter_from_user_mode(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+ trace_hardirqs_off_finish();
+}
+
+asmlinkage void noinstr exit_to_user_mode(void)
+{
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ user_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
- far = untagged_addr(far);
do_mem_abort(far, esr, regs);
}
-NOKPROBE_SYMBOL(el0_da);
-static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -131,90 +270,80 @@ static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
if (!is_ttbr0_addr(far))
arm64_apply_bp_hardening();
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs);
}
-NOKPROBE_SYMBOL(el0_ia);
-static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_acc(esr, regs);
}
-NOKPROBE_SYMBOL(el0_fpsimd_acc);
-static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sve_acc(esr, regs);
}
-NOKPROBE_SYMBOL(el0_sve_acc);
-static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_exc(esr, regs);
}
-NOKPROBE_SYMBOL(el0_fpsimd_exc);
-static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sysinstr(esr, regs);
}
-NOKPROBE_SYMBOL(el0_sys);
-static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(far, esr, regs);
}
-NOKPROBE_SYMBOL(el0_pc);
-static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(regs->sp, esr, regs);
}
-NOKPROBE_SYMBOL(el0_sp);
-static void notrace el0_undef(struct pt_regs *regs)
+static void noinstr el0_undef(struct pt_regs *regs)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_undefinstr(regs);
}
-NOKPROBE_SYMBOL(el0_undef);
-static void notrace el0_bti(struct pt_regs *regs)
+static void noinstr el0_bti(struct pt_regs *regs)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_bti(regs);
}
-NOKPROBE_SYMBOL(el0_bti);
-static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
bad_el0_sync(regs, 0, esr);
}
-NOKPROBE_SYMBOL(el0_inv);
-static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
unsigned long far = read_sysreg(far_el1);
@@ -222,30 +351,28 @@ static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
- user_exit_irqoff();
+ enter_from_user_mode();
do_debug_exception(far, esr, regs);
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
-NOKPROBE_SYMBOL(el0_dbg);
-static void notrace el0_svc(struct pt_regs *regs)
+static void noinstr el0_svc(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ enter_from_user_mode();
do_el0_svc(regs);
}
-NOKPROBE_SYMBOL(el0_svc);
-static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_ptrauth_fault(regs, esr);
}
-NOKPROBE_SYMBOL(el0_fpac);
-asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -297,27 +424,25 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
-NOKPROBE_SYMBOL(el0_sync_handler);
#ifdef CONFIG_COMPAT
-static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_cp15instr(esr, regs);
}
-NOKPROBE_SYMBOL(el0_cp15);
-static void notrace el0_svc_compat(struct pt_regs *regs)
+static void noinstr el0_svc_compat(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ enter_from_user_mode();
do_el0_svc_compat(regs);
}
-NOKPROBE_SYMBOL(el0_svc_compat);
-asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -360,5 +485,4 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
-NOKPROBE_SYMBOL(el0_sync_compat_handler);
#endif /* CONFIG_COMPAT */
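
For reference, the helpers these handlers now call are defined earlier in entry-common.c, outside the hunks shown. A minimal sketch of what they are expected to do, assuming the generic context-tracking and lockdep APIs:

	static void noinstr enter_from_user_mode(void)
	{
		/* Mark IRQs off for lockdep before any instrumentable code runs */
		lockdep_hardirqs_off(CALLER_ADDR0);
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();
		trace_hardirqs_off_finish();
	}

	static void noinstr exit_to_user_mode(void)
	{
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
		user_enter_irqoff();
		lockdep_hardirqs_on(CALLER_ADDR0);
	}

The noinstr annotations on the handlers above ensure nothing instrumentable runs before enter_from_user_mode() has done this bookkeeping.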
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f30007dff35f..51c762156099 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,18 +30,18 @@
#include <asm/unistd.h>
/*
- * Context tracking subsystem. Used to instrument transitions
- * between user and kernel mode.
+ * Context tracking and irqflag tracing need to instrument transitions between
+ * user and kernel mode.
*/
- .macro ct_user_exit_irqoff
-#ifdef CONFIG_CONTEXT_TRACKING
+ .macro user_exit_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl enter_from_user_mode
#endif
.endm
- .macro ct_user_enter
-#ifdef CONFIG_CONTEXT_TRACKING
- bl context_tracking_user_enter
+ .macro user_enter_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+ bl exit_to_user_mode
#endif
.endm
@@ -216,12 +216,6 @@ alternative_else_nop_endif
.else
add x21, sp, #S_FRAME_SIZE
get_current_task tsk
- /* Save the task's original addr_limit and set USER_DS */
- ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
- str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #USER_DS
- str x20, [tsk, #TSK_TI_ADDR_LIMIT]
- /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
@@ -279,12 +273,6 @@ alternative_else_nop_endif
.macro kernel_exit, el
.if \el != 0
disable_daif
-
- /* Restore the task's original addr_limit. */
- ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
- str x20, [tsk, #TSK_TI_ADDR_LIMIT]
-
- /* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
/* Restore pmr */
@@ -298,9 +286,6 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
alternative_else_nop_endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
- .if \el == 0
- ct_user_enter
- .endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
@@ -365,6 +350,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
br x30
#endif
.else
+ /* Ensure any device/NC reads complete */
+ alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+
eret
.endif
sb
@@ -438,7 +426,7 @@ SYM_CODE_END(__swpan_exit_el0)
#ifdef CONFIG_SHADOW_CALL_STACK
/* also switch to the irq shadow stack */
- adr_this_cpu scs_sp, irq_shadow_call_stack, x26
+ ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
#endif
9998:
@@ -634,16 +622,8 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- test_irqs_unmasked res=x0, pmr=x20
- cbz x0, 1f
- bl asm_nmi_enter
-1:
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ mov x0, sp
+ bl enter_el1_irq_or_nmi
irq_handler
@@ -662,26 +642,8 @@ alternative_else_nop_endif
1:
#endif
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- /*
- * When using IRQ priority masking, we can get spurious interrupts while
- * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
- * section with interrupts disabled. Skip tracing in those cases.
- */
- test_irqs_unmasked res=x0, pmr=x20
- cbz x0, 1f
- bl asm_nmi_exit
-1:
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- test_irqs_unmasked res=x0, pmr=x20
- cbnz x0, 1f
-#endif
- bl trace_hardirqs_on
-1:
-#endif
+ mov x0, sp
+ bl exit_el1_irq_or_nmi
kernel_exit 1
SYM_CODE_END(el1_irq)
@@ -723,21 +685,14 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
- ct_user_exit_irqoff
+ user_exit_irqoff
enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
-
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-#endif
b ret_to_user
SYM_CODE_END(el0_irq)
@@ -756,7 +711,7 @@ SYM_CODE_START_LOCAL(el0_error)
el0_error_naked:
mrs x25, esr_el1
gic_prio_kentry_setup tmp=x2
- ct_user_exit_irqoff
+ user_exit_irqoff
enable_dbg
mov x0, sp
mov x1, x25
@@ -771,13 +726,17 @@ SYM_CODE_END(el0_error)
SYM_CODE_START_LOCAL(ret_to_user)
disable_daif
gic_prio_kentry_setup tmp=x3
- ldr x1, [tsk, #TSK_TI_FLAGS]
- and x2, x1, #_TIF_WORK_MASK
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
+ ldr x19, [tsk, #TSK_TI_FLAGS]
+ and x2, x19, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
+ user_enter_irqoff
/* Ignore asynchronous tag check faults in the uaccess routines */
clear_mte_async_tcf
- enable_step_tsk x1, x2
+ enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase
#endif
@@ -788,11 +747,9 @@ finish_ret_to_user:
*/
work_pending:
mov x0, sp // 'regs'
+ mov x1, x19
bl do_notify_resume
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on // enabled while in userspace
-#endif
- ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
+ ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
SYM_CODE_END(ret_to_user)
@@ -804,9 +761,10 @@ SYM_CODE_END(ret_to_user)
*/
.pushsection ".entry.tramp.text", "ax"
+ // Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
- add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ add \tmp, \tmp, #(2 * PAGE_SIZE)
bic \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
@@ -823,9 +781,10 @@ alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
.endm
+ // Move from swapper_pg_dir to tramp_pg_dir
.macro tramp_unmap_kernel, tmp
mrs \tmp, ttbr1_el1
- sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ sub \tmp, \tmp, #(2 * PAGE_SIZE)
orr \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
/*
@@ -996,10 +955,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
mov x4, xzr
/*
- * Use reg->interrupted_regs.addr_limit to remember whether to unmap
- * the kernel on exit.
+ * Remember whether to unmap the kernel on exit.
*/
-1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
#ifdef CONFIG_RANDOMIZE_BASE
adr x4, tramp_vectors + PAGE_SIZE
@@ -1020,7 +978,7 @@ NOKPROBE(__sdei_asm_entry_trampoline)
* x4: struct sdei_registered_event argument from registration time.
*/
SYM_CODE_START(__sdei_asm_exit_trampoline)
- ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
cbnz x4, 1f
tramp_unmap_kernel tmp=x4
@@ -1094,9 +1052,9 @@ SYM_CODE_START(__sdei_asm_handler)
#ifdef CONFIG_SHADOW_CALL_STACK
/* Use a separate shadow call stack for normal and critical events */
cbnz w4, 3f
- adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
+ ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
b 4f
-3: adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
+3: ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif
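
Two assumptions behind the entry.S hunks above, from elsewhere in this series: the shadow call stacks are now dynamically allocated (hence ldr_this_cpu through a per-CPU pointer instead of adr_this_cpu on a static array), and the swapper page tables are laid out with reserved_pg_dir between the trampoline and swapper tables, which is where the new `2 * PAGE_SIZE` offset comes from. A sketch of the assumed layout:

	/*
	 * Assumed layout (one page each, contiguous in the kernel image):
	 *
	 *   tramp_pg_dir      <- TTBR1_EL1 while running userspace
	 *   reserved_pg_dir   <- empty tables, installed when a table is "parked"
	 *   swapper_pg_dir    <- TTBR1_EL1 while running in the kernel
	 *
	 * tramp_map_kernel:   ttbr1 += 2 * PAGE_SIZE   (tramp -> swapper)
	 * tramp_unmap_kernel: ttbr1 -= 2 * PAGE_SIZE   (swapper -> tramp)
	 */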
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d8d9caf02834..f2eb206920a2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -58,21 +58,11 @@
* in the entry routines.
*/
__HEAD
-_head:
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
-#ifdef CONFIG_EFI
- /*
- * This add instruction has no meaningful effect except that
- * its opcode forms the magic "MZ" signature required by UEFI.
- */
- add x13, x18, #0x16
- b primary_entry
-#else
+ efi_signature_nop // special NOP to identify as PE/COFF executable
b primary_entry // branch to kernel start, magic
- .long 0 // reserved
-#endif
.quad 0 // Image load offset from start of RAM, little-endian
le64sym _kernel_size_le // Effective size of kernel image, little-endian
le64sym _kernel_flags_le // Informative flags, little-endian
@@ -80,14 +70,9 @@ _head:
.quad 0 // reserved
.quad 0 // reserved
.ascii ARM64_IMAGE_MAGIC // Magic number
-#ifdef CONFIG_EFI
- .long pe_header - _head // Offset to the PE header.
+ .long .Lpe_header_offset // Offset to the PE header.
-pe_header:
__EFI_PE_HEADER
-#else
- .long 0 // reserved
-#endif
__INIT
@@ -104,7 +89,7 @@ pe_header:
*/
SYM_CODE_START(primary_entry)
bl preserve_boot_args
- bl el2_setup // Drop to EL1, w0=cpu_boot_mode
+ bl init_kernel_el // w0=cpu_boot_mode
adrp x23, __PHYS_OFFSET
and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
bl set_cpu_boot_mode_flag
@@ -482,24 +467,33 @@ EXPORT_SYMBOL(kimage_vaddr)
.section ".idmap.text","awx"
/*
- * If we're fortunate enough to boot at EL2, ensure that the world is
- * sane before dropping to EL1.
+ * Starting from EL2 or EL1, configure the CPU to execute at the highest
+ * reachable EL supported by the kernel in a chosen default state. If dropping
+ * from EL2 to EL1, configure EL2 before configuring EL1.
+ *
+ * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
+ * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
*
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
* booted in EL1 or EL2 respectively.
*/
-SYM_FUNC_START(el2_setup)
- msr SPsel, #1 // We want to use SP_EL{1,2}
+SYM_FUNC_START(init_kernel_el)
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
- b.eq 1f
- mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
+ b.eq init_el2
+
+SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
msr sctlr_el1, x0
- mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
isb
- ret
+ mov_q x0, INIT_PSTATE_EL1
+ msr spsr_el1, x0
+ msr elr_el1, lr
+ mov w0, #BOOT_CPU_MODE_EL1
+ eret
-1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+ mov_q x0, INIT_SCTLR_EL2_MMU_OFF
msr sctlr_el2, x0
#ifdef CONFIG_ARM64_VHE
@@ -608,9 +602,12 @@ set_hcr:
cbz x2, install_el2_stub
- mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
isb
- ret
+ mov_q x0, INIT_PSTATE_EL2
+ msr spsr_el2, x0
+ msr elr_el2, lr
+ mov w0, #BOOT_CPU_MODE_EL2
+ eret
SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
/*
@@ -620,7 +617,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
* requires no configuration, and all non-hyp-specific EL2 setup
* will be done via the _EL1 system register aliases in __cpu_setup.
*/
- mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -642,14 +639,13 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
7: adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
- /* spsr */
- mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
- PSR_MODE_EL1h)
+ isb
+ mov x0, #INIT_PSTATE_EL1
msr spsr_el2, x0
msr elr_el2, lr
- mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
+ mov w0, #BOOT_CPU_MODE_EL2
eret
-SYM_FUNC_END(el2_setup)
+SYM_FUNC_END(init_kernel_el)
/*
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
@@ -699,7 +695,7 @@ SYM_DATA_END(__early_cpu_boot_status)
* cores are held until we're ready for them to initialise.
*/
SYM_FUNC_START(secondary_holding_pen)
- bl el2_setup // Drop to EL1, w0=cpu_boot_mode
+ bl init_kernel_el // w0=cpu_boot_mode
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
mov_q x1, MPIDR_HWID_BITMASK
@@ -717,7 +713,7 @@ SYM_FUNC_END(secondary_holding_pen)
* be used where CPUs are brought online dynamically by the kernel.
*/
SYM_FUNC_START(secondary_entry)
- bl el2_setup // Drop to EL1
+ bl init_kernel_el // w0=cpu_boot_mode
bl set_cpu_boot_mode_flag
b secondary_startup
SYM_FUNC_END(secondary_entry)
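
init_kernel_el now leaves via ERET rather than RET, so the boot-time PSTATE and SCTLR values come from named constants instead of open-coded masks. The definitions are assumed to live in asm/ptrace.h and asm/sysreg.h, along these lines (a sketch, not the authoritative values):

	/* All of DAIF masked, SPx stack selected, MMU and caches off */
	#define INIT_PSTATE_EL1 \
		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h)
	#define INIT_PSTATE_EL2 \
		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)

	#define INIT_SCTLR_EL1_MMU_OFF \
		(ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
	#define INIT_SCTLR_EL2_MMU_OFF \
		(ENDIAN_SET_EL2 | SCTLR_EL2_RES1)

Note the ISB placed before each ERET: since SCTLR_ELx.EOS may be clear, ERET alone is not guaranteed to synchronize the preceding sysreg writes.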
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 61684a500914..c615b285ff5b 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -87,7 +87,6 @@ KVM_NVHE_ALIAS(__icache_flags);
/* Kernel symbols needed for cpus_have_final/const_caps checks. */
KVM_NVHE_ALIAS(arm64_const_caps_ready);
KVM_NVHE_ALIAS(cpu_hwcap_keys);
-KVM_NVHE_ALIAS(cpu_hwcaps);
/* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 9cf2fb87584a..dfb1feab867d 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/kprobes.h>
+#include <linux/scs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
@@ -27,6 +28,25 @@ DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
+
+DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
+#endif
+
+static void init_irq_scs(void)
+{
+ int cpu;
+
+ if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+ return;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(irq_shadow_call_stack_ptr, cpu) =
+ scs_alloc(cpu_to_node(cpu));
+}
+
#ifdef CONFIG_VMAP_STACK
static void init_irq_stacks(void)
{
@@ -54,6 +74,7 @@ static void init_irq_stacks(void)
void __init init_IRQ(void)
{
init_irq_stacks();
+ init_irq_scs();
irqchip_init();
if (!handle_arch_irq)
panic("No interrupt controller found.");
@@ -67,18 +88,3 @@ void __init init_IRQ(void)
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
}
-
-/*
- * Stubs to make nmi_enter/exit() code callable from ASM
- */
-asmlinkage void notrace asm_nmi_enter(void)
-{
- nmi_enter();
-}
-NOKPROBE_SYMBOL(asm_nmi_enter);
-
-asmlinkage void notrace asm_nmi_exit(void)
-{
- nmi_exit();
-}
-NOKPROBE_SYMBOL(asm_nmi_exit);
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index b181e0544b79..0921aa1520b0 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -50,10 +50,16 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret;
}
-static __init const u8 *kaslr_get_cmdline(void *fdt)
+static __init bool cmdline_contains_nokaslr(const u8 *cmdline)
{
- static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+ const u8 *str;
+ str = strstr(cmdline, "nokaslr");
+ return str == cmdline || (str > cmdline && *(str - 1) == ' ');
+}
+
+static __init bool is_kaslr_disabled_cmdline(void *fdt)
+{
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
int node;
const u8 *prop;
@@ -65,10 +71,17 @@ static __init const u8 *kaslr_get_cmdline(void *fdt)
prop = fdt_getprop(fdt, node, "bootargs", NULL);
if (!prop)
goto out;
- return prop;
+
+ if (cmdline_contains_nokaslr(prop))
+ return true;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
+ goto out;
+
+ return false;
}
out:
- return default_cmdline;
+ return cmdline_contains_nokaslr(CONFIG_CMDLINE);
}
/*
@@ -83,7 +96,6 @@ u64 __init kaslr_early_init(u64 dt_phys)
{
void *fdt;
u64 seed, offset, mask, module_range;
- const u8 *cmdline, *str;
unsigned long raw;
int size;
@@ -115,9 +127,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- cmdline = kaslr_get_cmdline(fdt);
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) {
+ if (is_kaslr_disabled_cmdline(fdt)) {
kaslr_status = KASLR_DISABLED_CMDLINE;
return 0;
}
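
Because cmdline_contains_nokaslr() checks only the character preceding the match, its behaviour on a few illustrative command lines is:

	/*
	 * "nokaslr"        -> true   (match at the start of the string)
	 * "quiet nokaslr"  -> true   (preceded by a space)
	 * "foo=nokaslr"    -> false  ('=' precedes the token)
	 * "nokaslrfoo"     -> true   (only the leading boundary is checked)
	 */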
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
index af9987c154ca..9ec34690e255 100644
--- a/arch/arm64/kernel/kexec_image.c
+++ b/arch/arm64/kernel/kexec_image.c
@@ -43,7 +43,7 @@ static void *image_load(struct kimage *image,
u64 flags, value;
bool be_image, be_kernel;
struct kexec_buf kbuf;
- unsigned long text_offset;
+ unsigned long text_offset, kernel_segment_number;
struct kexec_segment *kernel_segment;
int ret;
@@ -88,11 +88,37 @@ static void *image_load(struct kimage *image,
/* Adjust kernel segment with TEXT_OFFSET */
kbuf.memsz += text_offset;
- ret = kexec_add_buffer(&kbuf);
- if (ret)
+ kernel_segment_number = image->nr_segments;
+
+ /*
+ * The location of the kernel segment may make it impossible to satisfy
+ * the other segment requirements, so we try repeatedly to find a
+ * location that will work.
+ */
+ while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+ /* Try to load additional data */
+ kernel_segment = &image->segment[kernel_segment_number];
+ ret = load_other_segments(image, kernel_segment->mem,
+ kernel_segment->memsz, initrd,
+ initrd_len, cmdline);
+ if (!ret)
+ break;
+
+ /*
+ * We couldn't find space for the other segments; erase the
+ * kernel segment and try the next available hole.
+ */
+ image->nr_segments -= 1;
+ kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ }
+
+ if (ret) {
+ pr_err("Could not find any suitable kernel location!");
return ERR_PTR(ret);
+ }
- kernel_segment = &image->segment[image->nr_segments - 1];
+ kernel_segment = &image->segment[kernel_segment_number];
kernel_segment->mem += text_offset;
kernel_segment->memsz -= text_offset;
image->start = kernel_segment->mem;
@@ -101,12 +127,7 @@ static void *image_load(struct kimage *image,
kernel_segment->mem, kbuf.bufsz,
kernel_segment->memsz);
- /* Load additional data */
- ret = load_other_segments(image,
- kernel_segment->mem, kernel_segment->memsz,
- initrd, initrd_len, cmdline);
-
- return ERR_PTR(ret);
+ return NULL;
}
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 5b0e67b93cdc..03210f644790 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -240,6 +240,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
return ret;
}
+/*
+ * Tries to add the initrd and DTB to the image. If it is not possible to find
+ * valid locations, this function will undo its changes to the image and return
+ * non-zero.
+ */
int load_other_segments(struct kimage *image,
unsigned long kernel_load_addr,
unsigned long kernel_size,
@@ -248,7 +253,8 @@ int load_other_segments(struct kimage *image,
{
struct kexec_buf kbuf;
void *headers, *dtb = NULL;
- unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
+ unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
+ orig_segments = image->nr_segments;
int ret = 0;
kbuf.image = image;
@@ -334,6 +340,7 @@ int load_other_segments(struct kimage *image,
return 0;
out_err:
+ image->nr_segments = orig_segments;
vfree(dtb);
return ret;
}
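
Taken together with the image_load() loop above, the placement retry works as sketched below, assuming kexec_add_buffer() places a segment in the lowest suitable hole at or above kbuf.buf_min:

	/*
	 * loop:
	 *   place kernel at hole H via kexec_add_buffer()
	 *   if load_other_segments() fits initrd/DTB around H: done
	 *   image->nr_segments -= 1;           // drop the kernel segment
	 *   kbuf.buf_min = H.mem + H.memsz;    // search strictly above H
	 *
	 * The nr_segments rollback added here guarantees each iteration
	 * starts from a clean segment list.
	 */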
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 52a0638ed967..ef15c8a2a49d 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -189,7 +189,8 @@ long get_mte_ctrl(struct task_struct *task)
switch (task->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE:
- return PR_MTE_TCF_NONE;
+ ret |= PR_MTE_TCF_NONE;
+ break;
case SCTLR_EL1_TCF0_SYNC:
ret |= PR_MTE_TCF_SYNC;
break;
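
The mte.c hunk is a bug fix: returning PR_MTE_TCF_NONE directly discarded whatever get_mte_ctrl() had already accumulated in ret (assumed to include the PR_MTE_TAG_MASK bits derived from the task's GCR exclude mask). A sketch of the intended flow:

	/*
	 * ret = incl << PR_MTE_TAG_SHIFT;   // tag-inclusion bits, set earlier
	 * switch (task->thread.sctlr_tcf0) {
	 * case SCTLR_EL1_TCF0_NONE:
	 *         ret |= PR_MTE_TCF_NONE;   // OR in, don't overwrite ret
	 *         break;
	 * ...
	 * }
	 * return ret;
	 */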
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 3605f77ad4df..38bb07eff872 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/cpufreq.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -1248,10 +1250,21 @@ static struct platform_driver armv8_pmu_driver = {
static int __init armv8_pmu_driver_init(void)
{
+ int ret;
+
if (acpi_disabled)
- return platform_driver_register(&armv8_pmu_driver);
+ ret = platform_driver_register(&armv8_pmu_driver);
else
- return arm_pmu_acpi_probe(armv8_pmuv3_init);
+ ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
+
+ /*
+ * Try to re-initialize the lockup detector after PMU init, in
+ * case PMU events are triggered via NMIs.
+ */
+ if (ret == 0 && arm_pmu_irq_is_nmi())
+ lockup_detector_init();
+
+ return ret;
}
device_initcall(armv8_pmu_driver_init)
@@ -1309,3 +1322,27 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+/*
+ * Safe maximum CPU frequency in case a particular platform doesn't implement
+ * cpufreq driver. Although, architecture doesn't put any restrictions on
+ * maximum frequency but 5 GHz seems to be safe maximum given the available
+ * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
+ * hand, we can't make it much higher as it would lead to a large hard-lockup
+ * detection timeout on parts which are running slower (eg. 1GHz on
+ * Developerbox) and doesn't possess a cpufreq driver.
+ */
+#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long max_cpu_freq;
+
+ max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
+ if (!max_cpu_freq)
+ max_cpu_freq = SAFE_MAX_CPU_FREQ;
+
+ return (u64)max_cpu_freq * watchdog_thresh;
+}
+#endif
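
hw_nmi_get_sample_period() converts watchdog_thresh (seconds) into a cycle count for the perf NMI event. A worked example with illustrative numbers:

	/*
	 * watchdog_thresh = 10, no cpufreq driver:
	 *   period = 5,000,000,000 Hz * 10 = 5 * 10^10 cycles
	 *
	 * On a part actually running at 1 GHz those cycles take ~50s to
	 * elapse, inflating the hard-lockup window by the ratio of assumed
	 * to real frequency - hence the preference for a cpufreq-reported
	 * maximum when one exists.
	 */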
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 94e8718e7229..f6f58e6265df 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -73,8 +73,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index deba738142ed..89c64ada8732 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -34,27 +34,18 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
-
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
-{
- void *addrs[1];
- u32 insns[1];
-
- addrs[0] = addr;
- insns[0] = opcode;
-
- return aarch64_insn_patch_text(addrs, insns, 1);
-}
+post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
+ kprobe_opcode_t *addr = p->ainsn.api.insn;
+ void *addrs[] = {addr, addr + 1};
+ u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
+
/* prepare insn slot */
- patch_text(p->ainsn.api.insn, p->opcode);
+ aarch64_insn_patch_text(addrs, insns, 2);
- flush_icache_range((uintptr_t) (p->ainsn.api.insn),
- (uintptr_t) (p->ainsn.api.insn) +
- MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
/*
* Needs restoring of return address after stepping xol.
@@ -77,7 +68,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
/* single step simulated, now go for post processing */
- post_kprobe_handler(kcb, regs);
+ post_kprobe_handler(p, kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -128,13 +119,18 @@ void *alloc_insn_page(void)
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- patch_text(p->addr, BRK64_OPCODE_KPROBES);
+ void *addr = p->addr;
+ u32 insn = BRK64_OPCODE_KPROBES;
+
+ aarch64_insn_patch_text(&addr, &insn, 1);
}
/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- patch_text(p->addr, p->opcode);
+ void *addr = p->addr;
+
+ aarch64_insn_patch_text(&addr, &p->opcode, 1);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
@@ -163,20 +159,15 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * Interrupts need to be disabled before single-step mode is set, and not
- * reenabled until after single-step mode ends.
- * Without disabling interrupt on local CPU, there is a chance of
- * interrupt occurrence in the period of exception return and start of
- * out-of-line single-step, that result in wrongly single stepping
- * into the interrupt handler.
+ * Mask all of DAIF while executing the instruction out-of-line, to keep things
+ * simple and avoid nesting exceptions. Interrupts do have to be disabled since
+ * the kprobe state is per-CPU and doesn't get migrated.
*/
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_irqflag = regs->pstate & DAIF_MASK;
- regs->pstate |= PSR_I_BIT;
- /* Unmask PSTATE.D for enabling software step exceptions. */
- regs->pstate &= ~PSR_D_BIT;
+ regs->pstate |= DAIF_MASK;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
@@ -186,19 +177,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
regs->pstate |= kcb->saved_irqflag;
}
-static void __kprobes
-set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
-{
- kcb->ss_ctx.ss_pending = true;
- kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
-}
-
-static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
-{
- kcb->ss_ctx.ss_pending = false;
- kcb->ss_ctx.match_addr = 0;
-}
-
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
@@ -218,11 +196,7 @@ static void __kprobes setup_singlestep(struct kprobe *p,
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
- set_ss_context(kcb, slot); /* mark pending ss */
-
- /* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
- kernel_enable_single_step(regs);
instruction_pointer_set(regs, slot);
} else {
/* insn simulation */
@@ -255,13 +229,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
}
static void __kprobes
-post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
- struct kprobe *cur = kprobe_running();
-
- if (!cur)
- return;
-
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
instruction_pointer_set(regs, cur->ainsn.api.restore);
@@ -273,12 +242,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
}
/* call post handler */
kcb->kprobe_status = KPROBE_HIT_SSDONE;
- if (cur->post_handler) {
- /* post_handler can hit breakpoint and single step
- * again, so we enable D-flag for recursive exception.
- */
+ if (cur->post_handler)
cur->post_handler(cur, regs, 0);
- }
reset_current_kprobe();
}
@@ -302,8 +267,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
if (!instruction_pointer(regs))
BUG();
- kernel_disable_single_step();
-
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
@@ -365,10 +328,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
* pre-handler and it returned non-zero, it will
* modify the execution path and there is no need for
* single stepping. Let's just reset the current kprobe and exit.
- *
- * pre_handler can hit a breakpoint and can step thru
- * before return, keep PSTATE D-flag enabled until
- * pre_handler return back.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs, kcb, 0);
@@ -387,38 +346,27 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
}
static int __kprobes
-kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
-{
- if ((kcb->ss_ctx.ss_pending)
- && (kcb->ss_ctx.match_addr == addr)) {
- clear_ss_context(kcb); /* clear pending ss */
- return DBG_HOOK_HANDLED;
- }
- /* not ours, kprobes should ignore it */
- return DBG_HOOK_ERROR;
-}
-
-static int __kprobes
-kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- int retval;
-
- /* return error if this is not our step */
- retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
+ unsigned long addr = instruction_pointer(regs);
+ struct kprobe *cur = kprobe_running();
- if (retval == DBG_HOOK_HANDLED) {
+ if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
+ && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
- kernel_disable_single_step();
+ post_kprobe_handler(cur, kcb, regs);
- post_kprobe_handler(kcb, regs);
+ return DBG_HOOK_HANDLED;
}
- return retval;
+ /* not ours, kprobes should ignore it */
+ return DBG_HOOK_ERROR;
}
-static struct step_hook kprobes_step_hook = {
- .fn = kprobe_single_step_handler,
+static struct break_hook kprobes_break_ss_hook = {
+ .imm = KPROBES_BRK_SS_IMM,
+ .fn = kprobe_breakpoint_ss_handler,
};
static int __kprobes
@@ -486,7 +434,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int __init arch_init_kprobes(void)
{
register_kernel_break_hook(&kprobes_break_hook);
- register_kernel_step_hook(&kprobes_step_hook);
+ register_kernel_break_hook(&kprobes_break_ss_hook);
return 0;
}
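
This series replaces hardware single-step with a second BRK planted immediately after the instruction copied out of line. A sketch of the resulting slot and control flow (names from this hunk):

	/*
	 * ainsn.api.insn slot after arch_prepare_ss_slot():
	 *
	 *   insn[0] = p->opcode                  // the probed instruction
	 *   insn[1] = BRK64_OPCODE_KPROBES_SS    // traps straight back
	 *
	 * setup_singlestep() masks all of DAIF and points the PC at insn[0];
	 * executing insn[1] raises a break exception whose handler verifies
	 * instruction_pointer(regs) == &cur->ainsn.api.insn[1] before
	 * running post_kprobe_handler().
	 */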
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4784011cecac..6616486a58fe 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -72,13 +72,13 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-static void __cpu_do_idle(void)
+static void noinstr __cpu_do_idle(void)
{
dsb(sy);
wfi();
}
-static void __cpu_do_idle_irqprio(void)
+static void noinstr __cpu_do_idle_irqprio(void)
{
unsigned long pmr;
unsigned long daif_bits;
@@ -108,7 +108,7 @@ static void __cpu_do_idle_irqprio(void)
* ensure that interrupts are not masked at the PMR (because the core will
* not wake up if we block the wake up signal in the interrupt controller).
*/
-void cpu_do_idle(void)
+void noinstr cpu_do_idle(void)
{
if (system_uses_irq_prio_masking())
__cpu_do_idle_irqprio();
@@ -119,14 +119,14 @@ void cpu_do_idle(void)
/*
* This is our default idle handler.
*/
-void arch_cpu_idle(void)
+void noinstr arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait-for-interrupt
* tricks.
*/
cpu_do_idle();
- local_irq_enable();
+ raw_local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -422,16 +422,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (clone_flags & CLONE_SETTLS)
p->thread.uw.tp_value = tls;
} else {
+ /*
+ * A kthread has no context to ERET to, so ensure any buggy
+ * ERET is treated as an illegal exception return.
+ *
+ * When a user task is created from a kthread, childregs will
+ * be initialized by start_thread() or start_compat_thread().
+ */
memset(childregs, 0, sizeof(struct pt_regs));
- childregs->pstate = PSR_MODE_EL1h;
- if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_const_cap(ARM64_HAS_UAO))
- childregs->pstate |= PSR_UAO_BIT;
-
- spectre_v4_enable_task_mitigation(p);
-
- if (system_uses_irq_prio_masking())
- childregs->pmr_save = GIC_PRIO_IRQON;
+ childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
@@ -461,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next)
write_sysreg(*task_user_tls(next), tpidr_el0);
}
-/* Restore the UAO state depending on next's addr_limit */
-void uao_thread_switch(struct task_struct *next)
-{
- if (IS_ENABLED(CONFIG_ARM64_UAO)) {
- if (task_thread_info(next)->addr_limit == KERNEL_DS)
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
- else
- asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
- }
-}
-
/*
* Force SSBS state on context-switch, since it may be lost after migrating
* from a CPU which treats the bit as RES0 in a heterogeneous system.
@@ -522,14 +510,13 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
bool prev32, next32;
u64 val;
- if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
- cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
return;
prev32 = is_compat_thread(task_thread_info(prev));
next32 = is_compat_thread(task_thread_info(next));
- if (prev32 == next32)
+ if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
return;
val = read_sysreg(cntkctl_el1);
@@ -555,7 +542,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
entry_task_switch(next);
- uao_thread_switch(next);
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 25f3c80b5ffe..4c25c008504f 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -24,6 +24,7 @@
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>
+#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
@@ -118,6 +119,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
@@ -135,8 +137,6 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
return SPECTRE_VULNERABLE;
}
-#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED (1)
-
static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
int ret;
@@ -539,12 +539,12 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
if (spectre_v4_mitigations_off()) {
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
- asm volatile(SET_PSTATE_SSBS(1));
+ set_pstate_ssbs(1);
return SPECTRE_VULNERABLE;
}
/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
- asm volatile(SET_PSTATE_SSBS(0));
+ set_pstate_ssbs(0);
return SPECTRE_MITIGATED;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 43ae4e0c968f..62d2bda7adb8 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -66,7 +66,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu)
static void cpu_psci_cpu_die(unsigned int cpu)
{
- int ret;
/*
* There are no known implementations of PSCI actually using the
* power state field; pass a sensible default for now.
@@ -74,9 +73,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
- ret = psci_ops.cpu_off(state);
-
- pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+ psci_ops.cpu_off(state);
}
static int cpu_psci_cpu_kill(unsigned int cpu)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index f49b349e16a3..8ac487c84e37 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -192,14 +192,11 @@ static void ptrace_hbptriggered(struct perf_event *bp,
break;
}
}
- arm64_force_sig_ptrace_errno_trap(si_errno,
- (void __user *)bkpt->trigger,
+ arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
desc);
}
#endif
- arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
- (void __user *)(bkpt->trigger),
- desc);
+ arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
/*
diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c
deleted file mode 100644
index e8f7ff45dd8f..000000000000
--- a/arch/arm64/kernel/scs.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Shadow Call Stack support.
- *
- * Copyright (C) 2019 Google LLC
- */
-
-#include <linux/percpu.h>
-#include <linux/scs.h>
-
-DEFINE_SCS(irq_shadow_call_stack);
-
-#ifdef CONFIG_ARM_SDE_INTERFACE
-DEFINE_SCS(sdei_shadow_call_stack_normal);
-DEFINE_SCS(sdei_shadow_call_stack_critical);
-#endif
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 7689f2031c0c..2c7ca449dd51 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -7,9 +7,11 @@
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
+#include <linux/scs.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
+#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
@@ -37,6 +39,14 @@ DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
+#endif
+
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
unsigned long *p;
@@ -52,6 +62,9 @@ static void free_sdei_stacks(void)
{
int cpu;
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return;
+
for_each_possible_cpu(cpu) {
_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
@@ -75,6 +88,9 @@ static int init_sdei_stacks(void)
int cpu;
int err = 0;
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return 0;
+
for_each_possible_cpu(cpu) {
err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
if (err)
@@ -90,6 +106,62 @@ static int init_sdei_stacks(void)
return err;
}
+static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
+{
+ void *s;
+
+ s = per_cpu(*ptr, cpu);
+ if (s) {
+ per_cpu(*ptr, cpu) = NULL;
+ scs_free(s);
+ }
+}
+
+static void free_sdei_scs(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ _free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
+ _free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
+ }
+}
+
+static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
+{
+ void *s;
+
+ s = scs_alloc(cpu_to_node(cpu));
+ if (!s)
+ return -ENOMEM;
+ per_cpu(*ptr, cpu) = s;
+
+ return 0;
+}
+
+static int init_sdei_scs(void)
+{
+ int cpu;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
+ if (err)
+ break;
+ err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
+ if (err)
+ break;
+ }
+
+ if (err)
+ free_sdei_scs();
+
+ return err;
+}
+
static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
@@ -130,13 +202,14 @@ unsigned long sdei_arch_get_entry_point(int conduit)
*/
if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
pr_err("Not supported on this hardware/boot configuration\n");
- return 0;
+ goto out_err;
}
- if (IS_ENABLED(CONFIG_VMAP_STACK)) {
- if (init_sdei_stacks())
- return 0;
- }
+ if (init_sdei_stacks())
+ goto out_err;
+
+ if (init_sdei_scs())
+ goto out_err_free_stacks;
sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
@@ -151,6 +224,10 @@ unsigned long sdei_arch_get_entry_point(int conduit)
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
return (unsigned long)__sdei_asm_handler;
+out_err_free_stacks:
+ free_sdei_stacks();
+out_err:
+ return 0;
}
/*
@@ -178,12 +255,6 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
sdei_api_event_context(i, &regs->regs[i]);
}
- /*
- * We didn't take an exception to get here, set PAN. UAO will be cleared
- * by sdei_event_handler()s force_uaccess_begin() call.
- */
- __uaccess_enable_hw_pan();
-
err = sdei_event_handler(regs, arg);
if (err)
return SDEI_EV_FAILED;
@@ -222,17 +293,44 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
return vbar + 0x480;
}
+static void __kprobes notrace __sdei_pstate_entry(void)
+{
+ /*
+ * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
+ * whether PSTATE bits are inherited unchanged or generated from
+ * scratch, and the TF-A implementation always clears PAN and always
+ * clears UAO. There are no other known implementations.
+ *
+ * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
+ * PSTATE is modified upon architectural exceptions, and so PAN is
+ * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
+ * cleared.
+ *
+ * We must explicitly reset PAN to the expected state, including
+ * clearing it when the host isn't using it, in case a VM had it set.
+ */
+ if (system_uses_hw_pan())
+ set_pstate_pan(1);
+ else if (cpu_has_pan())
+ set_pstate_pan(0);
+}
-asmlinkage __kprobes notrace unsigned long
+asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
unsigned long ret;
- nmi_enter();
+ /*
+ * We didn't take an exception to get here, so the HW hasn't
+ * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
+ */
+ __sdei_pstate_entry();
+
+ arm64_enter_nmi(regs);
ret = _sdei_handler(regs, arg);
- nmi_exit();
+ arm64_exit_nmi(regs);
return ret;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 133257ffd859..1a57a76e1cc2 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -206,7 +206,7 @@ static void __init request_standard_resources(void)
unsigned long i = 0;
size_t res_size;
- kernel_code.start = __pa_symbol(_text);
+ kernel_code.start = __pa_symbol(_stext);
kernel_code.end = __pa_symbol(__init_begin - 1);
kernel_data.start = __pa_symbol(_sdata);
kernel_data.end = __pa_symbol(_end - 1);
@@ -283,7 +283,7 @@ u64 cpu_logical_map(int cpu)
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
- init_mm.start_code = (unsigned long) _text;
+ init_mm.start_code = (unsigned long) _stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
@@ -366,7 +366,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
- init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
+ init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index a8184cad8890..af5c6c6638f7 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -922,9 +922,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
trace_hardirqs_off();
do {
- /* Check valid user FS if needed */
- addr_limit_user_check();
-
if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */
local_daif_restore(DAIF_PROCCTX_NOIRQ);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ba40d57757d6..4be7f7eed875 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -99,7 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
- bl el2_setup // if in EL2 drop to EL1 cleanly
+ bl init_kernel_el
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 82e75fc2c903..2499b895efea 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -222,6 +222,7 @@ asmlinkage notrace void secondary_start_kernel(void)
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
+ rcu_cpu_starting(cpu);
preempt_disable();
trace_hardirqs_off();
@@ -412,6 +413,7 @@ void cpu_die_early(void)
/* Mark this CPU absent */
set_cpu_present(cpu, 0);
+ rcu_report_dead(cpu);
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
update_cpu_boot_status(CPU_KILL_ME);
@@ -785,14 +787,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_WAKEUP, "CPU wake-up interrupts"),
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_WAKEUP] = "CPU wake-up interrupts",
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 96cd347c7a46..a67b37a7a47e 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void)
* features that might not have been set correctly.
*/
__uaccess_enable_hw_pan();
- uao_thread_switch(current);
/*
* Restore HW breakpoint registers to sane values
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 3c18c2454089..265fe3eb1069 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -68,7 +68,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
*/
long compat_arm_syscall(struct pt_regs *regs, int scno)
{
- void __user *addr;
+ unsigned long addr;
switch (scno) {
/*
@@ -111,8 +111,7 @@ long compat_arm_syscall(struct pt_regs *regs, int scno)
break;
}
- addr = (void __user *)instruction_pointer(regs) -
- (compat_thumb_mode(regs) ? 2 : 4);
+ addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs,
SIGILL, ILL_ILLTRP, addr, scno);
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index e4c0dadf0d92..f61e9d8cc55a 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -121,9 +121,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
- user_exit();
- if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
+ if (flags & _TIF_MTE_ASYNC_FAULT) {
/*
* Process the asynchronous tag check fault before the actual
* syscall. do_notify_resume() will send a signal to userspace
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 543c67cae02f..c8308befdb1e 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -124,6 +124,12 @@ int __init parse_acpi_topology(void)
#endif
#ifdef CONFIG_ARM64_AMU_EXTN
+#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
+#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
+#else
+#define read_corecnt() (0UL)
+#define read_constcnt() (0UL)
+#endif
#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
@@ -133,54 +139,58 @@ static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;
-/* Initialize counter reference per-cpu variables for the current CPU */
-void init_cpu_freq_invariance_counters(void)
+void update_freq_counters_refs(void)
{
- this_cpu_write(arch_core_cycles_prev,
- read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
- this_cpu_write(arch_const_cycles_prev,
- read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
+ this_cpu_write(arch_core_cycles_prev, read_corecnt());
+ this_cpu_write(arch_const_cycles_prev, read_constcnt());
}
-static int validate_cpu_freq_invariance_counters(int cpu)
+static inline bool freq_counters_valid(int cpu)
{
- u64 max_freq_hz, ratio;
+ if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
+ return false;
if (!cpu_has_amu_feat(cpu)) {
pr_debug("CPU%d: counters are not supported.\n", cpu);
- return -EINVAL;
+ return false;
}
if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
!per_cpu(arch_core_cycles_prev, cpu))) {
pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
- return -EINVAL;
+ return false;
}
- /* Convert maximum frequency from KHz to Hz and validate */
- max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
- if (unlikely(!max_freq_hz)) {
- pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
+ return true;
+}
+
+static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
+{
+ u64 ratio;
+
+ if (unlikely(!max_rate || !ref_rate)) {
+ pr_debug("CPU%d: invalid maximum or reference frequency.\n",
+ cpu);
return -EINVAL;
}
/*
* Pre-compute the fixed ratio between the frequency of the constant
- * counter and the maximum frequency of the CPU.
+ * reference counter and the maximum frequency of the CPU.
*
- * const_freq
- * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE²
- * cpuinfo_max_freq
+ * ref_rate
+ * arch_max_freq_scale = ---------- * SCHED_CAPACITY_SCALE²
+ * max_rate
*
* We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
* in order to ensure a good resolution for arch_max_freq_scale for
- * very low arch timer frequencies (down to the KHz range which should
+ * very low reference frequencies (down to the kHz range, which should
* be unlikely).
*/
- ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
- ratio = div64_u64(ratio, max_freq_hz);
+ ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
+ ratio = div64_u64(ratio, max_rate);
if (!ratio) {
- WARN_ONCE(1, "System timer frequency too low.\n");
+ WARN_ONCE(1, "Reference frequency too low.\n");
return -EINVAL;
}
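
A worked example of the ratio above, with illustrative numbers (50 MHz reference counter, 2 GHz maximum CPU frequency, SCHED_CAPACITY_SHIFT = 10):

	/*
	 * ratio = (50,000,000 << 20) / 2,000,000,000
	 *       = 52,428,800,000,000 / 2,000,000,000
	 *       ~= 26214
	 *
	 * i.e. about 2.5% of SCHED_CAPACITY_SCALE^2; without the shift the
	 * quotient would round to zero for low reference rates.
	 */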
@@ -213,6 +223,7 @@ static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
static int __init init_amu_fie(void)
{
+ bool invariance_status = topology_scale_freq_invariant();
cpumask_var_t valid_cpus;
bool have_policy = false;
int ret = 0;
@@ -227,8 +238,12 @@ static int __init init_amu_fie(void)
}
for_each_present_cpu(cpu) {
- if (validate_cpu_freq_invariance_counters(cpu))
+ if (!freq_counters_valid(cpu) ||
+ freq_inv_set_max_ratio(cpu,
+ cpufreq_get_hw_max_freq(cpu) * 1000,
+ arch_timer_get_rate()))
continue;
+
cpumask_set_cpu(cpu, valid_cpus);
have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
}
@@ -255,6 +270,15 @@ static int __init init_amu_fie(void)
if (!topology_scale_freq_invariant())
static_branch_disable(&amu_fie_key);
+ /*
+ * Task scheduler behavior depends on frequency invariance support,
+ * either cpufreq or counter driven. If the support status changes as
+ * a result of counter initialisation and use, retrigger the build of
+ * scheduling domains to ensure the information is propagated properly.
+ */
+ if (invariance_status != topology_scale_freq_invariant())
+ rebuild_sched_domains_energy();
+
free_valid_mask:
free_cpumask_var(valid_cpus);
@@ -280,11 +304,14 @@ void topology_scale_freq_tick(void)
if (!cpumask_test_cpu(cpu, amu_fie_cpus))
return;
- const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
- core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+ update_freq_counters_refs();
+
+ const_cnt = this_cpu_read(arch_const_cycles_prev);
+ core_cnt = this_cpu_read(arch_core_cycles_prev);
+
if (unlikely(core_cnt <= prev_core_cnt ||
const_cnt <= prev_const_cnt))
goto store_and_exit;
@@ -309,4 +336,71 @@ store_and_exit:
this_cpu_write(arch_core_cycles_prev, core_cnt);
this_cpu_write(arch_const_cycles_prev, const_cnt);
}
-#endif /* CONFIG_ARM64_AMU_EXTN */
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+static void cpu_read_corecnt(void *val)
+{
+ *(u64 *)val = read_corecnt();
+}
+
+static void cpu_read_constcnt(void *val)
+{
+ *(u64 *)val = read_constcnt();
+}
+
+static inline
+int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
+{
+ /*
+ * Abort the call on a counterless CPU, or when interrupts are
+ * disabled - the cross-CPU sync call could otherwise deadlock.
+ */
+ if (!cpu_has_amu_feat(cpu))
+ return -EOPNOTSUPP;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ smp_call_function_single(cpu, func, val, 1);
+
+ return 0;
+}
+
+/*
+ * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
+ * below.
+ */
+bool cpc_ffh_supported(void)
+{
+ return freq_counters_valid(get_cpu_with_amu_feat());
+}
+
+int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch ((u64)reg->address) {
+ case 0x0:
+ ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
+ break;
+ case 0x1:
+ ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
+ break;
+ }
+
+ if (!ret) {
+ *val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
+ reg->bit_offset);
+ *val >>= reg->bit_offset;
+ }
+
+ return ret;
+}
+
+int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
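
cpc_read_ffh() interprets FFH register address 0x0 as the core cycle counter and 0x1 as the constant reference counter, then extracts the field the _CPC register describes. An example with illustrative values:

	/*
	 * reg->bit_offset = 0, reg->bit_width = 32:
	 *
	 *   *val &= GENMASK_ULL(0 + 32 - 1, 0);   // == GENMASK_ULL(31, 0)
	 *   *val >>= 0;                           // counter clipped to 32 bits
	 */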
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8af4e0e85736..08156be75569 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -34,6 +34,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
@@ -170,32 +171,32 @@ static void arm64_show_signal(int signo, const char *str)
__show_regs(regs);
}
-void arm64_force_sig_fault(int signo, int code, void __user *addr,
+void arm64_force_sig_fault(int signo, int code, unsigned long far,
const char *str)
{
arm64_show_signal(signo, str);
if (signo == SIGKILL)
force_sig(SIGKILL);
else
- force_sig_fault(signo, code, addr);
+ force_sig_fault(signo, code, (void __user *)far);
}
-void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
+void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
const char *str)
{
arm64_show_signal(SIGBUS, str);
- force_sig_mceerr(code, addr, lsb);
+ force_sig_mceerr(code, (void __user *)far, lsb);
}
-void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
+void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
const char *str)
{
arm64_show_signal(SIGTRAP, str);
- force_sig_ptrace_errno_trap(errno, addr);
+ force_sig_ptrace_errno_trap(errno, (void __user *)far);
}
void arm64_notify_die(const char *str, struct pt_regs *regs,
- int signo, int sicode, void __user *addr,
+ int signo, int sicode, unsigned long far,
int err)
{
if (user_mode(regs)) {
@@ -203,7 +204,7 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
current->thread.fault_address = 0;
current->thread.fault_code = err;
- arm64_force_sig_fault(signo, sicode, addr, str);
+ arm64_force_sig_fault(signo, sicode, far, str);
} else {
die(str, regs, err);
}
@@ -374,7 +375,7 @@ void force_signal_inject(int signal, int code, unsigned long address, unsigned i
signal = SIGKILL;
}
- arm64_notify_die(desc, regs, signal, code, (void __user *)address, err);
+ arm64_notify_die(desc, regs, signal, code, address, err);
}
/*
@@ -385,7 +386,7 @@ void arm64_notify_segfault(unsigned long addr)
int code;
mmap_read_lock(current->mm);
- if (find_vma(current->mm, addr) == NULL)
+ if (find_vma(current->mm, untagged_addr(addr)) == NULL)
code = SEGV_MAPERR;
else
code = SEGV_ACCERR;
@@ -448,12 +449,13 @@ NOKPROBE_SYMBOL(do_ptrauth_fault);
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
- unsigned long address;
+ unsigned long tagged_address, address;
int rt = ESR_ELx_SYS64_ISS_RT(esr);
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
- address = untagged_addr(pt_regs_read_reg(regs, rt));
+ tagged_address = pt_regs_read_reg(regs, rt);
+ address = untagged_addr(tagged_address);
switch (crm) {
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
@@ -480,7 +482,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
}
if (ret)
- arm64_notify_segfault(address);
+ arm64_notify_segfault(tagged_address);
else
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
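
The tagged/untagged split here matters because the VMA lookup needs the canonical address while any resulting SIGSEGV should report the pointer the task actually used. untagged_addr() is assumed to sign-extend from bit 55, roughly:

	/* Sketch of the helper assumed here (see asm/memory.h) */
	#define untagged_addr(addr) \
		((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))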
@@ -753,8 +755,10 @@ const char *esr_get_class_string(u32 esr)
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
+ arm64_enter_nmi(regs);
+
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
@@ -772,7 +776,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
*/
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
- void __user *pc = (void __user *)instruction_pointer(regs);
+ unsigned long pc = instruction_pointer(regs);
current->thread.fault_address = 0;
current->thread.fault_code = esr;
@@ -786,7 +790,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
-asmlinkage void handle_bad_stack(struct pt_regs *regs)
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
@@ -794,6 +798,8 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
unsigned int esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
+ arm64_enter_nmi(regs);
+
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
@@ -865,23 +871,16 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
-asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
{
- nmi_enter();
+ arm64_enter_nmi(regs);
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
- nmi_exit();
-}
-
-asmlinkage void enter_from_user_mode(void)
-{
- CT_WARN_ON(ct_state() != CONTEXT_USER);
- user_exit_irqoff();
+ arm64_exit_nmi(regs);
}
-NOKPROBE_SYMBOL(enter_from_user_mode);
/* GENERIC_BUG traps */
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index debb8995d57f..cee5d04ea9ad 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -78,17 +78,9 @@ static union {
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
-static int __vdso_remap(enum vdso_abi abi,
- const struct vm_special_mapping *sm,
- struct vm_area_struct *new_vma)
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
{
- unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
- unsigned long vdso_size = vdso_info[abi].vdso_code_end -
- vdso_info[abi].vdso_code_start;
-
- if (vdso_size != new_size)
- return -EINVAL;
-
current->mm->context.vdso = (void *)new_vma->vm_start;
return 0;
@@ -219,17 +211,6 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
return vmf_insert_pfn(vma, vmf->address, pfn);
}
-static int vvar_mremap(const struct vm_special_mapping *sm,
- struct vm_area_struct *new_vma)
-{
- unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-
- if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
- return -EINVAL;
-
- return 0;
-}
-
static int __setup_additional_pages(enum vdso_abi abi,
struct mm_struct *mm,
struct linux_binprm *bprm,
@@ -280,12 +261,6 @@ up_fail:
/*
* Create and map the vectors page for AArch32 tasks.
*/
-static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
- struct vm_area_struct *new_vma)
-{
- return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
-}
-
enum aarch32_map {
AA32_MAP_VECTORS, /* kuser helpers */
AA32_MAP_SIGPAGE,
@@ -308,11 +283,10 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
- .mremap = vvar_mremap,
},
[AA32_MAP_VDSO] = {
.name = "[vdso]",
- .mremap = aarch32_vdso_mremap,
+ .mremap = vdso_mremap,
},
};
@@ -453,12 +427,6 @@ out:
}
#endif /* CONFIG_COMPAT */
-static int vdso_mremap(const struct vm_special_mapping *sm,
- struct vm_area_struct *new_vma)
-{
- return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
-}
-
enum aarch64_map {
AA64_MAP_VVAR,
AA64_MAP_VDSO,
@@ -468,7 +436,6 @@ static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
[AA64_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
- .mremap = vvar_mremap,
},
[AA64_MAP_VDSO] = {
.name = "[vdso]",
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d65f52264aba..a8f8e409e2bf 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -28,7 +28,7 @@ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
$(btildflags-y) -T
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
-ccflags-y += -DDISABLE_BRANCH_PROFILING
+ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
KASAN_SANITIZE := n
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 7f96a1a9f68c..a1e0f91e6cea 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -22,16 +22,21 @@ endif
CC_COMPAT ?= $(CC)
CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS)
+
+ifneq ($(LLVM),)
+LD_COMPAT ?= $(LD)
+else
+LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
+endif
else
CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
endif
cc32-option = $(call try-run,\
$(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
cc32-disable-warning = $(call try-run,\
$(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
-cc32-ldoption = $(call try-run,\
- $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
cc32-as-instr = $(call try-run,\
printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
@@ -43,7 +48,7 @@ cc32-as-instr = $(call try-run,\
# As a result we set our own flags here.
# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
-VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
+VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
VDSO_CPPFLAGS += $(LINUXINCLUDE)
# Common C and assembly flags
@@ -122,14 +127,10 @@ dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
VDSO_CFLAGS += $(dmbinstr)
VDSO_AFLAGS += $(dmbinstr)
-VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
# From arm vDSO Makefile
-VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
-VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft
-VDSO_LDFLAGS += -Wl,--hash-style=sysv
-VDSO_LDFLAGS += -Wl,--build-id=sha1
-VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd)
+VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
+VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id=sha1
# Borrow vdsomunge.c from the arm vDSO
@@ -189,8 +190,8 @@ quiet_cmd_vdsold_and_vdso_check = LD32 $@
cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
quiet_cmd_vdsold = LD32 $@
- cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
- -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+ cmd_vdsold = $(LD_COMPAT) $(VDSO_LDFLAGS) \
+ -T $(filter %.lds,$^) $(filter %.o,$^) -o $@
quiet_cmd_vdsocc = CC32 $@
cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
quiet_cmd_vdsocc_gettimeofday = CC32 $@
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6d78c041fdf6..5d5857c5b025 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -121,7 +121,7 @@ SECTIONS
_text = .;
HEAD_TEXT
}
- .text : { /* Real text segment */
+ .text : ALIGN(SEGMENT_ALIGN) { /* Real text segment */
_stext = .; /* Text and read-only data */
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -164,13 +164,11 @@ SECTIONS
. += PAGE_SIZE;
#endif
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- reserved_ttbr0 = .;
- . += RESERVED_TTBR0_SIZE;
-#endif
+ reserved_pg_dir = .;
+ . += PAGE_SIZE;
+
swapper_pg_dir = .;
. += PAGE_SIZE;
- swapper_pg_end = .;
. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
@@ -201,7 +199,7 @@ SECTIONS
INIT_CALLS
CON_INITCALL
INIT_RAM_FS
- *(.init.rodata.* .init.bss) /* from the EFI stub */
+ *(.init.altinstructions .init.bss) /* from the EFI stub */
}
.exit.data : {
EXIT_DATA
@@ -278,7 +276,7 @@ SECTIONS
* explicitly check instead of blindly discarding.
*/
.plt : {
- *(.plt) *(.plt.*) *(.iplt) *(.igot)
+ *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
}
ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f56122eedffc..c0ffb019ca8b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -102,6 +102,20 @@ static int kvm_arm_default_max_vcpus(void)
return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}
+static void set_default_csv2(struct kvm *kvm)
+{
+ /*
+ * The default is to expose CSV2 == 1 if the HW isn't affected.
+ * Although this is a per-CPU feature, we make it global because
+ * asymmetric systems are just a nuisance.
+ *
+ * Userspace can override this as long as it doesn't promise
+ * the impossible.
+ */
+ if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+ kvm->arch.pfr0_csv2 = 1;
+}
+
/**
* kvm_arch_init_vm - initializes a VM data structure
* @kvm: pointer to the KVM struct
@@ -127,6 +141,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
+ set_default_csv2(kvm);
+
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(&kvm->arch.mmu);
@@ -808,6 +824,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_enable();
+ /*
+ * The ARMv8 architecture doesn't give the hypervisor
+ * a mechanism to prevent a guest from dropping to AArch32 EL0
+	 * if implemented by the CPU. If we spot the guest in such
+	 * a state and decide it wasn't supposed to get there (as
+	 * with the asymmetric AArch32 case), return to userspace
+	 * with a fatal error.
+ */
+ if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+ /*
+ * As we have caught the guest red-handed, decide that
+ * it isn't fit for purpose anymore by making the vcpu
+ * invalid. The VMM can try and fix it by issuing a
+ * KVM_ARM_VCPU_INIT if it really wants to.
+ */
+ vcpu->arch.target = -1;
+ ret = ARM_EXCEPTION_IL;
+ }
+
ret = handle_exit(vcpu, ret);
}
@@ -1719,7 +1754,8 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
- if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+ if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
+ cpus_have_final_cap(ARM64_WORKAROUND_1508412))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
"Only trusted guests should be used on this system.\n");
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 313a8fa3c721..1f875a8f20c4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -140,9 +140,9 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
* We do need to save/restore PAR_EL1 though, as we haven't
* saved the guest context yet, and we may return early...
*/
- par = read_sysreg(par_el1);
+ par = read_sysreg_par();
if (!__kvm_at("s1e1r", far))
- tmp = read_sysreg(par_el1);
+ tmp = read_sysreg_par();
else
tmp = SYS_PAR_EL1_F; /* back to the guest */
write_sysreg(par, par_el1);
@@ -421,7 +421,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
handle_tx2_tvm(vcpu))
- return true;
+ goto guest;
/*
* We trap the first access to the FP/SIMD to save the host context
@@ -431,13 +431,13 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* Similarly for trapped SVE accesses.
*/
if (__hyp_handle_fpsimd(vcpu))
- return true;
+ goto guest;
if (__hyp_handle_ptrauth(vcpu))
- return true;
+ goto guest;
if (!__populate_fault_info(vcpu))
- return true;
+ goto guest;
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
bool valid;
@@ -452,7 +452,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
int ret = __vgic_v2_perform_cpuif_access(vcpu);
if (ret == 1)
- return true;
+ goto guest;
/* Promote an illegal access to an SError. */
if (ret == -1)
@@ -468,12 +468,17 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
int ret = __vgic_v3_perform_cpuif_access(vcpu);
if (ret == 1)
- return true;
+ goto guest;
}
exit:
/* Return to the host kernel and handle the exit */
return false;
+
+guest:
+ /* Re-enter the guest */
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
+ return true;
}
static inline void __kvm_unexpected_el2_exception(void)
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 7a986030145f..cce43bfe158f 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -43,7 +43,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
- ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg(par_el1);
+ ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index ff9a0f547b9f..ed27f06a31ba 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -17,8 +17,6 @@ SYM_FUNC_START(__host_exit)
get_host_ctxt x0, x1
- ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
-
/* Store the host regs x2 and x3 */
stp x2, x3, [x0, #CPU_XREG_OFFSET(2)]
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 47224dc62c51..b11a9d7db677 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -57,16 +57,25 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- /* Set tpidr_el2 for use by HYP to free a register */
- msr tpidr_el2, x2
-
- mov x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
- cmp x0, x2
- b.eq 1f
+ // We only actively check bits [24:31], and everything
+ // else has to be zero, which we check at build time.
+#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
+#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
+#endif
+
+ ror x0, x0, #24
+ eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
+ ror x0, x0, #4
+ eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
+ cbz x0, 1f
mov x0, #SMCCC_RET_NOT_SUPPORTED
eret
-1: phys_to_ttbr x0, x1
+1:
+ /* Set tpidr_el2 for use by HYP to free a register */
+ msr tpidr_el2, x2
+
+ phys_to_ttbr x0, x1
alternative_if ARM64_HAS_CNP
orr x0, x0, #TTBR_CNP_BIT
alternative_else_nop_endif
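The rotate-and-eor sequence above compares x0 against the init function ID without needing a scratch register; the build-time #if guarantees that only bits [31:24] of the constant can be nonzero, so two 4-bit EOR immediates cancel the whole value. A hedged userspace re-derivation, where HYP_INIT_FN is a made-up stand-in for KVM_HOST_SMCCC_FUNC(__kvm_hyp_init):

#include <stdint.h>
#include <assert.h>

#define HYP_INIT_FN 0xC6000000ULL	/* hypothetical: only bits [31:24] set */

static uint64_t ror64(uint64_t v, unsigned int s)
{
	return (v >> s) | (v << (64 - s));
}

/* Rotate so each nibble of the expected ID can be cancelled with a
 * 4-bit EOR immediate; the register is zero iff every bit matched. */
static int is_hyp_init(uint64_t x0)
{
	x0 = ror64(x0, 24);
	x0 ^= (HYP_INIT_FN >> 24) & 0xF;
	x0 = ror64(x0, 4);
	x0 ^= (HYP_INIT_FN >> 28) & 0xF;
	return x0 == 0;
}

int main(void)
{
	assert(is_hyp_init(HYP_INIT_FN));
	assert(!is_hyp_init(HYP_INIT_FN + 1));
	return 0;
}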
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index bb2d986ff696..a797abace13f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -13,6 +13,11 @@
SECTIONS {
HYP_SECTION(.text)
+ /*
+ * .hyp..data..percpu needs to be page aligned to maintain the same
+ * alignment for when linking into vmlinux.
+ */
+ . = ALIGN(PAGE_SIZE);
HYP_SECTION_NAME(.data..percpu) : {
PERCPU_INPUT(L1_CACHE_BYTES)
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index a457a0306e03..8ae8160bc93a 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -250,7 +250,7 @@ void __noreturn hyp_panic(void)
{
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
- u64 par = read_sysreg(par_el1);
+ u64 par = read_sysreg_par();
bool restore_host = true;
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 39ca71ab8866..fbde89a2c6e8 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -128,7 +128,6 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- mmu = kern_hyp_va(mmu);
__tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1);
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 0cdf6e461cbd..bdf8e55ed308 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -470,6 +470,15 @@ static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (!kvm_block_mapping_supported(addr, end, phys, level))
return false;
+ /*
+ * If the PTE was already valid, drop the refcount on the table
+	 * early, as it will be bumped up again in stage2_map_walk_leaf().
+	 * This ensures that the refcount stays constant across a
+	 * valid-to-valid PTE update.
+ */
+ if (kvm_pte_valid(*ptep))
+ put_page(virt_to_page(ptep));
+
if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level))
goto out;
@@ -493,7 +502,13 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
return 0;
kvm_set_invalid_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, 0);
+
+ /*
+ * Invalidate the whole stage-2, as we may have numerous leaf
+ * entries below us which would otherwise need invalidating
+ * individually.
+ */
+ kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
data->anchor = ptep;
return 0;
}
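The early put_page() above keeps the table page's refcount flat across a valid-to-valid update, because the leaf installer takes a reference again. A toy model of that invariant, with get_page/put_page reduced to a bare counter:

#include <assert.h>

static int refcount = 1;	/* the table page already holds one ref */

static void get_page(void) { refcount++; }
static void put_page(void) { refcount--; }

/* valid -> valid: drop early, the installer re-takes, net change 0 */
static void map_leaf(int pte_was_valid)
{
	if (pte_was_valid)
		put_page();
	get_page();		/* done by stage2_map_walk_leaf() here */
}

int main(void)
{
	map_leaf(1);
	assert(refcount == 1);	/* invariant preserved */
	return 0;
}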
@@ -635,7 +650,7 @@ static void stage2_flush_dcache(void *addr, u64 size)
static bool stage2_pte_cacheable(kvm_pte_t pte)
{
- u64 memattr = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR, pte);
+ u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
return memattr == PAGE_S2_MEMATTR(NORMAL);
}
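The stage2_pte_cacheable() change works because PAGE_S2_MEMATTR() yields an in-place (unshifted) value, so the comparison has to mask rather than extract. A small demonstration with a made-up field layout (bits [5:2] here are illustrative, not the real MEMATTR position):

#include <stdint.h>
#include <assert.h>

#define MEMATTR_SHIFT	2
#define MEMATTR_MASK	(0xFULL << MEMATTR_SHIFT)
#define MEMATTR(v)	(((uint64_t)(v) << MEMATTR_SHIFT) & MEMATTR_MASK)

/* FIELD_GET-style extraction shifts the field down ... */
#define FIELD_GET_(mask, v)	(((v) & (mask)) >> MEMATTR_SHIFT)

int main(void)
{
	uint64_t pte = MEMATTR(0xF) | 1;

	/* ... so it can never equal the in-place constant, */
	assert(FIELD_GET_(MEMATTR_MASK, pte) != MEMATTR(0xF));
	/* whereas masking in place compares like with like */
	assert((pte & MEMATTR_MASK) == MEMATTR(0xF));
	return 0;
}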
@@ -846,7 +861,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm)
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
- pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL | __GFP_ZERO);
+ pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!pgt->pgd)
return -ENOMEM;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index fe69de16dadc..62546e20b251 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -215,7 +215,7 @@ void __noreturn hyp_panic(void)
{
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
- u64 par = read_sysreg(par_el1);
+ u64 par = read_sysreg_par();
__hyp_call_panic(spsr, elr, par);
unreachable();
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 9824025ccc5c..25ea4ecb6449 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -31,7 +31,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
val = SMCCC_RET_SUCCESS;
break;
case SPECTRE_UNAFFECTED:
- val = SMCCC_RET_NOT_REQUIRED;
+ val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
break;
}
break;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 19aacc7d64de..75814a02d189 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -754,10 +754,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- unsigned long vma_pagesize;
+ unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
+ unsigned long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
+ fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
write_fault = kvm_is_write_fault(vcpu);
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
@@ -787,14 +789,28 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_shift = PAGE_SHIFT;
}
- if (vma_shift == PUD_SHIFT &&
- !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
- vma_shift = PMD_SHIFT;
-
- if (vma_shift == PMD_SHIFT &&
- !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
- force_pte = true;
+ switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SHIFT:
+ if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+ break;
+ fallthrough;
+#endif
+ case CONT_PMD_SHIFT:
+ vma_shift = PMD_SHIFT;
+ fallthrough;
+ case PMD_SHIFT:
+ if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+ break;
+ fallthrough;
+ case CONT_PTE_SHIFT:
vma_shift = PAGE_SHIFT;
+ force_pte = true;
+ fallthrough;
+ case PAGE_SHIFT:
+ break;
+ default:
+ WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
}
vma_pagesize = 1UL << vma_shift;
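The switch above downgrades the mapping size step by step: a contiguous hint first collapses to its base size, then each base size falls through to the next smaller one when the memslot can't back it. A compact sketch of the same cascade, with illustrative shift values and supports() standing in for fault_supports_stage2_huge_mapping():

#include <stdio.h>

enum { PAGE_SH = 12, CONT_PTE_SH = 16, PMD_SH = 21, CONT_PMD_SH = 25, PUD_SH = 30 };

static int downgrade(int vma_shift, int (*supports)(int))
{
	switch (vma_shift) {
	case PUD_SH:
		if (supports(PUD_SH))
			break;
		/* fallthrough */
	case CONT_PMD_SH:
		vma_shift = PMD_SH;
		/* fallthrough */
	case PMD_SH:
		if (supports(PMD_SH))
			break;
		/* fallthrough */
	case CONT_PTE_SH:
		vma_shift = PAGE_SH;
		/* fallthrough */
	case PAGE_SH:
		break;
	default:
		vma_shift = PAGE_SH;	/* conservative fallback */
	}
	return vma_shift;
}

static int pmd_only(int shift) { return shift == PMD_SH; }

int main(void)
{
	printf("%d\n", downgrade(PUD_SH, pmd_only));	/* 21: PUD -> PMD */
	return 0;
}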
@@ -839,6 +855,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (kvm_is_device_pfn(pfn)) {
device = true;
+ force_pte = true;
} else if (logging_active && !write_fault) {
/*
* Only actually map the page as writable if this was a write
@@ -881,7 +898,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X;
- if (fault_status == FSC_PERM && !(logging_active && writable)) {
+ /*
+	 * For an FSC_PERM fault we only need to relax permissions when
+	 * vma_pagesize equals fault_granule; otherwise,
+	 * kvm_pgtable_stage2_map() should be called to change the block size.
+ */
+ if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
} else {
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d9117bc56237..c1fac9836af1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -95,7 +95,7 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
- case PAR_EL1: *val = read_sysreg_s(SYS_PAR_EL1); break;
+ case PAR_EL1: *val = read_sysreg_par(); break;
case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
@@ -1038,8 +1038,8 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
-static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
kvm_inject_undefined(vcpu);
@@ -1047,33 +1047,25 @@ static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
}
/* Macro to expand the AMU counter and type registers*/
-#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu }
-#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), access_amu }
-#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu }
-#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), access_amu }
-
-static bool trap_ptrauth(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- /*
- * If we land here, that is because we didn't fixup the access on exit
- * by allowing the PtrAuth sysregs. The only way this happens is when
- * the guest does not have PtrAuth support enabled.
- */
- kvm_inject_undefined(vcpu);
-
- return false;
-}
+#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
+#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
+#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
+#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+ return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}
+/*
+ * If we land here on a PtrAuth access, that is because we didn't
+ * fix up the access on exit by allowing the PtrAuth sysregs. The only
+ * way this happens is when the guest does not have PtrAuth support
+ * enabled.
+ */
#define __PTRAUTH_KEY(k) \
- { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \
+ { SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
.visibility = ptrauth_visibility}
#define PTRAUTH_KEY(k) \
@@ -1128,9 +1120,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
if (!vcpu_has_sve(vcpu))
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
- if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
- arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
- val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+ val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
+ val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
} else if (id == SYS_ID_AA64PFR1_EL1) {
val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
@@ -1153,6 +1144,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
return val;
}
+static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+ (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+ switch (id) {
+ case SYS_ID_AA64ZFR0_EL1:
+ if (!vcpu_has_sve(vcpu))
+ return REG_RAZ;
+ break;
+ }
+
+ return 0;
+}
+
/* cpufeature ID register access trap handlers */
static bool __access_id_reg(struct kvm_vcpu *vcpu,
@@ -1171,7 +1178,9 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- return __access_id_reg(vcpu, p, r, false);
+ bool raz = sysreg_visible_as_raz(vcpu, r);
+
+ return __access_id_reg(vcpu, p, r, raz);
}
static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
@@ -1192,71 +1201,40 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
if (vcpu_has_sve(vcpu))
return 0;
- return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
-}
-
-/* Visibility overrides for SVE-specific ID registers */
-static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd)
-{
- if (vcpu_has_sve(vcpu))
- return 0;
-
- return REG_HIDDEN_USER;
+ return REG_HIDDEN;
}
-/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
-static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
-{
- if (!vcpu_has_sve(vcpu))
- return 0;
-
- return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
-}
-
-static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *rd)
-{
- if (p->is_write)
- return write_to_read_only(vcpu, p, rd);
-
- p->regval = guest_id_aa64zfr0_el1(vcpu);
- return true;
-}
-
-static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd,
- const struct kvm_one_reg *reg, void __user *uaddr)
-{
- u64 val;
-
- if (WARN_ON(!vcpu_has_sve(vcpu)))
- return -ENOENT;
-
- val = guest_id_aa64zfr0_el1(vcpu);
- return reg_to_user(uaddr, &val, reg->id);
-}
-
-static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd,
- const struct kvm_one_reg *reg, void __user *uaddr)
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
{
const u64 id = sys_reg_to_index(rd);
int err;
u64 val;
-
- if (WARN_ON(!vcpu_has_sve(vcpu)))
- return -ENOENT;
+ u8 csv2;
err = reg_from_user(&val, uaddr, id);
if (err)
return err;
- /* This is what we mean by invariant: you can't change it. */
- if (val != guest_id_aa64zfr0_el1(vcpu))
+ /*
+ * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
+ * it doesn't promise more than what is actually provided (the
+ * guest could otherwise be covered in ectoplasmic residue).
+ */
+ csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
+ if (csv2 > 1 ||
+ (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
return -EINVAL;
+	/* Only the CSV2 field may differ; anything else is an error */
+ val ^= read_id_reg(vcpu, rd, false);
+ val &= ~(0xFUL << ID_AA64PFR0_CSV2_SHIFT);
+ if (val)
+ return -EINVAL;
+
+ vcpu->kvm->arch.pfr0_csv2 = csv2;
+
return 0;
}
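The XOR-then-mask test above is a compact way of saying "the only field allowed to differ is CSV2": identical bits cancel under XOR, the CSV2 field is cleared, and anything left over is a forbidden change. The same check in isolation (ID_AA64PFR0_CSV2_SHIFT is 56 in current headers, but treat the layout as illustrative):

#include <stdint.h>
#include <assert.h>

#define CSV2_SHIFT	56
#define CSV2_MASK	(0xFULL << CSV2_SHIFT)

/* Accept a userspace value iff it differs from the current register
 * in the CSV2 field alone. */
static int only_csv2_differs(uint64_t cur, uint64_t new)
{
	uint64_t diff = new ^ cur;	/* identical bits cancel */

	return (diff & ~CSV2_MASK) == 0;
}

int main(void)
{
	uint64_t cur = 0x11;

	assert(only_csv2_differs(cur, cur | (1ULL << CSV2_SHIFT)));
	assert(!only_csv2_differs(cur, cur ^ 1));	/* non-CSV2 bit */
	return 0;
}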
@@ -1299,13 +1277,17 @@ static int __set_id_reg(const struct kvm_vcpu *vcpu,
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __get_id_reg(vcpu, rd, uaddr, false);
+ bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+ return __get_id_reg(vcpu, rd, uaddr, raz);
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __set_id_reg(vcpu, rd, uaddr, false);
+ bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+ return __set_id_reg(vcpu, rd, uaddr, raz);
}
static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1384,19 +1366,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
-static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- kvm_inject_undefined(vcpu);
- return false;
-}
-
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
.access = access_id_reg, \
.get_user = get_id_reg, \
.set_user = set_id_reg, \
+ .visibility = id_visibility, \
}
/*
@@ -1514,11 +1490,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 ID registers */
/* CRm=4 */
- ID_SANITISED(ID_AA64PFR0_EL1),
+ { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
+ .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
- { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
+ ID_SANITISED(ID_AA64ZFR0_EL1),
ID_UNALLOCATED(4,5),
ID_UNALLOCATED(4,6),
ID_UNALLOCATED(4,7),
@@ -1557,8 +1534,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
- { SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
- { SYS_DESC(SYS_GCR_EL1), access_mte_regs },
+ { SYS_DESC(SYS_RGSR_EL1), undef_access },
+ { SYS_DESC(SYS_GCR_EL1), undef_access },
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
@@ -1584,8 +1561,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
- { SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
- { SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
+ { SYS_DESC(SYS_TFSR_EL1), undef_access },
+ { SYS_DESC(SYS_TFSRE0_EL1), undef_access },
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
@@ -1621,6 +1598,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
+ { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
+
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
@@ -1649,14 +1628,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
- { SYS_DESC(SYS_AMCR_EL0), access_amu },
- { SYS_DESC(SYS_AMCFGR_EL0), access_amu },
- { SYS_DESC(SYS_AMCGCR_EL0), access_amu },
- { SYS_DESC(SYS_AMUSERENR_EL0), access_amu },
- { SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu },
- { SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu },
- { SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu },
- { SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu },
+ { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
+
+ { SYS_DESC(SYS_AMCR_EL0), undef_access },
+ { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
+ { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
+ { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
+ { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
+ { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
+ { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
+ { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
AMU_AMEVCNTR0_EL0(0),
AMU_AMEVCNTR0_EL0(1),
AMU_AMEVCNTR0_EL0(2),
@@ -1897,9 +1878,9 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(1),
/* DBGDCCINT */
- { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
/* DBGDSCRext */
- { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
DBG_BCR_BVR_WCR_WVR(2),
/* DBGDTR[RT]Xint */
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
@@ -1914,7 +1895,7 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(6),
/* DBGVCR */
- { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
DBG_BCR_BVR_WCR_WVR(7),
DBG_BCR_BVR_WCR_WVR(8),
DBG_BCR_BVR_WCR_WVR(9),
@@ -2185,7 +2166,7 @@ static void perform_access(struct kvm_vcpu *vcpu,
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
/* Check for regs disabled by runtime config */
- if (sysreg_hidden_from_guest(vcpu, r)) {
+ if (sysreg_hidden(vcpu, r)) {
kvm_inject_undefined(vcpu);
return;
}
@@ -2684,7 +2665,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
return get_invariant_sys_reg(reg->id, uaddr);
/* Check for regs disabled by runtime config */
- if (sysreg_hidden_from_user(vcpu, r))
+ if (sysreg_hidden(vcpu, r))
return -ENOENT;
if (r->get_user)
@@ -2709,7 +2690,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
return set_invariant_sys_reg(reg->id, uaddr);
/* Check for regs disabled by runtime config */
- if (sysreg_hidden_from_user(vcpu, r))
+ if (sysreg_hidden(vcpu, r))
return -ENOENT;
if (r->set_user)
@@ -2780,7 +2761,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
if (!(rd->reg || rd->get_user))
return 0;
- if (sysreg_hidden_from_user(vcpu, rd))
+ if (sysreg_hidden(vcpu, rd))
return 0;
if (!copy_reg_to_user(rd, uind))
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 5a6fc30f5989..0f95964339b1 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -59,8 +59,8 @@ struct sys_reg_desc {
const struct sys_reg_desc *rd);
};
-#define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */
-#define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */
+#define REG_HIDDEN (1 << 0) /* hidden from userspace and guest */
+#define REG_RAZ (1 << 1) /* RAZ from userspace and guest */
static __printf(2, 3)
inline void print_sys_reg_msg(const struct sys_reg_params *p,
@@ -111,22 +111,22 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
__vcpu_sys_reg(vcpu, r->reg) = r->val;
}
-static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *r)
+static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
{
if (likely(!r->visibility))
return false;
- return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
+ return r->visibility(vcpu, r) & REG_HIDDEN;
}
-static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *r)
+static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
{
if (likely(!r->visibility))
return false;
- return r->visibility(vcpu, r) & REG_HIDDEN_USER;
+ return r->visibility(vcpu, r) & REG_RAZ;
}
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 52d6f24f65dc..15a6c98ee92f 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
return extract_bytes(value, addr & 7, len);
}
+static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ int target_vcpu_id = vcpu->vcpu_id;
+ u64 value;
+
+ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+ value |= ((target_vcpu_id & 0xffff) << 8);
+
+ if (vgic_has_its(vcpu->kvm))
+ value |= GICR_TYPER_PLPIS;
+
+ /* reporting of the Last bit is not supported for userspace */
+ return extract_bytes(value, addr & 7, len);
+}
+
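The userspace accessor above packs the same affinity/ID layout as the MMIO handler but deliberately leaves the Last bit out. A sketch of the packing, assuming GICR_TYPER_PLPIS sits at bit 0 (verify against the GIC specification before relying on it):

#include <stdint.h>
#include <stdio.h>

/* Pack MPIDR Aff[23:0] into bits [55:32] and the vcpu id into
 * bits [23:8] of a GICR_TYPER-like value. */
static uint64_t mk_typer(uint64_t mpidr, unsigned int vcpu_id, int has_its)
{
	uint64_t value = (mpidr & ((1ULL << 24) - 1)) << 32;

	value |= ((uint64_t)(vcpu_id & 0xffff)) << 8;
	if (has_its)
		value |= 1ULL << 0;	/* assumed PLPIS position */
	return value;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)mk_typer(0x0102, 3, 1));
	return 0;
}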
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
@@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit),
- REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
- vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
+ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
+ vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 48a3a26eff66..af9afcbec92c 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -24,20 +24,20 @@ SYM_FUNC_START(__arch_clear_user)
subs x1, x1, #8
b.mi 2f
1:
-uao_user_alternative 9f, str, sttr, xzr, x0, 8
+user_ldst 9f, sttr, xzr, x0, 8
subs x1, x1, #8
b.pl 1b
2: adds x1, x1, #4
b.mi 3f
-uao_user_alternative 9f, str, sttr, wzr, x0, 4
+user_ldst 9f, sttr, wzr, x0, 4
sub x1, x1, #4
3: adds x1, x1, #2
b.mi 4f
-uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
+user_ldst 9f, sttrh, wzr, x0, 2
sub x1, x1, #2
4: adds x1, x1, #1
b.mi 5f
-uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
+user_ldst 9f, sttrb, wzr, x0, 0
5: mov x0, #0
ret
SYM_FUNC_END(__arch_clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 0f8a3a9e3795..95cd62d67371 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -21,7 +21,7 @@
*/
.macro ldrb1 reg, ptr, val
- uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
+ user_ldst 9998f, ldtrb, \reg, \ptr, \val
.endm
.macro strb1 reg, ptr, val
@@ -29,7 +29,7 @@
.endm
.macro ldrh1 reg, ptr, val
- uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val
+ user_ldst 9998f, ldtrh, \reg, \ptr, \val
.endm
.macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@
.endm
.macro ldr1 reg, ptr, val
- uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val
+ user_ldst 9998f, ldtr, \reg, \ptr, \val
.endm
.macro str1 reg, ptr, val
@@ -45,7 +45,7 @@
.endm
.macro ldp1 reg1, reg2, ptr, val
- uao_ldp 9998f, \reg1, \reg2, \ptr, \val
+ user_ldp 9998f, \reg1, \reg2, \ptr, \val
.endm
.macro stp1 reg1, reg2, ptr, val
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 80e37ada0ee1..1f61cd0df062 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -22,35 +22,35 @@
* x0 - bytes not copied
*/
.macro ldrb1 reg, ptr, val
- uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
+ user_ldst 9998f, ldtrb, \reg, \ptr, \val
.endm
.macro strb1 reg, ptr, val
- uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
+ user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm
.macro ldrh1 reg, ptr, val
- uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val
+ user_ldst 9998f, ldtrh, \reg, \ptr, \val
.endm
.macro strh1 reg, ptr, val
- uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val
+ user_ldst 9998f, sttrh, \reg, \ptr, \val
.endm
.macro ldr1 reg, ptr, val
- uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val
+ user_ldst 9998f, ldtr, \reg, \ptr, \val
.endm
.macro str1 reg, ptr, val
- uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val
+ user_ldst 9998f, sttr, \reg, \ptr, \val
.endm
.macro ldp1 reg1, reg2, ptr, val
- uao_ldp 9998f, \reg1, \reg2, \ptr, \val
+ user_ldp 9998f, \reg1, \reg2, \ptr, \val
.endm
.macro stp1 reg1, reg2, ptr, val
- uao_stp 9998f, \reg1, \reg2, \ptr, \val
+ user_stp 9998f, \reg1, \reg2, \ptr, \val
.endm
end .req x5
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 4ec59704b8f2..043da90f5dd7 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -24,7 +24,7 @@
.endm
.macro strb1 reg, ptr, val
- uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
+ user_ldst 9998f, sttrb, \reg, \ptr, \val
.endm
.macro ldrh1 reg, ptr, val
@@ -32,7 +32,7 @@
.endm
.macro strh1 reg, ptr, val
- uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val
+ user_ldst 9998f, sttrh, \reg, \ptr, \val
.endm
.macro ldr1 reg, ptr, val
@@ -40,7 +40,7 @@
.endm
.macro str1 reg, ptr, val
- uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val
+ user_ldst 9998f, sttr, \reg, \ptr, \val
.endm
.macro ldp1 reg1, reg2, ptr, val
@@ -48,7 +48,7 @@
.endm
.macro stp1 reg1, reg2, ptr, val
- uao_stp 9998f, \reg1, \reg2, \ptr, \val
+ user_stp 9998f, \reg1, \reg2, \ptr, \val
.endm
end .req x5
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index e0bf83d556f2..dc8d2a216a6e 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -56,9 +56,8 @@
stp \reg1, \reg2, [\ptr], \val
.endm
- .weak memcpy
SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_PI(memcpy)
+SYM_FUNC_START_WEAK_PI(memcpy)
#include "copy_template.S"
ret
SYM_FUNC_END_PI(memcpy)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index 02cda2e33bde..1035dce4bdaf 100644
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -45,9 +45,8 @@ C_h .req x12
D_l .req x13
D_h .req x14
- .weak memmove
SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_PI(memmove)
+SYM_FUNC_START_WEAK_PI(memmove)
cmp dstin, src
b.lo __memcpy
add tmp1, src, count
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index 77c3c7ba0084..a9c1c9a01ea9 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -42,9 +42,8 @@ dst .req x8
tmp3w .req w9
tmp3 .req x9
- .weak memset
SYM_FUNC_START_ALIAS(__memset)
-SYM_FUNC_START_PI(memset)
+SYM_FUNC_START_WEAK_PI(memset)
mov dst, dstin /* Preserve return value. */
and A_lw, val, #255
orr A_lw, A_lw, A_lw, lsl #8
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 03ca6d8b8670..351537c12f36 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -4,7 +4,7 @@
*/
#include <linux/linkage.h>
-#include <asm/alternative.h>
+#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/mte.h>
#include <asm/page.h>
@@ -67,7 +67,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
mov x3, x1
cbz x2, 2f
1:
- uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0
+ user_ldst 2f, ldtrb, w4, x1, 0
lsl x4, x4, #MTE_TAG_SHIFT
stg x4, [x0], #MTE_GRANULE_SIZE
add x1, x1, #1
@@ -94,7 +94,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
1:
ldg x4, [x1]
ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
- uao_user_alternative 2f, strb, sttrb, w4, x0, 0
+ user_ldst 2f, sttrb, w4, x0, 0
add x0, x0, #1
add x1, x1, #MTE_GRANULE_SIZE
subs x2, x2, #1
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index bfa30b75b2b8..c83bb5a4aad2 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -30,9 +30,7 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
{
unsigned long rc;
- uaccess_enable_not_uao();
- rc = __arch_copy_from_user(to, from, n);
- uaccess_disable_not_uao();
+ rc = raw_copy_from_user(to, from, n);
/* See above */
__clean_dcache_area_pop(to, n - rc);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94c99c1c19e3..2848952b178d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,7 +40,7 @@
#include <asm/traps.h>
struct fault_info {
- int (*fn)(unsigned long addr, unsigned int esr,
+ int (*fn)(unsigned long far, unsigned int esr,
struct pt_regs *regs);
int sig;
int code;
@@ -262,7 +262,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
local_irq_save(flags);
asm volatile("at s1e1r, %0" :: "r" (addr));
isb();
- par = read_sysreg(par_el1);
+ par = read_sysreg_par();
local_irq_restore(flags);
/*
@@ -385,8 +385,11 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
current->thread.fault_code = esr;
}
-static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long far, unsigned int esr,
+ struct pt_regs *regs)
{
+ unsigned long addr = untagged_addr(far);
+
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
@@ -395,8 +398,7 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
const struct fault_info *inf = esr_to_fault_info(esr);
set_thread_esr(addr, esr);
- arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
- inf->name);
+ arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
} else {
__do_kernel_fault(addr, esr, regs);
}
@@ -448,7 +450,7 @@ static bool is_write_abort(unsigned int esr)
return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
-static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
+static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf;
@@ -456,6 +458,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
vm_fault_t fault;
unsigned long vm_flags = VM_ACCESS_FLAGS;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
+ unsigned long addr = untagged_addr(far);
if (kprobe_page_fault(regs, esr))
return 0;
@@ -479,11 +482,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}
if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
- /* regs->orig_addr_limit may be 0 if we entered from EL0 */
- if (regs->orig_addr_limit == KERNEL_DS)
- die_kernel_fault("access to user memory with fs=KERNEL_DS",
- addr, esr, regs);
-
if (is_el1_instruction_abort(esr))
die_kernel_fault("execution of user memory",
addr, esr, regs);
@@ -567,8 +565,7 @@ retry:
* We had some memory, but were unable to successfully fix up
* this page fault.
*/
- arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
- inf->name);
+ arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
unsigned int lsb;
@@ -576,8 +573,7 @@ retry:
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
- arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
- inf->name);
+ arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
} else {
/*
* Something tried to access memory that isn't in our memory
@@ -585,8 +581,7 @@ retry:
*/
arm64_force_sig_fault(SIGSEGV,
fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
- (void __user *)addr,
- inf->name);
+ far, inf->name);
}
return 0;
@@ -596,33 +591,35 @@ no_context:
return 0;
}
-static int __kprobes do_translation_fault(unsigned long addr,
+static int __kprobes do_translation_fault(unsigned long far,
unsigned int esr,
struct pt_regs *regs)
{
+ unsigned long addr = untagged_addr(far);
+
if (is_ttbr0_addr(addr))
- return do_page_fault(addr, esr, regs);
+ return do_page_fault(far, esr, regs);
- do_bad_area(addr, esr, regs);
+ do_bad_area(far, esr, regs);
return 0;
}
-static int do_alignment_fault(unsigned long addr, unsigned int esr,
+static int do_alignment_fault(unsigned long far, unsigned int esr,
struct pt_regs *regs)
{
- do_bad_area(addr, esr, regs);
+ do_bad_area(far, esr, regs);
return 0;
}
-static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
return 1; /* "fault" */
}
-static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf;
- void __user *siaddr;
+ unsigned long siaddr;
inf = esr_to_fault_info(esr);
@@ -634,19 +631,30 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
return 0;
}
- if (esr & ESR_ELx_FnV)
- siaddr = NULL;
- else
- siaddr = (void __user *)addr;
+ if (esr & ESR_ELx_FnV) {
+ siaddr = 0;
+ } else {
+ /*
+ * The architecture specifies that the tag bits of FAR_EL1 are
+ * UNKNOWN for synchronous external aborts. Mask them out now
+ * so that userspace doesn't see them.
+ */
+ siaddr = untagged_addr(far);
+ }
arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0;
}
-static int do_tag_check_fault(unsigned long addr, unsigned int esr,
+static int do_tag_check_fault(unsigned long far, unsigned int esr,
struct pt_regs *regs)
{
- do_bad_area(addr, esr, regs);
+ /*
+ * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
+ * check faults. Mask them out now so that userspace doesn't see them.
+ */
+ far &= (1UL << 60) - 1;
+ do_bad_area(far, esr, regs);
return 0;
}
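The (1UL << 60) - 1 constant above is simply a mask of bits [59:0]; ANDing with it strips the architecturally UNKNOWN tag bits [63:60] before the address reaches a signal handler. Checked numerically with a made-up faulting address:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t far = 0xF1230000DEADBEEFULL;	/* bits 63:60 carry junk */

	assert((far & ((1ULL << 60) - 1)) == 0x01230000DEADBEEFULL);
	return 0;
}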
@@ -717,11 +725,12 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
};
-void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
+ unsigned long addr = untagged_addr(far);
- if (!inf->fn(addr, esr, regs))
+ if (!inf->fn(far, esr, regs))
return;
if (!user_mode(regs)) {
@@ -730,8 +739,12 @@ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
show_pte(addr);
}
- arm64_notify_die(inf->name, regs,
- inf->sig, inf->code, (void __user *)addr, esr);
+ /*
+ * At this point we have an unrecognized fault type whose tag bits may
+ * have been defined as UNKNOWN. Therefore we only expose the untagged
+ * address to the signal handler.
+ */
+ arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
}
NOKPROBE_SYMBOL(do_mem_abort);
@@ -744,8 +757,8 @@ NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
- arm64_notify_die("SP/PC alignment exception", regs,
- SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
+ arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
+ addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);
@@ -789,25 +802,6 @@ void __init hook_debug_fault_code(int nr,
*/
static void debug_exception_enter(struct pt_regs *regs)
{
- /*
- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
- * already disabled to preserve the last enabled/disabled addresses.
- */
- if (interrupts_enabled(regs))
- trace_hardirqs_off();
-
- if (user_mode(regs)) {
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- } else {
- /*
- * We might have interrupted pretty much anything. In
- * fact, if we're a debug exception, we can even interrupt
- * NMI processing. We don't want this code makes in_nmi()
- * to return true, but we need to notify RCU.
- */
- rcu_nmi_enter();
- }
-
preempt_disable();
/* This code is a bit fragile. Test it. */
@@ -818,12 +812,6 @@ NOKPROBE_SYMBOL(debug_exception_enter);
static void debug_exception_exit(struct pt_regs *regs)
{
preempt_enable_no_resched();
-
- if (!user_mode(regs))
- rcu_nmi_exit();
-
- if (interrupts_enabled(regs))
- trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);
@@ -871,8 +859,7 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
arm64_apply_bp_hardening();
if (inf->fn(addr_if_watchpoint, esr, regs)) {
- arm64_notify_die(inf->name, regs,
- inf->sig, inf->code, (void __user *)pc, esr);
+ arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
}
debug_exception_exit(regs);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 095540667f0f..69d4251ee079 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -29,6 +29,7 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
+#include <linux/acpi_iort.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -42,8 +43,6 @@
#include <asm/tlb.h>
#include <asm/alternative.h>
-#define ARM64_ZONE_DMA_BITS 30
-
/*
* We need to be able to catch inadvertent references to memstart_addr
* that occur (potentially in generic code) before arm64_memblock_init()
@@ -175,21 +174,34 @@ static void __init reserve_elfcorehdr(void)
#endif /* CONFIG_CRASH_DUMP */
/*
- * Return the maximum physical address for a zone with a given address size
- * limit. It currently assumes that for memory starting above 4G, 32-bit
- * devices will use a DMA offset.
+ * Return the maximum physical address for a zone accessible by the given bits
+ * limit. If DRAM starts above 32-bit, expand the zone to the maximum
+ * available memory, otherwise cap it at 32-bit.
*/
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
- phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
- return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
+ phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
+ phys_addr_t phys_start = memblock_start_of_DRAM();
+
+ if (phys_start > U32_MAX)
+ zone_mask = PHYS_ADDR_MAX;
+ else if (phys_start > zone_mask)
+ zone_mask = U32_MAX;
+
+ return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
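The new max_zone_phys() picks the zone limit from where DRAM starts: entirely above 4GiB means no cap, a start above the zone mask caps at 32-bit, and otherwise the mask itself bounds the zone. A toy rerun of that logic with the memblock queries turned into parameters (the macro values mimic the kernel's but are redefined here):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)
#define U32_MAX_		0xFFFFFFFFULL
#define PHYS_ADDR_MAX_	(~0ULL)

static uint64_t max_zone_phys(unsigned int bits, uint64_t dram_start,
			      uint64_t dram_end)
{
	uint64_t zone_mask = DMA_BIT_MASK(bits);

	if (dram_start > U32_MAX_)
		zone_mask = PHYS_ADDR_MAX_;	/* expand to all memory */
	else if (dram_start > zone_mask)
		zone_mask = U32_MAX_;		/* cap at 32-bit */

	return (dram_end - 1 < zone_mask ? dram_end - 1 : zone_mask) + 1;
}

int main(void)
{
	/* DRAM at 2GiB..4GiB with a 30-bit zone: capped at 4GiB */
	printf("%#llx\n", (unsigned long long)
	       max_zone_phys(30, 0x80000000ULL, 0x100000000ULL));
	return 0;
}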
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+ unsigned int __maybe_unused acpi_zone_dma_bits;
+ unsigned int __maybe_unused dt_zone_dma_bits;
#ifdef CONFIG_ZONE_DMA
+ acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
+ dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
+ zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
+ arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
@@ -269,7 +281,7 @@ static void __init fdt_enforce_memory_region(void)
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = BIT(vabits_actual - 1);
+ const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
/* Handle linux,usable-memory-range property */
fdt_enforce_memory_region();
@@ -348,15 +360,18 @@ void __init arm64_memblock_init(void)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
- u64 range = linear_region_size -
- (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ int parange = cpuid_feature_extract_unsigned_field(
+ mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+ s64 range = linear_region_size -
+ BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
/*
* If the size of the linear region exceeds, by a sufficient
- * margin, the size of the region that the available physical
- * memory spans, randomize the linear region as well.
+ * margin, the size of the region that the physical memory can
+ * span, randomize the linear region as well.
*/
- if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+ if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
range /= ARM64_MEMSTART_ALIGN;
memstart_addr -= ARM64_MEMSTART_ALIGN *
((range * memstart_offset_seed) >> 16);
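The expression above scales a 16-bit seed onto the available slack: range is first divided into ARM64_MEMSTART_ALIGN-sized steps, then (range * seed) >> 16 selects a step in [0, range) as a 0.16 fixed-point multiply. A worked example with stand-in numbers (MEMSTART_ALIGN here is illustrative, not the configured value):

#include <stdint.h>
#include <stdio.h>

#define MEMSTART_ALIGN	(1ULL << 27)	/* stand-in for ARM64_MEMSTART_ALIGN */

int main(void)
{
	uint64_t range = 1ULL << 36;	/* slack in the linear region */
	uint16_t seed = 0x8000;		/* ~0.5 in 0.16 fixed point */
	uint64_t offset;

	range /= MEMSTART_ALIGN;	/* number of alignment steps: 512 */
	offset = MEMSTART_ALIGN * ((range * seed) >> 16);

	/* 256 steps of 128MiB = 0x800000000 */
	printf("%#llx\n", (unsigned long long)offset);
	return 0;
}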
@@ -367,7 +382,7 @@ void __init arm64_memblock_init(void)
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa_symbol(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_stext), _end - _stext);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
/* the generic initrd code expects virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start);
@@ -376,18 +391,11 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (IS_ENABLED(CONFIG_ZONE_DMA)) {
- zone_dma_bits = ARM64_ZONE_DMA_BITS;
- arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
- }
-
if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma32_phys_limit = max_zone_phys(32);
else
arm64_dma32_phys_limit = PHYS_MASK + 1;
- reserve_crashkernel();
-
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
@@ -427,73 +435,14 @@ void __init bootmem_init(void)
sparse_init();
zone_sizes_init(min, max);
- memblock_dump_all();
-}
-
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
- struct page *start_pg, *end_pg;
- unsigned long pg, pgend;
-
- /*
- * Convert start_pfn/end_pfn to a struct page pointer.
- */
- start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn - 1) + 1;
-
/*
- * Convert to physical addresses, and round start upwards and end
- * downwards.
+ * request_standard_resources() depends on crashkernel's memory being
+ * reserved, so do it here.
*/
- pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
- pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
-
- /*
- * If there are free pages between these, free the section of the
- * memmap array.
- */
- if (pg < pgend)
- memblock_free(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
- unsigned long start, end, prev_end = 0;
- int i;
-
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-#ifdef CONFIG_SPARSEMEM
- /*
- * Take care not to free memmap entries that don't exist due
- * to SPARSEMEM sections which aren't present.
- */
- start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
- /*
- * If we had a previous bank, and there is a space between the
- * current bank and the previous, free it.
- */
- if (prev_end && prev_end < start)
- free_memmap(prev_end, start);
-
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
- }
+ reserve_crashkernel();
-#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
- free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
+ memblock_dump_all();
}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
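The block removed above (free_memmap()/free_unused_memmap()) moves into generic mm code; its core is the rounding that ensures only whole pages lying strictly inside the unused memmap span are returned. A standalone sketch of that rounding with assumed byte addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_SK 4096u
#define PAGE_MASK_SK (~(PAGE_SIZE_SK - 1))
#define PAGE_ALIGN_SK(x) (((x) + PAGE_SIZE_SK - 1) & PAGE_MASK_SK)

int main(void)
{
	uint32_t start = 0x1234, end = 0x5678;  /* hypothetical span */
	uint32_t pg    = PAGE_ALIGN_SK(start);  /* round start up:  0x2000 */
	uint32_t pgend = end & PAGE_MASK_SK;    /* round end down:  0x5000 */

	if (pg < pgend)  /* only free if a whole page fits in between */
		printf("free [%#x, %#x), %u bytes\n", pg, pgend, pgend - pg);
	return 0;
}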
/*
* mem_init() marks the free areas in the mem_map and tells us how much memory
@@ -510,9 +459,6 @@ void __init mem_init(void)
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
- free_unused_memmap();
-#endif
/* this will put all unused low memory onto the freelists */
memblock_free_all();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 1c0f3e02f731..ae0c3d023824 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -464,20 +464,35 @@ void __init mark_linear_text_alias_ro(void)
/*
* Remove the write permissions from the linear alias of .text/.rodata
*/
- update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
- (unsigned long)__init_begin - (unsigned long)_text,
+ update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
+ (unsigned long)__init_begin - (unsigned long)_stext,
PAGE_KERNEL_RO);
}
+static bool crash_mem_map __initdata;
+
+static int __init enable_crash_mem_map(char *arg)
+{
+ /*
+ * Proper parameter parsing is done by reserve_crashkernel(). We only
+ * need to know if the linear map has to avoid block mappings so that
+ * the crashkernel reservations can be unmapped later.
+ */
+ crash_mem_map = true;
+
+ return 0;
+}
+early_param("crashkernel", enable_crash_mem_map);
+
static void __init map_mem(pgd_t *pgdp)
{
- phys_addr_t kernel_start = __pa_symbol(_text);
+ phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
phys_addr_t start, end;
int flags = 0;
u64 i;
- if (rodata_full || debug_pagealloc_enabled())
+ if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@@ -487,11 +502,6 @@ static void __init map_mem(pgd_t *pgdp)
* the following for-loop
*/
memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
-#ifdef CONFIG_KEXEC_CORE
- if (crashk_res.end)
- memblock_mark_nomap(crashk_res.start,
- resource_size(&crashk_res));
-#endif
/* map all the memory banks */
for_each_mem_range(i, &start, &end) {
@@ -506,7 +516,7 @@ static void __init map_mem(pgd_t *pgdp)
}
/*
- * Map the linear alias of the [_text, __init_begin) interval
+ * Map the linear alias of the [_stext, __init_begin) interval
* as non-executable now, and remove the write permission in
* mark_linear_text_alias_ro() below (which will be called after
* alternative patching has completed). This makes the contents
@@ -518,21 +528,6 @@ static void __init map_mem(pgd_t *pgdp)
__map_memblock(pgdp, kernel_start, kernel_end,
PAGE_KERNEL, NO_CONT_MAPPINGS);
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
-
-#ifdef CONFIG_KEXEC_CORE
- /*
- * Use page-level mappings here so that we can shrink the region
- * in page granularity and put back unused memory to buddy system
- * through /sys/kernel/kexec_crash_size interface.
- */
- if (crashk_res.end) {
- __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
- PAGE_KERNEL,
- NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
- memblock_clear_nomap(crashk_res.start,
- resource_size(&crashk_res));
- }
-#endif
}
void mark_rodata_ro(void)
@@ -665,7 +660,7 @@ static void __init map_kernel(pgd_t *pgdp)
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
*/
- map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
+ map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
VM_NO_GUARD);
map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
&vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
@@ -1132,8 +1127,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
void *p = NULL;
p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
- if (!p)
- return -ENOMEM;
+ if (!p) {
+ if (vmemmap_populate_basepages(addr, next, node, altmap))
+ return -ENOMEM;
+ continue;
+ }
pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
} else
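The hunk above changes vmemmap_populate() to fall back to base-page mappings when a PMD-sized block cannot be allocated, instead of failing outright. A simplified, self-contained sketch of that allocate-huge-or-fall-back shape (the allocator is stubbed to always fail so the fallback path runs):

#include <stdio.h>
#include <stddef.h>

/* stub standing in for vmemmap_alloc_block_buf(); always fails here */
static void *alloc_block_sketch(size_t sz) { (void)sz; return NULL; }

static int populate_sketch(size_t huge_sz, size_t base_sz)
{
	void *p = alloc_block_sketch(huge_sz);

	if (!p) {
		/* was: return -ENOMEM; now map the span with base pages */
		printf("falling back to %zu base pages\n", huge_sz / base_sz);
		return 0;
	}
	/* pmd_set_huge(...) equivalent would go here */
	return 0;
}

int main(void)
{
	return populate_sketch(2u << 20, 4096);  /* 2 MiB span, 4 KiB pages */
}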
@@ -1444,11 +1442,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}
+static bool inside_linear_region(u64 start, u64 size)
+{
+ /*
+ * The linear mapping region is [PAGE_OFFSET..(PAGE_END - 1)], inclusive
+ * of both ends but excluding PAGE_END itself. The maximum physical
+ * range that can be mapped inside the linear mapping must therefore be
+ * derived from those same end points.
+ */
+ return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+ (start + size - 1) <= __pa(PAGE_END - 1);
+}
+
int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params)
{
int ret, flags = 0;
+ if (!inside_linear_region(start, size)) {
+ pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
+ return -EINVAL;
+ }
+
if (rodata_full || debug_pagealloc_enabled())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
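inside_linear_region() above is a plain inclusive-bounds containment test; comparing against start + size - 1 keeps the upper check overflow-safe for a range ending at the very last mappable byte. A standalone version with assumed bounds in place of __pa(_PAGE_OFFSET(vabits_actual)) and __pa(PAGE_END - 1):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool inside_region_sketch(uint64_t start, uint64_t size,
				 uint64_t lo, uint64_t hi_inclusive)
{
	/* same shape as inside_linear_region(); both ends inclusive */
	return start >= lo && (start + size - 1) <= hi_inclusive;
}

int main(void)
{
	uint64_t lo = 0x80000000ULL;   /* hypothetical region start */
	uint64_t hi = 0xffffffffULL;   /* hypothetical last valid byte */

	printf("%d\n", inside_region_sketch(0x90000000ULL, 0x1000, lo, hi)); /* 1 */
	printf("%d\n", inside_region_sketch(0xfffff000ULL, 0x2000, lo, hi)); /* 0 */
	return 0;
}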
@@ -1493,13 +1508,43 @@ static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
unsigned long pfn = arg->start_pfn;
- if (action != MEM_GOING_OFFLINE)
+ if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
return NOTIFY_OK;
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long start = PFN_PHYS(pfn);
+ unsigned long end = start + (1UL << PA_SECTION_SHIFT);
+
ms = __pfn_to_section(pfn);
- if (early_section(ms))
+ if (!early_section(ms))
+ continue;
+
+ if (action == MEM_GOING_OFFLINE) {
+ /*
+ * Boot memory removal is not supported. Prevent
+ * it by blocking any attempted offline request
+ * for boot memory and report it.
+ */
+ pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
return NOTIFY_BAD;
+ } else if (action == MEM_OFFLINE) {
+ /*
+ * This should never have happened. Boot memory
+ * offlining should have been prevented by this
+ * very notifier. Some memory removal procedure
+ * has probably changed, which would then require
+ * further debugging.
+ */
+ pr_err("Boot memory [%lx %lx] offlined\n", start, end);
+
+ /*
+ * Core memory hotplug does not process a return
+ * code from the notifier for MEM_OFFLINE events.
+ * The error condition has been reported. Return
+ * from here as if ignored.
+ */
+ return NOTIFY_DONE;
+ }
}
return NOTIFY_OK;
}
@@ -1508,9 +1553,66 @@ static struct notifier_block prevent_bootmem_remove_nb = {
.notifier_call = prevent_bootmem_remove_notifier,
};
+/*
+ * This ensures that boot memory sections on the platform are online
+ * from early boot. Sections that are not online to begin with cannot
+ * be prevented from being offlined later. This validates the basic
+ * assumption on which the above memory event notifier relies to
+ * prevent boot memory section offlining and its possible removal.
+ */
+static void validate_bootmem_online(void)
+{
+ phys_addr_t start, end, addr;
+ struct mem_section *ms;
+ u64 i;
+
+ /*
+ * Scanning across all memblock regions might be
+ * expensive on some big-memory systems. Hence
+ * enable this validation only with DEBUG_VM.
+ */
+ if (!IS_ENABLED(CONFIG_DEBUG_VM))
+ return;
+
+ for_each_mem_range(i, &start, &end) {
+ for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
+ ms = __pfn_to_section(PHYS_PFN(addr));
+
+ /*
+ * All memory ranges in the system at this point
+ * should have been marked as early sections.
+ */
+ WARN_ON(!early_section(ms));
+
+ /*
+ * The memory notifier mechanism that prevents boot
+ * memory offlining depends on each early memory
+ * section on the system being initially online.
+ * Otherwise a memory section that is already
+ * offline will be overlooked and can be removed
+ * completely. Call out such sections.
+ */
+ if (!online_section(ms))
+ pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
+ addr, addr + (1UL << PA_SECTION_SHIFT));
+ }
+ }
+}
+
static int __init prevent_bootmem_remove_init(void)
{
- return register_memory_notifier(&prevent_bootmem_remove_nb);
+ int ret = 0;
+
+ if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return ret;
+
+ validate_bootmem_online();
+ ret = register_memory_notifier(&prevent_bootmem_remove_nb);
+ if (ret)
+ pr_err("%s: Notifier registration failed %d\n", __func__, ret);
+
+ return ret;
}
-device_initcall(prevent_bootmem_remove_init);
+early_initcall(prevent_bootmem_remove_init);
#endif
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 1b94f5b82654..92eccaf595c8 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -155,7 +155,7 @@ int set_direct_map_invalid_noflush(struct page *page)
.clear_mask = __pgprot(PTE_VALID),
};
- if (!rodata_full)
+ if (!debug_pagealloc_enabled() && !rodata_full)
return 0;
return apply_to_page_range(&init_mm,
@@ -170,7 +170,7 @@ int set_direct_map_default_noflush(struct page *page)
.clear_mask = __pgprot(PTE_RDONLY),
};
- if (!rodata_full)
+ if (!debug_pagealloc_enabled() && !rodata_full)
return 0;
return apply_to_page_range(&init_mm,
@@ -178,6 +178,7 @@ int set_direct_map_default_noflush(struct page *page)
PAGE_SIZE, change_page_range, &data);
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!debug_pagealloc_enabled() && !rodata_full)
@@ -185,6 +186,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
* This function is used to determine if a linear map page has been marked as
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 23c326a06b2d..a0831bf8a018 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -40,7 +40,7 @@
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
#ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1
+#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_FLAGS 0
#endif
@@ -168,7 +168,7 @@ SYM_FUNC_END(cpu_do_resume)
.pushsection ".idmap.text", "awx"
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
- adrp \tmp1, empty_zero_page
+ adrp \tmp1, reserved_pg_dir
phys_to_ttbr \tmp2, \tmp1
offset_ttbr1 \tmp2, \tmp1
msr ttbr1_el1, \tmp2
@@ -489,6 +489,6 @@ SYM_FUNC_START(__cpu_setup)
/*
* Prepare SCTLR
*/
- mov_q x0, SCTLR_EL1_SET
+ mov_q x0, INIT_SCTLR_EL1_MMU_ON
ret // return to head.S
SYM_FUNC_END(__cpu_setup)
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 48d66bf0465d..bdeeac28b1be 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -19,7 +19,6 @@ config C6X
select IRQ_DOMAIN
select OF
select OF_EARLY_FLATTREE
- select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select MMU_GATHER_NO_RANGE if MMU
select SET_FS
diff --git a/arch/c6x/include/asm/elf.h b/arch/c6x/include/asm/elf.h
index 89b4437c4844..ca88acbf560b 100644
--- a/arch/c6x/include/asm/elf.h
+++ b/arch/c6x/include/asm/elf.h
@@ -39,8 +39,6 @@ do { \
#define ELF_FDPIC_CORE_EFLAGS 0
-#define ELF_CORE_COPY_FPREGS(...) 0 /* No FPU regs to copy */
-
/*
* These are used to set parameters in the core dumps.
*/
@@ -56,7 +54,6 @@ do { \
/* Nothing for now. Need to setup DP... */
#define ELF_PLAT_INIT(_r)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_CORE_COPY_REGS(_dest, _regs) \
diff --git a/arch/c6x/include/asm/mmu_context.h b/arch/c6x/include/asm/mmu_context.h
new file mode 100644
index 000000000000..d2659d0a3297
--- /dev/null
+++ b/arch/c6x/include/asm/mmu_context.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_C6X_MMU_CONTEXT_H
+#define _ASM_C6X_MMU_CONTEXT_H
+
+#include <asm-generic/nommu_context.h>
+
+#endif /* _ASM_C6X_MMU_CONTEXT_H */
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 268fad5f51cf..89dd2fcf38fa 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -28,7 +28,6 @@ config CSKY
select GENERIC_LIB_UCMPDI2
select GENERIC_ALLOCATOR
select GENERIC_ATOMIC64
- select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
@@ -286,6 +285,7 @@ config NR_CPUS
config HIGHMEM
bool "High Memory Support"
depends on !CPU_CK610
+ select KMAP_LOCAL
default y
config FORCE_MAX_ZONEORDER
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 64876e59e2ef..93372255984d 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -4,6 +4,5 @@ generic-y += gpio.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += qrwlock.h
-generic-y += seccomp.h
generic-y += user.h
generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/elf.h b/arch/csky/include/asm/elf.h
index e1ec558278bc..eb2cc5a673b5 100644
--- a/arch/csky/include/asm/elf.h
+++ b/arch/csky/include/asm/elf.h
@@ -50,7 +50,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
/*
* These are used to set parameters in the core dumps.
*/
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_CLASS ELFCLASS32
#define ELF_PLAT_INIT(_r, load_addr) { _r->a0 = 0; }
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
index 81f9477d5330..4b589cc20900 100644
--- a/arch/csky/include/asm/fixmap.h
+++ b/arch/csky/include/asm/fixmap.h
@@ -8,7 +8,7 @@
#include <asm/memory.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
enum fixed_addresses {
@@ -17,7 +17,7 @@ enum fixed_addresses {
#endif
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN,
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
__end_of_fixed_addresses
};
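In the fixmap hunk above the per-CPU kmap window simply becomes KM_MAX_IDX slots per CPU laid out back to back after FIX_KMAP_BEGIN. A small sketch of the slot arithmetic (the slot and CPU counts are assumed examples, not csky's real values):

#include <stdio.h>

#define KM_MAX_IDX_SK 16u   /* assumed slots per CPU */
#define NR_CPUS_SK    4u    /* assumed CPU count */

/* offset from FIX_KMAP_BEGIN for slot idx on a given cpu */
static unsigned int kmap_slot(unsigned int cpu, unsigned int idx)
{
	return cpu * KM_MAX_IDX_SK + idx;
}

int main(void)
{
	/* FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1 */
	printf("last slot offset = %u\n",
	       kmap_slot(NR_CPUS_SK - 1, KM_MAX_IDX_SK - 1));  /* 63 */
	return 0;
}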
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
index 14645e3d5cd5..1f4ed3f4c0d9 100644
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#include <asm/cache.h>
/* undef for production */
@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
#define ARCH_HAS_KMAP_FLUSH_TLB
extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
#define flush_cache_kmaps() do {} while (0)
+#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr)
+#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr)
+
extern void kmap_init(void);
#endif /* __KERNEL__ */
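The two arch_kmap_local_post_* defines above are the only csky-specific pieces left once the arch moves to the generic kmap_local code: the generic layer picks the slot and writes the PTE, then invokes these hooks so the arch can flush its TLB. A stubbed sketch of that call order (the fixmap plumbing below is a hypothetical stand-in):

#include <stdio.h>

/* hypothetical stand-ins for the fixmap/PTE plumbing */
static unsigned long pick_fixmap_slot(void) { return 0xffff1000UL; }
static void kmap_flush_tlb_sketch(unsigned long v) { printf("flush %#lx\n", v); }
#define arch_kmap_local_post_map_sketch(vaddr) kmap_flush_tlb_sketch(vaddr)

static void *kmap_local_sketch(unsigned long pfn)
{
	unsigned long vaddr = pick_fixmap_slot();  /* generic code picks a slot */

	(void)pfn;                                 /* set_pte(...) would go here */
	arch_kmap_local_post_map_sketch(vaddr);    /* arch hook: flush the TLB */
	return (void *)vaddr;
}

int main(void)
{
	kmap_local_sketch(0x1234);
	return 0;
}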
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index abdf1f1cb6ec..b227d29393a8 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -24,11 +24,6 @@
#define cpu_asid(mm) (atomic64_read(&mm->context.asid) & ASID_MASK)
#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.asid, 0); 0; })
-#define activate_mm(prev,next) switch_mm(prev, next, current)
-
-#define destroy_context(mm) do {} while (0)
-#define enter_lazy_tlb(mm, tsk) do {} while (0)
-#define deactivate_mm(tsk, mm) do {} while (0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
@@ -46,4 +41,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
flush_icache_deferred(next);
}
+
+#include <asm-generic/mmu_context.h>
+
#endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/seccomp.h b/arch/csky/include/asm/seccomp.h
new file mode 100644
index 000000000000..d33e758126fb
--- /dev/null
+++ b/arch/csky/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_CSKY
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "csky"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c
index eb32838b8210..09b7f88a2d6a 100644
--- a/arch/csky/kernel/perf_regs.c
+++ b/arch/csky/kernel/perf_regs.c
@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index f730869e21ee..69af6bc87e64 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -102,6 +102,6 @@ void arch_cpu_idle(void)
#ifdef CONFIG_CPU_PM_STOP
asm volatile("stop\n");
#endif
- local_irq_enable();
+ raw_local_irq_enable();
}
#endif
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 89c10800a002..4161df3c6c15 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -9,8 +9,6 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-static pte_t *kmap_pte;
-
unsigned long highstart_pfn, highend_pfn;
void kmap_flush_tlb(unsigned long addr)
@@ -19,67 +17,7 @@ void kmap_flush_tlb(unsigned long addr)
}
EXPORT_SYMBOL(kmap_flush_tlb);
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, prot));
- flush_tlb_one((unsigned long)vaddr);
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int idx;
-
- if (vaddr < FIXADDR_START)
- return;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- pte_clear(&init_mm, vaddr, kmap_pte - idx);
- flush_tlb_one(vaddr);
-#else
- (void) idx; /* to kill a warning */
-#endif
- kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- unsigned long vaddr;
- int idx, type;
-
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_one(vaddr);
-
- return (void *) vaddr;
-}
-
-static void __init kmap_pages_init(void)
+void __init kmap_init(void)
{
unsigned long vaddr;
pgd_t *pgd;
@@ -96,14 +34,3 @@ static void __init kmap_pages_init(void)
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
}
-
-void __init kmap_init(void)
-{
- unsigned long vaddr;
-
- kmap_pages_init();
-
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
-
- kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
-}
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 7945de067e9f..3e3e0f16f7e0 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -12,7 +12,6 @@ config H8300
select FRAME_POINTER
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
- select GENERIC_CLOCKEVENTS
select COMMON_CLK
select ARCH_WANT_FRAME_POINTERS
select OF
diff --git a/arch/h8300/include/asm/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
new file mode 100644
index 000000000000..a9f550f5b5ec
--- /dev/null
+++ b/arch/h8300/include/asm/mmu_context.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_H8300_MMU_CONTEXT_H
+#define _ASM_H8300_MMU_CONTEXT_H
+
+#include <asm-generic/nommu_context.h>
+
+#endif /* _ASM_H8300_MMU_CONTEXT_H */
diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h
index e15521037348..2cd0dce2b6a6 100644
--- a/arch/h8300/include/uapi/asm/signal.h
+++ b/arch/h8300/include/uapi/asm/signal.h
@@ -57,30 +57,6 @@ typedef unsigned long sigset_t;
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 2048
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index aea0a40b77a9..bc1364db58fe 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -57,7 +57,7 @@ asmlinkage void ret_from_kernel_thread(void);
*/
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
__asm__("sleep");
}
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index f2afabbadd43..6e00c16a36b5 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -27,7 +27,6 @@ config HEXAGON
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
select STACKTRACE_SUPPORT
- select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h
index 9efa203e1164..5bfdd9b147fd 100644
--- a/arch/hexagon/include/asm/elf.h
+++ b/arch/hexagon/include/asm/elf.h
@@ -181,7 +181,6 @@ do { \
*/
#define ELF_PLAT_INIT(regs, load_addr) do { } while (0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Hrm is this going to cause problems for changing PAGE_SIZE? */
diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h
index cdc4adc0300a..81947764c47d 100644
--- a/arch/hexagon/include/asm/mmu_context.h
+++ b/arch/hexagon/include/asm/mmu_context.h
@@ -15,39 +15,13 @@
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>
-static inline void destroy_context(struct mm_struct *mm)
-{
-}
-
/*
* VM port hides all TLB management, so "lazy TLB" isn't very
* meaningful. Even for ports to architectures with visible TLBs,
* this is almost invariably a null function.
+ *
+ * mm->context is set up by pgd_alloc, so no init_new_context required.
*/
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-}
-
-/*
- * Architecture-specific actions, if any, for memory map deactivation.
- */
-static inline void deactivate_mm(struct task_struct *tsk,
- struct mm_struct *mm)
-{
-}
-
-/**
- * init_new_context - initialize context related info for new mm_struct instance
- * @tsk: pointer to a task struct
- * @mm: pointer to a new mm struct
- */
-static inline int init_new_context(struct task_struct *tsk,
- struct mm_struct *mm)
-{
- /* mm->context is set up by pgd_alloc */
- return 0;
-}
/*
* Switch active mm context
@@ -74,6 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/*
* Activate new memory map for task
*/
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
unsigned long flags;
@@ -86,4 +61,6 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
#include <asm-generic/mm_hooks.h>
+#include <asm-generic/mmu_context.h>
+
#endif
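The hexagon hunks above follow the same pattern as the csky and ia64 ones: an arch keeps only the hooks it really implements, marks each with a same-name #define, and lets <asm-generic/mmu_context.h> supply no-op fallbacks for the rest. A self-contained demo of that define-to-override idiom (everything below the marker line plays the role of the generic header):

#include <stdio.h>

/* arch header: provide a real hook and mark it as overridden */
#define enter_lazy_tlb enter_lazy_tlb
static void enter_lazy_tlb(void) { puts("arch-specific hook"); }

/* --- what the generic header effectively does --- */
#ifndef enter_lazy_tlb
static void enter_lazy_tlb(void) { puts("generic no-op"); }
#endif
#ifndef deactivate_mm
static void deactivate_mm(void) { puts("generic no-op"); }
#endif

int main(void)
{
	enter_lazy_tlb();   /* arch version wins */
	deactivate_mm();    /* generic fallback */
	return 0;
}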
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 5a0a95d93ddb..67767c5ed98c 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -44,7 +44,7 @@ void arch_cpu_idle(void)
{
__vmwait();
/* interrupts wake us up, but irqs are still disabled */
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 39b25a5a591b..eed59ec32657 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -46,6 +46,7 @@ config IA64
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL
+ select LEGACY_TIMER_TICK
select SWIOTLB
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
@@ -288,6 +289,7 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
+ depends on BROKEN
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
@@ -299,12 +301,11 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on ARCH_DISCONTIGMEM_ENABLE
select SPARSEMEM_VMEMMAP_ENABLE
-config ARCH_DISCONTIGMEM_DEFAULT
+config ARCH_SPARSEMEM_DEFAULT
def_bool y
- depends on ARCH_DISCONTIGMEM_ENABLE
+ depends on ARCH_SPARSEMEM_ENABLE
config NUMA
bool "NUMA support"
@@ -329,7 +330,7 @@ config NODES_SHIFT
# VIRTUAL_MEM_MAP has been retained for historical reasons.
config VIRTUAL_MEM_MAP
bool "Virtual mem map"
- depends on !SPARSEMEM
+ depends on !SPARSEMEM && !FLATMEM
default y
help
Say Y to compile the kernel with support for a virtual mem map.
@@ -342,9 +343,6 @@ config HOLES_IN_ZONE
bool
default y if VIRTUAL_MEM_MAP
-config HAVE_ARCH_EARLY_PFN_TO_NID
- def_bool NUMA && SPARSEMEM
-
config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
deleted file mode 100644
index 5c268cf7c2bd..000000000000
--- a/arch/ia64/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_KMAP_TYPES_H
-#define _ASM_IA64_KMAP_TYPES_H
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 092f1c91b36c..e789c0818edb 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -59,10 +59,8 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end);
extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
- extern int find_largest_hole(u64 start, u64 end, void *arg);
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
extern int vmemmap_find_next_valid_pfn(int, int);
#else
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 2da0e2eb036b..87a0d5bc11ef 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -49,11 +49,6 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);
-static inline void
-enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
/*
* When the context counter wraps around all TLBs need to be flushed because
* an old context number might have been reused. This is signalled by the
@@ -116,6 +111,7 @@ out:
* Initialize context number to some sane value. MM is guaranteed to be a
* brand-new address-space, so no TLB flushing is needed, ever.
*/
+#define init_new_context init_new_context
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
@@ -124,12 +120,6 @@ init_new_context (struct task_struct *p, struct mm_struct *mm)
}
static inline void
-destroy_context (struct mm_struct *mm)
-{
- /* Nothing to do. */
-}
-
-static inline void
reload_context (nv_mm_context_t context)
{
unsigned long rid;
@@ -178,11 +168,10 @@ activate_context (struct mm_struct *mm)
} while (unlikely(context != mm->context));
}
-#define deactivate_mm(tsk,mm) do { } while (0)
-
/*
* Switch from address space PREV to address space NEXT.
*/
+#define activate_mm activate_mm
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
@@ -196,5 +185,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)
#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
+#include <asm-generic/mmu_context.h>
+
# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
index 336d0570e1fa..dd8c166ffd7b 100644
--- a/arch/ia64/include/asm/sparsemem.h
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -18,4 +18,10 @@
#endif
#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int memory_add_physaddr_to_nid(u64 addr);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+
#endif /* _ASM_IA64_SPARSEMEM_H */
diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
index aa98ff1b9e22..38166a88e4c9 100644
--- a/arch/ia64/include/uapi/asm/signal.h
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -53,30 +53,6 @@
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
/*
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6b61a703bcf5..bfb85d905f83 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -239,7 +239,7 @@ void arch_cpu_idle(void)
if (mark_idle)
(*mark_idle)(1);
- safe_halt();
+ raw_safe_halt();
if (mark_idle)
(*mark_idle)(0);
@@ -487,7 +487,7 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *
unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
-void
+static void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
do_copy_task_regs(current, info, arg);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 75c070aed81e..c3490ee2daa5 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -817,8 +817,8 @@ access_nat_bits (struct task_struct *child, struct pt_regs *pt,
}
static int
-access_uarea (struct task_struct *child, unsigned long addr,
- unsigned long *data, int write_access);
+access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
+ unsigned long addr, unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
@@ -847,13 +847,13 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
return -EIO;
}
- if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
- || access_uarea(child, PT_AR_EC, &ec, 0) < 0
- || access_uarea(child, PT_AR_LC, &lc, 0) < 0
- || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
- || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
- || access_uarea(child, PT_CFM, &cfm, 0)
- || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
+ if (access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 0) < 0)
return -EIO;
/* control regs */
@@ -972,7 +972,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
struct switch_stack *sw;
struct ia64_fpreg fpval;
struct pt_regs *pt;
- long ret, retval = 0;
+ long retval = 0;
int i;
memset(&fpval, 0, sizeof(fpval));
@@ -1097,17 +1097,16 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
retval |= __get_user(nat_bits, &ppr->nat);
- retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
- retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
- retval |= access_uarea(child, PT_AR_EC, &ec, 1);
- retval |= access_uarea(child, PT_AR_LC, &lc, 1);
- retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
- retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
- retval |= access_uarea(child, PT_CFM, &cfm, 1);
- retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
+ retval |= access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_RSC_OFFSET, &rsc, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 1);
+ retval |= access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 1);
+ retval |= access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 1);
- ret = retval ? -EIO : 0;
- return ret;
+ return retval ? -EIO : 0;
}
void
@@ -1150,6 +1149,10 @@ ptrace_disable (struct task_struct *child)
user_disable_single_step(child);
}
+static int
+access_uarea (struct task_struct *child, unsigned long addr,
+ unsigned long *data, int write_access);
+
long
arch_ptrace (struct task_struct *child, long request,
unsigned long addr, unsigned long data)
@@ -1491,7 +1494,7 @@ struct regset_membuf {
int ret;
};
-void do_gpregs_get(struct unw_frame_info *info, void *arg)
+static void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
struct regset_membuf *dst = arg;
struct membuf to = dst->to;
@@ -1524,7 +1527,7 @@ void do_gpregs_get(struct unw_frame_info *info, void *arg)
}
}
-void do_gpregs_set(struct unw_frame_info *info, void *arg)
+static void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
@@ -1569,7 +1572,7 @@ void do_gpregs_set(struct unw_frame_info *info, void *arg)
#define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
-void do_fpregs_get(struct unw_frame_info *info, void *arg)
+static void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
struct task_struct *task = info->task;
struct regset_membuf *dst = arg;
@@ -1603,7 +1606,7 @@ void do_fpregs_get(struct unw_frame_info *info, void *arg)
membuf_zero(&to, 96 * sizeof(reg));
}
-void do_fpregs_set(struct unw_frame_info *info, void *arg)
+static void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
elf_fpreg_t fpreg, tmp[30];
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 7abc5f37bfaf..ed9fc3d057a6 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -138,12 +138,8 @@ void vtime_account_kernel(struct task_struct *tsk)
struct thread_info *ti = task_thread_info(tsk);
__u64 stime = vtime_delta(tsk);
- if ((tsk->flags & PF_VCPU) && !irq_count())
+ if (tsk->flags & PF_VCPU)
ti->gtime += stime;
- else if (hardirq_count())
- ti->hardirq_time += stime;
- else if (in_serving_softirq())
- ti->softirq_time += stime;
else
ti->stime += stime;
}
@@ -156,44 +152,48 @@ void vtime_account_idle(struct task_struct *tsk)
ti->idle_time += vtime_delta(tsk);
}
+void vtime_account_softirq(struct task_struct *tsk)
+{
+ struct thread_info *ti = task_thread_info(tsk);
+
+ ti->softirq_time += vtime_delta(tsk);
+}
+
+void vtime_account_hardirq(struct task_struct *tsk)
+{
+ struct thread_info *ti = task_thread_info(tsk);
+
+ ti->hardirq_time += vtime_delta(tsk);
+}
+
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
- unsigned long new_itm;
+ unsigned long cur_itm, new_itm, ticks;
if (cpu_is_offline(smp_processor_id())) {
return IRQ_HANDLED;
}
new_itm = local_cpu_data->itm_next;
+ cur_itm = ia64_get_itc();
- if (!time_after(ia64_get_itc(), new_itm))
+ if (!time_after(cur_itm, new_itm)) {
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- ia64_get_itc(), new_itm);
-
- profile_tick(CPU_PROFILING);
-
- while (1) {
- update_process_times(user_mode(get_irq_regs()));
-
- new_itm += local_cpu_data->itm_delta;
-
- if (smp_processor_id() == time_keeper_id)
- xtime_update(1);
-
- local_cpu_data->itm_next = new_itm;
+ cur_itm, new_itm);
+ ticks = 1;
+ } else {
+ ticks = DIV_ROUND_UP(cur_itm - new_itm,
+ local_cpu_data->itm_delta);
+ new_itm += ticks * local_cpu_data->itm_delta;
+ }
- if (time_after(new_itm, ia64_get_itc()))
- break;
+ if (smp_processor_id() != time_keeper_id)
+ ticks = 0;
- /*
- * Allow IPIs to interrupt the timer loop.
- */
- local_irq_enable();
- local_irq_disable();
- }
+ legacy_timer_tick(ticks);
do {
/*
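The timer_interrupt() rework above replaces the catch-up loop with one DIV_ROUND_UP: every period the counter has run past the programmed match is accounted in a single legacy_timer_tick(ticks) call. A worked sketch with assumed counter values:

#include <stdio.h>

int main(void)
{
	unsigned long itm_delta = 1000;  /* assumed cycles per tick */
	unsigned long new_itm   = 50000; /* next programmed expiry  */
	unsigned long cur_itm   = 53500; /* counter when the IRQ ran */

	/* DIV_ROUND_UP(cur_itm - new_itm, itm_delta) from the patch */
	unsigned long ticks = (cur_itm - new_itm + itm_delta - 1) / itm_delta;

	new_itm += ticks * itm_delta;    /* next expiry is now in the future */
	printf("ticks=%lu next=%lu\n", ticks, new_itm);  /* ticks=4 next=54000 */
	return 0;
}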
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e30e360beef8..bfc4ecd0a2ab 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -19,15 +19,12 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
+#include <linux/sizes.h>
#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long max_gap;
-#endif
-
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
@@ -166,6 +163,32 @@ find_memory (void)
alloc_per_cpu_data();
}
+static int __init find_largest_hole(u64 start, u64 end, void *arg)
+{
+ u64 *max_gap = arg;
+
+ static u64 last_end = PAGE_OFFSET;
+
+ /* NOTE: this algorithm assumes the EFI memmap table is ordered */
+
+ if (*max_gap < (start - last_end))
+ *max_gap = start - last_end;
+ last_end = end;
+ return 0;
+}
+
+static void __init verify_gap_absence(void)
+{
+ unsigned long max_gap;
+
+ /* Forbid FLATMEM if the hole is larger than 1G */
+ efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+ if (max_gap >= SZ_1G)
+ panic("Cannot use FLATMEM with %ldMB hole\n"
+ "Please switch over to SPARSEMEM\n",
+ (max_gap >> 20));
+}
+
/*
* Set up the page tables.
*/
@@ -177,37 +200,12 @@ paging_init (void)
unsigned long max_zone_pfns[MAX_NR_ZONES];
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA32
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
max_zone_pfns[ZONE_DMA32] = max_dma;
-#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
- if (max_gap < LARGE_GAP) {
- vmem_map = (struct page *) 0;
- } else {
- unsigned long map_size;
-
- /* allocate virtual_mem_map */
+ verify_gap_absence();
- map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
- sizeof(struct page));
- VMALLOC_END -= map_size;
- vmem_map = (struct page *) VMALLOC_END;
- efi_memmap_walk(create_mem_map_page_table, NULL);
-
- /*
- * alloc_node_mem_map makes an adjustment for mem_map
- * which isn't compatible with vmem_map.
- */
- NODE_DATA(0)->node_mem_map = vmem_map +
- find_min_pfn_with_active_regions();
-
- printk("Virtual mem_map starts at 0x%p\n", mem_map);
- }
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
free_area_init(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index dbe829fc5298..c7311131156e 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -584,6 +584,25 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
}
}
+static void __init virtual_map_init(void)
+{
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+ int node;
+
+ VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
+ vmem_map = (struct page *) VMALLOC_END;
+ efi_memmap_walk(create_mem_map_page_table, NULL);
+ printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+
+ for_each_online_node(node) {
+ unsigned long pfn_offset = mem_data[node].min_pfn;
+
+ NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
+ }
+#endif
+}
+
/**
* paging_init - setup page tables
*
@@ -593,38 +612,17 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
void __init paging_init(void)
{
unsigned long max_dma;
- unsigned long pfn_offset = 0;
- unsigned long max_pfn = 0;
- int node;
unsigned long max_zone_pfns[MAX_NR_ZONES];
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
sparse_init();
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
- sizeof(struct page));
- vmem_map = (struct page *) VMALLOC_END;
- efi_memmap_walk(create_mem_map_page_table, NULL);
- printk("Virtual mem_map starts at 0x%p\n", vmem_map);
-#endif
-
- for_each_online_node(node) {
- pfn_offset = mem_data[node].min_pfn;
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
-#endif
- if (mem_data[node].max_pfn > max_pfn)
- max_pfn = mem_data[node].max_pfn;
- }
+ virtual_map_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = max_dma;
-#endif
- max_zone_pfns[ZONE_NORMAL] = max_pfn;
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ef12e097f318..9b5acf8fb092 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -574,20 +574,6 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);
-int __init find_largest_hole(u64 start, u64 end, void *arg)
-{
- u64 *max_gap = arg;
-
- static u64 last_end = PAGE_OFFSET;
-
- /* NOTE: this algorithm assumes efi memmap table is ordered */
-
- if (*max_gap < (start - last_end))
- *max_gap = start - last_end;
- last_end = end;
- return 0;
-}
-
#endif /* CONFIG_VIRTUAL_MEM_MAP */
int __init register_active_ranges(u64 start, u64 len, int nid)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index f34964271101..46b6e5f3a40f 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,36 +58,6 @@ paddr_to_nid(unsigned long paddr)
EXPORT_SYMBOL(paddr_to_nid);
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
-/*
- * Because of holes evaluate on section limits.
- * If the section of memory exists, then return the node where the section
- * resides. Otherwise return node 0 as the default. This is used by
- * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
- * the section resides.
- */
-int __meminit __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state)
-{
- int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-
- if (section >= state->last_start && section < state->last_end)
- return state->last_nid;
-
- for (i = 0; i < num_node_memblks; i++) {
- ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
- esec = (node_memblk[i].start_paddr + node_memblk[i].size +
- ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
- if (section >= ssec && section < esec) {
- state->last_start = ssec;
- state->last_end = esec;
- state->last_nid = node_memblk[i].nid;
- return node_memblk[i].nid;
- }
- }
-
- return -1;
-}
-
void numa_clear_node(int cpu)
{
unmap_cpu_from_node(cpu, NUMA_NO_NODE);
diff --git a/arch/m68k/68000/timers.c b/arch/m68k/68000/timers.c
index e8dfdd2556a5..0d0417cebc7f 100644
--- a/arch/m68k/68000/timers.c
+++ b/arch/m68k/68000/timers.c
@@ -52,7 +52,6 @@
#endif
static u32 m68328_tick_cnt;
-static irq_handler_t timer_interrupt;
/***************************************************************************/
@@ -62,7 +61,8 @@ static irqreturn_t hw_tick(int irq, void *dummy)
TSTAT &= 0;
m68328_tick_cnt += TICKS_PER_JIFFY;
- return timer_interrupt(irq, dummy);
+ legacy_timer_tick(1);
+ return IRQ_HANDLED;
}
/***************************************************************************/
@@ -91,7 +91,7 @@ static struct clocksource m68328_clk = {
/***************************************************************************/
-void hw_timer_init(irq_handler_t handler)
+void hw_timer_init(void)
{
int ret;
@@ -113,7 +113,6 @@ void hw_timer_init(irq_handler_t handler)
/* Enable timer 1 */
TCTL |= TCTL_TEN;
clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
- timer_interrupt = handler;
}
/***************************************************************************/
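The m68k timer hunks above (and the amiga/apollo/atari ones further down) all make the same conversion: the stored irq_handler_t callback disappears and the hardware tick handler reports directly via legacy_timer_tick(1). A reduced sketch of the resulting shape (names suffixed _sk are stand-ins):

#include <stdio.h>

static unsigned long jiffies_sk;                  /* stand-in tick counter */
static void legacy_timer_tick_sk(int t) { jiffies_sk += t; }

static int hw_tick_sk(void)
{
	/* hardware acknowledge would happen here (TSTAT &= 0;) */
	legacy_timer_tick_sk(1);  /* was: return timer_interrupt(irq, dummy); */
	return 1;                 /* IRQ_HANDLED */
}

int main(void)
{
	hw_tick_sk();
	printf("jiffies=%lu\n", jiffies_sk);
	return 0;
}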
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 694c4fca9f5d..7246aa50298e 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -20,6 +20,7 @@ choice
config M68KCLASSIC
bool "Classic M68K CPU family support"
+ select HAVE_ARCH_PFN_VALID
config COLDFIRE
bool "Coldfire CPU family support"
@@ -104,6 +105,7 @@ config M68060
config M68328
bool "MC68328"
depends on !MMU
+ select LEGACY_TIMER_TICK
select M68000
help
Motorola 68328 processor support.
@@ -111,6 +113,7 @@ config M68328
config M68EZ328
bool "MC68EZ328"
depends on !MMU
+ select LEGACY_TIMER_TICK
select M68000
help
Motorola 68EZ328 processor support.
@@ -118,6 +121,7 @@ config M68EZ328
config M68VZ328
bool "MC68VZ328"
depends on !MMU
+ select LEGACY_TIMER_TICK
select M68000
help
Motorola 68VZ328 processor support.
@@ -137,6 +141,7 @@ config M5206
bool "MCF5206"
depends on !MMU
select COLDFIRE_SW_A7
+ select COLDFIRE_TIMERS
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
help
@@ -146,6 +151,7 @@ config M5206e
bool "MCF5206e"
depends on !MMU
select COLDFIRE_SW_A7
+ select COLDFIRE_TIMERS
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
help
@@ -154,7 +160,7 @@ config M5206e
config M520x
bool "MCF520x"
depends on !MMU
- select GENERIC_CLOCKEVENTS
+ select COLDFIRE_PIT_TIMER
select HAVE_CACHE_SPLIT
help
Freescale Coldfire 5207/5208 processor support.
@@ -162,7 +168,7 @@ config M520x
config M523x
bool "MCF523x"
depends on !MMU
- select GENERIC_CLOCKEVENTS
+ select COLDFIRE_PIT_TIMER
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
help
@@ -172,6 +178,7 @@ config M5249
bool "MCF5249"
depends on !MMU
select COLDFIRE_SW_A7
+ select COLDFIRE_TIMERS
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
help
@@ -181,6 +188,7 @@ config M525x
bool "MCF525x"
depends on !MMU
select COLDFIRE_SW_A7
+ select COLDFIRE_TIMERS
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
help
@@ -189,10 +197,10 @@ config M525x
config M5271
bool "MCF5271"
depends on !MMU
+ select COLDFIRE_PIT_TIMER
select M527x
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
- select GENERIC_CLOCKEVENTS
help
Freescale (Motorola) ColdFire 5270/5271 processor support.
@@ -200,6 +208,7 @@ config M5272
bool "MCF5272"
depends on !MMU
select COLDFIRE_SW_A7
+ select COLDFIRE_TIMERS
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
help
@@ -208,17 +217,17 @@ config M5272
config M5275
bool "MCF5275"
depends on !MMU
+ select COLDFIRE_PIT_TIMER
select M527x
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
- select GENERIC_CLOCKEVENTS
help
Freescale (Motorola) ColdFire 5274/5275 processor support.
config M528x
bool "MCF528x"
depends on !MMU
- select GENERIC_CLOCKEVENTS
+ select COLDFIRE_PIT_TIMER
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
help
@@ -227,6 +236,7 @@ config M528x
config M5307
bool "MCF5307"
depends on !MMU
+ select COLDFIRE_TIMERS
select COLDFIRE_SW_A7
select HAVE_CACHE_CB
select HAVE_MBAR
@@ -237,6 +247,7 @@ config M5307
config M532x
bool "MCF532x"
depends on !MMU
+ select COLDFIRE_TIMERS
select M53xx
select HAVE_CACHE_CB
help
@@ -245,6 +256,7 @@ config M532x
config M537x
bool "MCF537x"
depends on !MMU
+ select COLDFIRE_TIMERS
select M53xx
select HAVE_CACHE_CB
help
@@ -254,6 +266,7 @@ config M5407
bool "MCF5407"
depends on !MMU
select COLDFIRE_SW_A7
+ select COLDFIRE_TIMERS
select HAVE_CACHE_CB
select HAVE_MBAR
select CPU_NO_EFFICIENT_FFS
@@ -263,6 +276,7 @@ config M5407
config M547x
bool "MCF547x"
select M54xx
+ select COLDFIRE_SLTIMERS
select MMU_COLDFIRE if MMU
select FPU if MMU
select HAVE_CACHE_CB
@@ -273,6 +287,7 @@ config M547x
config M548x
bool "MCF548x"
+ select COLDFIRE_SLTIMERS
select MMU_COLDFIRE if MMU
select FPU if MMU
select M54xx
@@ -284,8 +299,8 @@ config M548x
config M5441x
bool "MCF5441x"
+ select COLDFIRE_PIT_TIMER
select MMU_COLDFIRE if MMU
- select GENERIC_CLOCKEVENTS
select HAVE_CACHE_CB
help
Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
@@ -302,6 +317,17 @@ config M54xx
select HAVE_PCI
bool
+config COLDFIRE_PIT_TIMER
+ bool
+
+config COLDFIRE_TIMERS
+ bool
+ select LEGACY_TIMER_TICK
+
+config COLDFIRE_SLTIMERS
+ bool
+ select LEGACY_TIMER_TICK
+
endif # COLDFIRE
@@ -373,16 +399,38 @@ config RMW_INSNS
config SINGLE_MEMORY_CHUNK
bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
depends on MMU
- default y if SUN3
- select NEED_MULTIPLE_NODES
+ default y if SUN3 || MMU_COLDFIRE
help
Ignore all but the first contiguous chunk of physical memory for VM
purposes. This will save a few bytes of kernel size and may speed up
- some operations. Say N if not sure.
+ some operations.
+ When this option is set to N, you may want to lower "Maximum zone
+ order" to save memory that would otherwise be wasted on an unused
+ memory map.
+ Say N if not sure.
config ARCH_DISCONTIGMEM_ENABLE
+ depends on BROKEN
def_bool MMU && !SINGLE_MEMORY_CHUNK
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order" if ADVANCED
+ depends on !SINGLE_MEMORY_CHUNK
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ For systems that have holes in their physical address space this
+ value also defines the minimal size of the hole that allows
+ freeing the unused memory map.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
+
config 060_WRITETHROUGH
bool "Use write-through caching for 68060 supervisor accesses"
depends on ADVANCED && M68060
@@ -406,7 +454,7 @@ config M68K_L2_CACHE
config NODES_SHIFT
int
default "3"
- depends on !SINGLE_MEMORY_CHUNK
+ depends on DISCONTIGMEM
config CPU_HAS_NO_BITFIELDS
bool
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 17e8c3a292d7..cf6961d4e657 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -7,6 +7,7 @@ config AMIGA
bool "Amiga support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select LEGACY_TIMER_TICK
help
This option enables support for the Amiga series of computers. If
you plan to use this kernel on an Amiga, say Y here and browse the
@@ -17,6 +18,7 @@ config ATARI
depends on MMU
select MMU_MOTOROLA if MMU
select HAVE_ARCH_NVRAM_OPS
+ select LEGACY_TIMER_TICK
help
This option enables support for the 68000-based Atari series of
computers (including the TT, Falcon and Medusa). If you plan to use
@@ -28,18 +30,18 @@ config MAC
depends on MMU
select MMU_MOTOROLA if MMU
select HAVE_ARCH_NVRAM_OPS
+ select LEGACY_TIMER_TICK
help
This option enables support for the Apple Macintosh series of
- computers (yes, there is experimental support now, at least for part
- of the series).
-
- Say N unless you're willing to code the remaining necessary support.
- ;)
+ computers. If you plan to use this kernel on a Mac, say Y here and
+ browse the documentation available at <http://www.mac.linux-m68k.org/>;
+ otherwise say N.
config APOLLO
bool "Apollo support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select LEGACY_TIMER_TICK
help
Say Y here if you want to run Linux on an MC680x0-based Apollo
Domain workstation such as the DN3500.
@@ -58,6 +60,7 @@ config MVME147
bool "MVME147 support"
depends on MMU
depends on VME
+ select LEGACY_TIMER_TICK
help
Say Y to include support for early Motorola VME boards. This will
build a kernel which can run on MVME147 single-board computers. If
@@ -68,6 +71,7 @@ config MVME16x
bool "MVME162, 166 and 167 support"
depends on MMU
depends on VME
+ select LEGACY_TIMER_TICK
help
Say Y to include support for Motorola VME boards. This will build a
kernel which can run on MVME162, MVME166, MVME167, MVME172, and
@@ -79,6 +83,7 @@ config BVME6000
bool "BVME4000 and BVME6000 support"
depends on MMU
depends on VME
+ select LEGACY_TIMER_TICK
help
Say Y to include support for VME boards from BVM Ltd. This will
build a kernel which can run on BVME4000 and BVME6000 boards. If
@@ -89,6 +94,7 @@ config HP300
bool "HP9000/300 and HP9000/400 support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select LEGACY_TIMER_TICK
help
This option enables support for the HP9000/300 and HP9000/400 series
of workstations. Support for these machines is still somewhat
@@ -99,6 +105,7 @@ config HP300
config SUN3X
bool "Sun3x support"
depends on MMU
+ select LEGACY_TIMER_TICK
select MMU_MOTOROLA if MMU
select M68030
help
@@ -114,6 +121,7 @@ config Q40
bool "Q40/Q60 support"
depends on MMU
select MMU_MOTOROLA if MMU
+ select LEGACY_TIMER_TICK
help
The Q40 is a Motorola 68040-based successor to the Sinclair QL
manufactured in Germany. There is an official Q40 home page at
@@ -126,6 +134,7 @@ config SUN3
depends on MMU
depends on !MMU_MOTOROLA
select MMU_SUN3 if MMU
+ select LEGACY_TIMER_TICK
select NO_DMA
select M68020
help
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index bee9f240f35d..be2dfab48fd4 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -92,7 +92,7 @@ static char *amiga_models[] __initdata = {
static char amiga_model_name[13] = "Amiga ";
-static void amiga_sched_init(irq_handler_t handler);
+static void amiga_sched_init(void);
static void amiga_get_model(char *model);
static void amiga_get_hardware_list(struct seq_file *m);
extern void amiga_mksound(unsigned int count, unsigned int ticks);
@@ -383,14 +383,6 @@ void __init config_amiga(void)
mach_init_IRQ = amiga_init_IRQ;
mach_get_model = amiga_get_model;
mach_get_hardware_list = amiga_get_hardware_list;
-
- /*
- * default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI
- * code will not be able to allocate any mem for transfers, unless we are
- * dealing with a Z2 mem only system. /Jes
- */
- mach_max_dma_address = 0xffffffff;
-
mach_reset = amiga_reset;
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
mach_beep = amiga_mksound;
@@ -475,16 +467,15 @@ static u32 clk_total, clk_offset;
static irqreturn_t ciab_timer_handler(int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
-
clk_total += jiffy_ticks;
clk_offset = 0;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
+ timer_heartbeat();
return IRQ_HANDLED;
}
-static void __init amiga_sched_init(irq_handler_t timer_routine)
+static void __init amiga_sched_init(void)
{
static struct resource sched_res = {
.name = "timer", .start = 0x00bfd400, .end = 0x00bfd5ff,
@@ -503,7 +494,7 @@ static void __init amiga_sched_init(irq_handler_t timer_routine)
* SCSI code. We'll have to take a look at this later
*/
if (request_irq(IRQ_AMIGA_CIAB_TA, ciab_timer_handler, IRQF_TIMER,
- "timer", timer_routine))
+ "timer", NULL))
pr_err("Couldn't register timer interrupt\n");
/* start timer */
ciab.cra |= 0x11;
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 762da5d7a415..581a5f68d102 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -26,7 +26,7 @@ u_long cpuctrl_physaddr;
u_long timer_physaddr;
u_long apollo_model;
-extern void dn_sched_init(irq_handler_t handler);
+extern void dn_sched_init(void);
extern void dn_init_IRQ(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
extern void dn_dummy_reset(void);
@@ -150,7 +150,6 @@ void __init config_apollo(void)
mach_sched_init=dn_sched_init; /* */
mach_init_IRQ=dn_init_IRQ;
- mach_max_dma_address = 0xffffffff;
mach_hwclk = dn_dummy_hwclk; /* */
mach_reset = dn_dummy_reset; /* */
#ifdef CONFIG_HEARTBEAT
@@ -168,11 +167,10 @@ void __init config_apollo(void)
irqreturn_t dn_timer_int(int irq, void *dev_id)
{
- irq_handler_t timer_handler = dev_id;
-
volatile unsigned char x;
- timer_handler(irq, dev_id);
+ legacy_timer_tick(1);
+ timer_heartbeat();
x = *(volatile unsigned char *)(apollo_timer + 3);
x = *(volatile unsigned char *)(apollo_timer + 5);
@@ -180,7 +178,7 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void dn_sched_init(irq_handler_t timer_routine)
+void dn_sched_init(void)
{
/* program timer 1 */
*(volatile unsigned char *)(apollo_timer + 3) = 0x01;
@@ -198,7 +196,7 @@ void dn_sched_init(irq_handler_t timer_routine)
*(volatile unsigned char *)(apollo_timer + 0x3));
#endif
- if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
+ if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", NULL))
pr_err("Couldn't register timer interrupt\n");
}
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 7ec3161e8517..44f9b5216ac9 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -77,7 +77,7 @@ static void atari_heartbeat(int on);
#endif
/* atari specific timer functions (in time.c) */
-extern void atari_sched_init(irq_handler_t);
+extern void atari_sched_init(void);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
@@ -205,7 +205,6 @@ void __init config_atari(void)
mach_get_model = atari_get_model;
mach_get_hardware_list = atari_get_hardware_list;
mach_reset = atari_reset;
- mach_max_dma_address = 0xffffff;
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
mach_beep = atari_mksound;
#endif
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index ce923a523695..1068670cb741 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <asm/atariints.h>
+#include <asm/machdep.h>
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
@@ -40,7 +41,6 @@ static u8 last_timer_count;
static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
unsigned long flags;
local_irq_save(flags);
@@ -48,14 +48,15 @@ static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id)
last_timer_count = st_mfp.tim_dt_c;
} while (last_timer_count == 1);
clk_total += INT_TICKS;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
+ timer_heartbeat();
local_irq_restore(flags);
return IRQ_HANDLED;
}
void __init
-atari_sched_init(irq_handler_t timer_routine)
+atari_sched_init(void)
{
/* set Timer C data Register */
st_mfp.tim_dt_c = INT_TICKS;
@@ -63,7 +64,7 @@ atari_sched_init(irq_handler_t timer_routine)
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
if (request_irq(IRQ_MFP_TIMC, mfp_timer_c_handler, IRQF_TIMER, "timer",
- timer_routine))
+ NULL))
pr_err("Couldn't register timer interrupt\n");
clocksource_register_hz(&atari_clk, INT_CLK);
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 50f4d01363df..0c6feafbbd11 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -38,7 +38,7 @@
#include <asm/bvme6000hw.h>
static void bvme6000_get_model(char *model);
-extern void bvme6000_sched_init(irq_handler_t handler);
+extern void bvme6000_sched_init(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
@@ -101,7 +101,6 @@ void __init config_bvme6000(void)
bvme6000_set_vectors();
#endif
- mach_max_dma_address = 0xffffffff;
mach_sched_init = bvme6000_sched_init;
mach_init_IRQ = bvme6000_init_IRQ;
mach_hwclk = bvme6000_hwclk;
@@ -165,7 +164,6 @@ static u32 clk_total, clk_offset;
static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
unsigned long flags;
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
unsigned char msr;
@@ -175,7 +173,7 @@ static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
rtc->msr = msr | 0x20; /* Ack the interrupt */
clk_total += RTC_TIMER_CYCLES;
clk_offset = 0;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
local_irq_restore(flags);
return IRQ_HANDLED;
@@ -190,7 +188,7 @@ static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
* so divide by 8 to get the microsecond result.
*/
-void bvme6000_sched_init (irq_handler_t timer_routine)
+void bvme6000_sched_init (void)
{
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
unsigned char msr = rtc->msr & 0xc0;
@@ -198,7 +196,7 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
rtc->msr = 0; /* Ensure timer registers accessible */
if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, IRQF_TIMER, "timer",
- timer_routine))
+ NULL))
panic ("Couldn't register timer int");
rtc->t1cr_omr = 0x04; /* Mode 2, ext clk */
diff --git a/arch/m68k/coldfire/Makefile b/arch/m68k/coldfire/Makefile
index 573eabca1a3a..a3e18d73d8b8 100644
--- a/arch/m68k/coldfire/Makefile
+++ b/arch/m68k/coldfire/Makefile
@@ -16,20 +16,24 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-$(CONFIG_COLDFIRE) += cache.o clk.o device.o dma.o entry.o vectors.o
-obj-$(CONFIG_M5206) += m5206.o timers.o intc.o reset.o
-obj-$(CONFIG_M5206e) += m5206.o timers.o intc.o reset.o
-obj-$(CONFIG_M520x) += m520x.o pit.o intc-simr.o reset.o
-obj-$(CONFIG_M523x) += m523x.o pit.o dma_timer.o intc-2.o reset.o
-obj-$(CONFIG_M5249) += m5249.o timers.o intc.o intc-5249.o reset.o
-obj-$(CONFIG_M525x) += m525x.o timers.o intc.o intc-525x.o reset.o
-obj-$(CONFIG_M527x) += m527x.o pit.o intc-2.o reset.o
-obj-$(CONFIG_M5272) += m5272.o intc-5272.o timers.o
-obj-$(CONFIG_M528x) += m528x.o pit.o intc-2.o reset.o
-obj-$(CONFIG_M5307) += m5307.o timers.o intc.o reset.o
-obj-$(CONFIG_M53xx) += m53xx.o timers.o intc-simr.o reset.o
-obj-$(CONFIG_M5407) += m5407.o timers.o intc.o reset.o
-obj-$(CONFIG_M54xx) += m54xx.o sltimers.o intc-2.o
-obj-$(CONFIG_M5441x) += m5441x.o pit.o intc-simr.o reset.o
+obj-$(CONFIG_M5206) += m5206.o intc.o reset.o
+obj-$(CONFIG_M5206e) += m5206.o intc.o reset.o
+obj-$(CONFIG_M520x) += m520x.o intc-simr.o reset.o
+obj-$(CONFIG_M523x) += m523x.o dma_timer.o intc-2.o reset.o
+obj-$(CONFIG_M5249) += m5249.o intc.o intc-5249.o reset.o
+obj-$(CONFIG_M525x) += m525x.o intc.o intc-525x.o reset.o
+obj-$(CONFIG_M527x) += m527x.o intc-2.o reset.o
+obj-$(CONFIG_M5272) += m5272.o intc-5272.o
+obj-$(CONFIG_M528x) += m528x.o intc-2.o reset.o
+obj-$(CONFIG_M5307) += m5307.o intc.o reset.o
+obj-$(CONFIG_M53xx) += m53xx.o intc-simr.o reset.o
+obj-$(CONFIG_M5407) += m5407.o intc.o reset.o
+obj-$(CONFIG_M54xx) += m54xx.o intc-2.o
+obj-$(CONFIG_M5441x) += m5441x.o intc-simr.o reset.o
+
+obj-$(CONFIG_COLDFIRE_PIT_TIMER) += pit.o
+obj-$(CONFIG_COLDFIRE_TIMERS) += timers.o
+obj-$(CONFIG_COLDFIRE_SLTIMERS) += sltimers.o
obj-$(CONFIG_NETtel) += nettel.o
obj-$(CONFIG_CLEOPATRA) += nettel.o
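
The ColdFire Makefile now builds each timer flavour behind a dedicated Kconfig symbol instead of naming pit.o/timers.o/sltimers.o per SoC line. A hedged sketch of how the wiring would look on the Kconfig side, with M520x as the example (the symbol names come from the Makefile above; the select placement is assumed):

	# Sketch only: one hidden symbol per timer flavour, selected
	# by the SoC options elsewhere in the m68k Kconfig files.
	config COLDFIRE_PIT_TIMER
		bool

	config M520x
		bool "MCF520x"
		select COLDFIRE_PIT_TIMER
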
diff --git a/arch/m68k/coldfire/pit.c b/arch/m68k/coldfire/pit.c
index fd1d9c915daa..855d0af47097 100644
--- a/arch/m68k/coldfire/pit.c
+++ b/arch/m68k/coldfire/pit.c
@@ -136,7 +136,7 @@ static struct clocksource pit_clk = {
/***************************************************************************/
-void hw_timer_init(irq_handler_t handler)
+void hw_timer_init(void)
{
int ret;
diff --git a/arch/m68k/coldfire/sltimers.c b/arch/m68k/coldfire/sltimers.c
index 5ab81c9c552d..f9d572ee63db 100644
--- a/arch/m68k/coldfire/sltimers.c
+++ b/arch/m68k/coldfire/sltimers.c
@@ -83,14 +83,13 @@ void mcfslt_profile_init(void)
static u32 mcfslt_cycles_per_jiffy;
static u32 mcfslt_cnt;
-static irq_handler_t timer_interrupt;
-
static irqreturn_t mcfslt_tick(int irq, void *dummy)
{
/* Reset Slice Timer 0 */
__raw_writel(MCFSLT_SSR_BE | MCFSLT_SSR_TE, TA(MCFSLT_SSR));
mcfslt_cnt += mcfslt_cycles_per_jiffy;
- return timer_interrupt(irq, dummy);
+ legacy_timer_tick(1);
+ return IRQ_HANDLED;
}
static u64 mcfslt_read_clk(struct clocksource *cs)
@@ -119,7 +118,7 @@ static struct clocksource mcfslt_clk = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-void hw_timer_init(irq_handler_t handler)
+void hw_timer_init(void)
{
int r;
@@ -136,7 +135,6 @@ void hw_timer_init(irq_handler_t handler)
/* initialize mcfslt_cnt knowing that slice timers count down */
mcfslt_cnt = mcfslt_cycles_per_jiffy;
- timer_interrupt = handler;
r = request_irq(MCF_IRQ_TIMER, mcfslt_tick, IRQF_TIMER, "timer", NULL);
if (r) {
pr_err("Failed to request irq %d (timer): %pe\n", MCF_IRQ_TIMER,
diff --git a/arch/m68k/coldfire/timers.c b/arch/m68k/coldfire/timers.c
index b8301fddf901..05a42d8e0a59 100644
--- a/arch/m68k/coldfire/timers.c
+++ b/arch/m68k/coldfire/timers.c
@@ -48,8 +48,6 @@ void coldfire_profile_init(void);
static u32 mcftmr_cycles_per_jiffy;
static u32 mcftmr_cnt;
-static irq_handler_t timer_interrupt;
-
/***************************************************************************/
static void init_timer_irq(void)
@@ -77,7 +75,8 @@ static irqreturn_t mcftmr_tick(int irq, void *dummy)
__raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER));
mcftmr_cnt += mcftmr_cycles_per_jiffy;
- return timer_interrupt(irq, dummy);
+ legacy_timer_tick(1);
+ return IRQ_HANDLED;
}
/***************************************************************************/
@@ -108,7 +107,7 @@ static struct clocksource mcftmr_clk = {
/***************************************************************************/
-void hw_timer_init(irq_handler_t handler)
+void hw_timer_init(void)
{
int r;
@@ -126,7 +125,6 @@ void hw_timer_init(irq_handler_t handler)
clocksource_register_hz(&mcftmr_clk, FREQ);
- timer_interrupt = handler;
init_timer_irq();
r = request_irq(MCF_IRQ_TIMER, mcftmr_tick, IRQF_TIMER, "timer", NULL);
if (r) {
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index f9f4fa595e13..19b40b6bc4b7 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -563,6 +563,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -583,7 +584,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -626,6 +626,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -638,7 +640,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -653,9 +654,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index f4828e86d547..07516abe0489 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -519,6 +519,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -539,7 +540,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -582,6 +582,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -594,7 +596,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -609,9 +610,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index e7911f141de1..cc901c4e9492 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -541,6 +541,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -561,7 +562,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -604,6 +604,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -616,7 +618,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -631,9 +632,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index d574e438e6db..fc9a94aa7d6b 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -512,6 +512,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -532,7 +533,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -575,6 +575,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -587,7 +589,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -602,9 +603,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index c7ce206e6138..260f1206c810 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -521,6 +521,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -541,7 +542,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -584,6 +584,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -596,7 +598,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -611,9 +612,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3cd76bfaee03..f6d50b3fe8c2 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -544,6 +544,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -564,7 +565,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -607,6 +607,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -619,7 +621,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -634,9 +635,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index c3d6faa7894f..fbe000ca0003 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -630,6 +630,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -650,7 +651,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -693,6 +693,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -705,7 +707,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -720,9 +721,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 5568aa7d9d41..25ca836a5701 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -511,6 +511,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -531,7 +532,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -574,6 +574,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -586,7 +588,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -601,9 +602,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 5b1e72ce53f8..5794e43a2acb 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -512,6 +512,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -532,7 +533,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -575,6 +575,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -587,7 +589,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -602,9 +603,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index c3a3dcf30fb9..dbfb18938e11 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -530,6 +530,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -550,7 +551,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -593,6 +593,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -605,7 +607,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -620,9 +621,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 3c00e52f1bf0..e6afbeee7c4a 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -514,6 +514,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -534,7 +535,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -576,6 +576,8 @@ CONFIG_STRING_SELFTEST=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -588,7 +590,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -603,9 +604,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 241242d73cbd..5340507a9fff 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -513,6 +513,7 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -533,7 +534,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
@@ -576,6 +576,8 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_TEST_LOCKUP=m
CONFIG_WW_MUTEX_SELFTEST=m
CONFIG_EARLY_PRINTK=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
@@ -588,7 +590,6 @@ CONFIG_TEST_STRSCPY=m
CONFIG_TEST_KSTRTOX=m
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
-CONFIG_TEST_BITFIELD=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_OVERFLOW=m
@@ -603,9 +604,13 @@ CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_FIND_BIT_BENCHMARK=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
+CONFIG_BITFIELD_KUNIT=m
+CONFIG_LINEAR_RANGES_TEST=m
+CONFIG_BITS_TEST=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
+CONFIG_TEST_FREE_PAGES=m
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index a161d44fd20b..ce1eb3d3d55d 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -260,7 +260,6 @@ void __init config_hp300(void)
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = hp300_pulse;
#endif
- mach_max_dma_address = 0xffffffff;
if (hp300_model >= HP_330 && hp300_model <= HP_433S &&
hp300_model != HP_350) {
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index bfee13e1d0fe..1d1b7b3b5dd4 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -55,7 +55,6 @@ static u32 clk_total, clk_offset;
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
unsigned long flags;
unsigned long tmp;
@@ -64,7 +63,8 @@ static irqreturn_t hp300_tick(int irq, void *dev_id)
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
clk_total += INTVAL;
clk_offset = 0;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
+ timer_heartbeat();
local_irq_restore(flags);
/* Turn off the network and SCSI leds */
@@ -98,14 +98,14 @@ again:
return ticks;
}
-void __init hp300_sched_init(irq_handler_t vector)
+void __init hp300_sched_init(void)
{
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x1); /* reset */
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
- if (request_irq(IRQ_AUTO_6, hp300_tick, IRQF_TIMER, "timer tick", vector))
+ if (request_irq(IRQ_AUTO_6, hp300_tick, IRQF_TIMER, "timer tick", NULL))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
diff --git a/arch/m68k/hp300/time.h b/arch/m68k/hp300/time.h
index 1d77b55cc72a..040a098b7db1 100644
--- a/arch/m68k/hp300/time.h
+++ b/arch/m68k/hp300/time.h
@@ -1 +1 @@
-extern void hp300_sched_init(irq_handler_t vector);
+extern void hp300_sched_init(void);
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 3a3bdcfcd375..a4aa82021d3b 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -76,7 +76,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#include <asm-generic/cmpxchg-local.h>
@@ -119,11 +119,11 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
}
#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+ ({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)));})
#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+ ({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)));})
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
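
The xchg()/cmpxchg() macros switch from a plain cast expression to a GNU statement expression. The computed value is the same; the difference is that a cast whose result is discarded can trigger a "value computed is not used" warning, while the unused result of a ({ ... }) block does not. A standalone illustration (foo() and both macro names are invented for the example):

	extern int foo(void);

	#define as_cast()	((int)foo())		/* may warn when discarded */
	#define as_stmt_expr()	({ (int)foo(); })	/* silent when discarded */

	void demo(void)
	{
		as_cast();	/* gcc -Wunused-value: value computed is not used */
		as_stmt_expr();	/* no warning; still usable as an rvalue */
	}
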
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 49bd3266b4b1..8fd80ef1b77e 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -12,7 +12,7 @@ struct rtc_time;
struct rtc_pll_info;
struct buffer_head;
-extern void (*mach_sched_init) (irq_handler_t handler);
+extern void (*mach_sched_init) (void);
/* machine dependent irq functions */
extern void (*mach_init_IRQ) (void);
extern void (*mach_get_model) (char *model);
@@ -27,14 +27,19 @@ extern void (*mach_halt)( void );
extern void (*mach_power_off)( void );
extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
-extern long mach_max_dma_address;
extern void (*mach_heartbeat) (int);
extern void (*mach_l2_flush) (int);
extern void (*mach_beep) (unsigned int, unsigned int);
/* Hardware clock functions */
-extern void hw_timer_init(irq_handler_t handler);
-extern unsigned long hw_timer_offset(void);
+extern void hw_timer_init(void);
+#ifdef CONFIG_HEARTBEAT
+extern void timer_heartbeat(void);
+#else
+static inline void timer_heartbeat(void)
+{
+}
+#endif
extern void config_BSP(char *command, int len);
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index 993fd7e37069..a5d358855878 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -5,10 +5,6 @@
#include <asm-generic/mm_hooks.h>
#include <linux/mm_types.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
#ifdef CONFIG_MMU
#if defined(CONFIG_COLDFIRE)
@@ -58,6 +54,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
* We're finished using the context for an address space.
*/
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
@@ -83,6 +80,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm)
{
@@ -90,8 +88,6 @@ static inline void activate_mm(struct mm_struct *active_mm,
set_context(mm->context, mm->pgd);
}
-#define deactivate_mm(tsk, mm) do { } while (0)
-
#define prepare_arch_switch(next) load_ksp_mmu(next)
static inline void load_ksp_mmu(struct task_struct *task)
@@ -176,6 +172,7 @@ extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);
/* set the context for a new task to unmapped */
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -192,6 +189,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
}
/* flush context if allocated... */
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
if (mm->context != SUN3_INVALID_CONTEXT)
@@ -210,8 +208,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
activate_context(tsk->mm);
}
-#define deactivate_mm(tsk, mm) do { } while (0)
-
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
@@ -224,6 +221,7 @@ static inline void activate_mm(struct mm_struct *prev_mm,
#include <asm/page.h>
#include <asm/cacheflush.h>
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -231,8 +229,6 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-#define destroy_context(mm) do { } while(0)
-
static inline void switch_mm_0230(struct mm_struct *mm)
{
unsigned long crp[2] = {
@@ -300,8 +296,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
}
}
-#define deactivate_mm(tsk,mm) do { } while (0)
-
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
@@ -315,24 +310,11 @@ static inline void activate_mm(struct mm_struct *prev_mm,
#endif
-#else /* !CONFIG_MMU */
-
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- return 0;
-}
-
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-}
+#include <asm-generic/mmu_context.h>
-#define destroy_context(mm) do { } while (0)
-#define deactivate_mm(tsk,mm) do { } while (0)
+#else /* !CONFIG_MMU */
-static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
-{
-}
+#include <asm-generic/nommu_context.h>
#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */
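
The mmu_context.h rework leans on the asm-generic convention: every hook in <asm-generic/mmu_context.h> has a no-op default guarded by #ifndef, so an architecture overrides one by providing its own function and #define-ing the name before the include. Simplified sketch of the generic side (the structure, not the exact header text):

	/* asm-generic/mmu_context.h, simplified */
	#ifndef init_new_context
	static inline int init_new_context(struct task_struct *tsk,
					   struct mm_struct *mm)
	{
		return 0;
	}
	#endif

That is why the m68k variants above gain lines like "#define init_new_context init_new_context" next to their own definitions, and why the empty enter_lazy_tlb() and deactivate_mm() stubs could be deleted outright.
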
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 2614a1206f2f..6116d7094292 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -62,8 +62,10 @@ extern unsigned long _ramend;
#include <asm/page_no.h>
#endif
+#ifdef CONFIG_DISCONTIGMEM
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
+#endif
#include <asm-generic/getorder.h>
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index e6b75992192b..7f5912af2a52 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -126,7 +126,7 @@ static inline void *__va(unsigned long x)
extern int m68k_virt_to_node_shift;
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#ifndef CONFIG_DISCONTIGMEM
#define __virt_to_node(addr) (&pg_data_map[0])
#else
extern struct pglist_data *pg_data_table[];
@@ -153,6 +153,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pfn_to_virt(page_to_pfn(page)); \
})
+#ifdef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) ({ \
unsigned long __pfn = (pfn); \
struct pglist_data *pgdat; \
@@ -165,6 +166,10 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pgdat = &pg_data_map[page_to_nid(__p)]; \
((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
})
+#else
+#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
+#include <asm-generic/memory_model.h>
+#endif
#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
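
With the DISCONTIGMEM-only code fenced off, the common case defines ARCH_PFN_OFFSET (taken from the first memory chunk, as the hunk above shows) and pulls in <asm-generic/memory_model.h>, which in the flatmem case reduces the pfn/page conversions to plain mem_map arithmetic, roughly:

	/* asm-generic/memory_model.h, flatmem case (roughly) */
	#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
	#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
					 ARCH_PFN_OFFSET)
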
diff --git a/arch/m68k/include/asm/virtconvert.h b/arch/m68k/include/asm/virtconvert.h
index dfe43083b579..ca91b32dc6ef 100644
--- a/arch/m68k/include/asm/virtconvert.h
+++ b/arch/m68k/include/asm/virtconvert.h
@@ -29,12 +29,7 @@ static inline void *phys_to_virt(unsigned long address)
}
/* Permanent address of a page. */
-#if defined(CONFIG_MMU) && defined(CONFIG_SINGLE_MEMORY_CHUNK)
-#define page_to_phys(page) \
- __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
-#else
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-#endif
/*
* IO bus memory addresses are 1:1 with the physical address,
diff --git a/arch/m68k/include/uapi/asm/signal.h b/arch/m68k/include/uapi/asm/signal.h
index 915cc755a184..4619291df601 100644
--- a/arch/m68k/include/uapi/asm/signal.h
+++ b/arch/m68k/include/uapi/asm/signal.h
@@ -57,30 +57,6 @@ typedef unsigned long sigset_t;
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index ab8aa7be260f..017bac3aab80 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -82,7 +82,7 @@ static struct m68k_mem_info m68k_ramdisk __initdata;
static char m68k_command_line[CL_SIZE] __initdata;
-void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
+void (*mach_sched_init) (void) __initdata = NULL;
/* machine dependent irq functions */
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
@@ -99,7 +99,6 @@ EXPORT_SYMBOL(mach_set_rtc_pll);
void (*mach_reset)( void );
void (*mach_halt)( void );
void (*mach_power_off)( void );
-long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index f66f4b1d062e..e377b4219528 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(memory_end);
char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
-void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
+void (*mach_sched_init)(void) __initdata = NULL;
int (*mach_hwclk) (int, struct rtc_time*);
/* machine dependent reboot functions */
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index c2697a4d4ddd..340ffeea0a9d 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -35,18 +35,9 @@
unsigned long (*mach_random_get_entropy)(void);
EXPORT_SYMBOL_GPL(mach_random_get_entropy);
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
- */
-static irqreturn_t timer_interrupt(int irq, void *dummy)
-{
- xtime_update(1);
- update_process_times(user_mode(get_irq_regs()));
- profile_tick(CPU_PROFILING);
-
#ifdef CONFIG_HEARTBEAT
+void timer_heartbeat(void)
+{
/* use power LED as a heartbeat instead -- much more useful
for debugging -- based on the version for PReP by Cort */
/* acts like an actual heart beat -- ie thump-thump-pause... */
@@ -68,9 +59,8 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
dist = period / 4;
}
}
-#endif /* CONFIG_HEARTBEAT */
- return IRQ_HANDLED;
}
+#endif /* CONFIG_HEARTBEAT */
#ifdef CONFIG_M68KCLASSIC
#if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC)
@@ -154,5 +144,5 @@ module_init(rtc_init);
void __init time_init(void)
{
- mach_sched_init(timer_interrupt);
+ mach_sched_init();
}
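
The removed timer_interrupt() shows exactly what the generic helper has to cover. legacy_timer_tick(), added elsewhere in this series under kernel/time/, is roughly equivalent to the deleted body, generalized to a tick count (a reconstruction from the code removed above, not the verbatim kernel/time implementation):

	void legacy_timer_tick(unsigned long ticks)
	{
		if (ticks)
			xtime_update(ticks);
		update_process_times(user_mode(get_irq_regs()));
		profile_tick(CPU_PROFILING);
	}

The heartbeat handling, being m68k-specific, stays behind here as timer_heartbeat().
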
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 7b975420c3d9..396e126a4258 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -65,7 +65,6 @@ SECTIONS {
_edata = .;
EXCEPTION_TABLE(16)
- NOTES
. = ALIGN(PAGE_SIZE);
__init_begin = .;
@@ -87,7 +86,7 @@ SECTIONS {
_end = .;
STABS_DEBUG
- .comment 0 : { *(.comment) }
+ ELF_DETAILS
/* Sections to be discarded */
DISCARDS
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 4d33da4e7106..ed1d9eda3190 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -49,7 +49,6 @@ SECTIONS
*(.m68k_fixup)
__stop_fixup = .;
}
- NOTES
.init_end : {
/* This ALIGN be in a section so that _end is at the end of the
load segment. */
@@ -60,7 +59,7 @@ SECTIONS
_end = . ;
STABS_DEBUG
- .comment 0 : { *(.comment) }
+ ELF_DETAILS
/* Sections to be discarded */
DISCARDS
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 87d9f4d08f65..4a52f44f2ef0 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -33,7 +33,6 @@ SECTIONS
RW_DATA(16, PAGE_SIZE, THREAD_SIZE) :data
/* End of data goes *here* so that freeing init code works properly. */
_edata = .;
- NOTES
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
@@ -53,6 +52,7 @@ __init_begin = .;
_end = . ;
STABS_DEBUG
+ ELF_DETAILS
/* Sections to be discarded */
DISCARDS
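
All three m68k linker scripts swap the hand-rolled ".comment" output section for the shared ELF_DETAILS macro and drop the freestanding NOTES reference. For orientation, ELF_DETAILS in <asm-generic/vmlinux.lds.h> is roughly:

	#define ELF_DETAILS						\
		.comment 0 : { *(.comment) }				\
		.symtab 0 : { *(.symtab) }				\
		.strtab 0 : { *(.strtab) }				\
		.shstrtab 0 : { *(.shstrtab) }
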
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 0ac53d87493c..1cdac959bd91 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -55,10 +55,9 @@ struct mac_booter_data mac_bi_data;
static unsigned long mac_orig_videoaddr;
extern int mac_hwclk(int, struct rtc_time *);
-extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
-extern void via_init_clock(irq_handler_t func);
+extern void via_init_clock(void);
extern void oss_init(void);
extern void psc_init(void);
extern void baboon_init(void);
@@ -69,9 +68,9 @@ static void mac_get_model(char *str);
static void mac_identify(void);
static void mac_report_hardware(void);
-static void __init mac_sched_init(irq_handler_t vector)
+static void __init mac_sched_init(void)
{
- via_init_clock(vector);
+ via_init_clock();
}
/*
@@ -141,7 +140,6 @@ void __init config_mac(void)
mach_reset = mac_reset;
mach_halt = mac_poweroff;
mach_power_off = mac_poweroff;
- mach_max_dma_address = 0xffffffff;
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
mach_beep = mac_mksound;
#endif
@@ -777,16 +775,12 @@ static struct resource scc_b_rsrcs[] = {
struct platform_device scc_a_pdev = {
.name = "scc",
.id = 0,
- .num_resources = ARRAY_SIZE(scc_a_rsrcs),
- .resource = scc_a_rsrcs,
};
EXPORT_SYMBOL(scc_a_pdev);
struct platform_device scc_b_pdev = {
.name = "scc",
.id = 1,
- .num_resources = ARRAY_SIZE(scc_b_rsrcs),
- .resource = scc_b_rsrcs,
};
EXPORT_SYMBOL(scc_b_pdev);
@@ -813,10 +807,15 @@ static void __init mac_identify(void)
/* Set up serial port resources for the console initcall. */
- scc_a_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase + 2;
- scc_a_rsrcs[0].end = scc_a_rsrcs[0].start;
- scc_b_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase;
- scc_b_rsrcs[0].end = scc_b_rsrcs[0].start;
+ scc_a_rsrcs[0].start = (resource_size_t)mac_bi_data.sccbase + 2;
+ scc_a_rsrcs[0].end = scc_a_rsrcs[0].start;
+ scc_a_pdev.num_resources = ARRAY_SIZE(scc_a_rsrcs);
+ scc_a_pdev.resource = scc_a_rsrcs;
+
+ scc_b_rsrcs[0].start = (resource_size_t)mac_bi_data.sccbase;
+ scc_b_rsrcs[0].end = scc_b_rsrcs[0].start;
+ scc_b_pdev.num_resources = ARRAY_SIZE(scc_b_rsrcs);
+ scc_b_pdev.resource = scc_b_rsrcs;
switch (macintosh_config->scc_type) {
case MAC_SCC_PSC:
@@ -835,13 +834,6 @@ static void __init mac_identify(void)
break;
}
- /*
- * We need to pre-init the IOPs, if any. Otherwise
- * the serial console won't work if the user had
- * the serial ports set to "Faster" mode in MacOS.
- */
- iop_preinit();
-
pr_info("Detected Macintosh model: %d\n", model);
/*
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index c669a7644301..de156a027f5b 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -47,6 +47,10 @@
*
* TODO:
*
+ * o The SCC IOP has to be placed in bypass mode before the serial console
+ * gets initialized. iop_init() would be one place to do that. Or the
+ * bootloader could do that. For now, the Serial Switch control panel
+ * is needed for that -- contrary to the changelog above.
* o Something should be periodically checking iop_alive() to make sure the
* IOP hasn't died.
* o Some of the IOP manager routines need better error checking and
@@ -225,40 +229,6 @@ static struct iop_msg *iop_get_unused_msg(void)
}
/*
- * This is called by the startup code before anything else. Its purpose
- * is to find and initialize the IOPs early in the boot sequence, so that
- * the serial IOP can be placed into bypass mode _before_ we try to
- * initialize the serial console.
- */
-
-void __init iop_preinit(void)
-{
- if (macintosh_config->scc_type == MAC_SCC_IOP) {
- if (macintosh_config->ident == MAC_MODEL_IIFX) {
- iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_IIFX;
- } else {
- iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
- }
- iop_scc_present = 1;
- } else {
- iop_base[IOP_NUM_SCC] = NULL;
- iop_scc_present = 0;
- }
- if (macintosh_config->adb_type == MAC_ADB_IOP) {
- if (macintosh_config->ident == MAC_MODEL_IIFX) {
- iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_IIFX;
- } else {
- iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
- }
- iop_stop(iop_base[IOP_NUM_ISM]);
- iop_ism_present = 1;
- } else {
- iop_base[IOP_NUM_ISM] = NULL;
- iop_ism_present = 0;
- }
-}
-
-/*
* Initialize the IOPs, if present.
*/
@@ -266,11 +236,23 @@ void __init iop_init(void)
{
int i;
- if (iop_scc_present) {
+ if (macintosh_config->scc_type == MAC_SCC_IOP) {
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ iop_base[IOP_NUM_SCC] = (struct mac_iop *)SCC_IOP_BASE_IIFX;
+ else
+ iop_base[IOP_NUM_SCC] = (struct mac_iop *)SCC_IOP_BASE_QUADRA;
+ iop_scc_present = 1;
pr_debug("SCC IOP detected at %p\n", iop_base[IOP_NUM_SCC]);
}
- if (iop_ism_present) {
+ if (macintosh_config->adb_type == MAC_ADB_IOP) {
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ iop_base[IOP_NUM_ISM] = (struct mac_iop *)ISM_IOP_BASE_IIFX;
+ else
+ iop_base[IOP_NUM_ISM] = (struct mac_iop *)ISM_IOP_BASE_QUADRA;
+ iop_ism_present = 1;
pr_debug("ISM IOP detected at %p\n", iop_base[IOP_NUM_ISM]);
+
+ iop_stop(iop_base[IOP_NUM_ISM]);
iop_start(iop_base[IOP_NUM_ISM]);
iop_alive(iop_base[IOP_NUM_ISM]); /* clears the alive flag */
}
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index ac77d73af19a..3d11d6219cdd 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -169,8 +169,6 @@ void __init via_init(void)
via1[vIER] = 0x7F;
via1[vIFR] = 0x7F;
- via1[vT1LL] = 0;
- via1[vT1LH] = 0;
via1[vT1CL] = 0;
via1[vT1CH] = 0;
via1[vT2CL] = 0;
@@ -225,8 +223,6 @@ void __init via_init(void)
via2[gIER] = 0x7F;
via2[gIFR] = 0x7F | rbv_clear;
if (!rbv_present) {
- via2[vT1LL] = 0;
- via2[vT1LH] = 0;
via2[vT1CL] = 0;
via2[vT1CH] = 0;
via2[vT2CL] = 0;
@@ -305,21 +301,6 @@ void via_l2_flush(int writeback)
}
/*
- * Return the status of the L2 cache on a IIci
- */
-
-int via_get_cache_disable(void)
-{
- /* Safeguard against being called accidentally */
- if (!via2) {
- printk(KERN_ERR "via_get_cache_disable called on a non-VIA machine!\n");
- return 1;
- }
-
- return (int) via2[gBufB] & VIA2B_vCDis;
-}
-
-/*
* Initialize VIA2 for Nubus access
*/
@@ -602,25 +583,21 @@ static u32 clk_total, clk_offset;
static irqreturn_t via_timer_handler(int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
-
clk_total += VIA_TIMER_CYCLES;
clk_offset = 0;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
return IRQ_HANDLED;
}
-void __init via_init_clock(irq_handler_t timer_routine)
+void __init via_init_clock(void)
{
if (request_irq(IRQ_MAC_TIMER_1, via_timer_handler, IRQF_TIMER, "timer",
- timer_routine)) {
+ NULL)) {
pr_err("Couldn't register %s interrupt\n", "timer");
return;
}
- via1[vT1LL] = VIA_TC_LOW;
- via1[vT1LH] = VIA_TC_HIGH;
via1[vT1CL] = VIA_TC_LOW;
via1[vT1CH] = VIA_TC_HIGH;
via1[vACR] |= 0x40;
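
The deleted vT1LL/vT1LH stores rely on 6522 VIA semantics (as recalled from the datasheet, so treat this as an assumption rather than something the patch states): writing T1C-L stores the low-order latch, and writing T1C-H loads the high latch and copies both latches into the counter, starting it. A separate latch write before the counter write is therefore redundant:

	via1[vT1CL] = VIA_TC_LOW;	/* stores the low-order latch */
	via1[vT1CH] = VIA_TC_HIGH;	/* stores the high latch, reloads
					   the counter, starts the timer */

With ACR bit 6 (the 0x40 set above) enabling free-run mode, the counter keeps reloading from those same latches on every expiry.
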
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 53040857a9ed..14c1e541451c 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -42,19 +42,19 @@ EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_MMU
+int m68k_virt_to_node_shift;
+
+#ifdef CONFIG_DISCONTIGMEM
pg_data_t pg_data_map[MAX_NUMNODES];
EXPORT_SYMBOL(pg_data_map);
-int m68k_virt_to_node_shift;
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
pg_data_t *pg_data_table[65];
EXPORT_SYMBOL(pg_data_table);
#endif
void __init m68k_setup_node(int node)
{
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+#ifdef CONFIG_DISCONTIGMEM
struct m68k_mem_info *info = m68k_memory + node;
int i, end;
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 490700aa2212..cfdc7f912e14 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -37,7 +37,7 @@
static void mvme147_get_model(char *model);
-extern void mvme147_sched_init(irq_handler_t handler);
+extern void mvme147_sched_init(void);
extern int mvme147_hwclk (int, struct rtc_time *);
extern void mvme147_reset (void);
@@ -80,7 +80,6 @@ void __init mvme147_init_IRQ(void)
void __init config_mvme147(void)
{
- mach_max_dma_address = 0x01000000;
mach_sched_init = mvme147_sched_init;
mach_init_IRQ = mvme147_init_IRQ;
mach_hwclk = mvme147_hwclk;
@@ -112,24 +111,23 @@ static u32 clk_total;
static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
unsigned long flags;
local_irq_save(flags);
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
clk_total += PCC_TIMER_CYCLES;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
local_irq_restore(flags);
return IRQ_HANDLED;
}
-void mvme147_sched_init (irq_handler_t timer_routine)
+void mvme147_sched_init (void)
{
if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQF_TIMER,
- "timer 1", timer_routine))
+ "timer 1", NULL))
pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 5b86d10e0f84..30357fe4ba6c 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -43,7 +43,7 @@ extern t_bdid mvme_bdid;
static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;
static void mvme16x_get_model(char *model);
-extern void mvme16x_sched_init(irq_handler_t handler);
+extern void mvme16x_sched_init(void);
extern int mvme16x_hwclk (int, struct rtc_time *);
extern void mvme16x_reset (void);
@@ -268,7 +268,6 @@ void __init config_mvme16x(void)
char id[40];
uint16_t brdno = be16_to_cpu(p->brdno);
- mach_max_dma_address = 0xffffffff;
mach_sched_init = mvme16x_sched_init;
mach_init_IRQ = mvme16x_init_IRQ;
mach_hwclk = mvme16x_hwclk;
@@ -372,20 +371,19 @@ static u32 clk_total;
static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
unsigned long flags;
local_irq_save(flags);
out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
clk_total += PCC_TIMER_CYCLES;
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
local_irq_restore(flags);
return IRQ_HANDLED;
}
-void mvme16x_sched_init (irq_handler_t timer_routine)
+void mvme16x_sched_init(void)
{
uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
int irq;
@@ -396,7 +394,7 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
- timer_routine))
+ NULL))
panic ("Couldn't register timer int");
clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 4627de3c0603..d6a423875231 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -37,7 +37,7 @@
extern void q40_init_IRQ(void);
static void q40_get_model(char *model);
-extern void q40_sched_init(irq_handler_t handler);
+extern void q40_sched_init(void);
static int q40_hwclk(int, struct rtc_time *);
static unsigned int q40_get_ss(void);
@@ -185,11 +185,6 @@ void __init config_q40(void)
/* disable a few things that SMSQ might have left enabled */
q40_disable_irqs();
-
- /* no DMA at all, but ide-scsi requires it.. make sure
- * all physical RAM fits into the boundary - otherwise
- * allocator may play costly and useless tricks */
- mach_max_dma_address = 1024*1024*1024;
}
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 1c696906c159..6886a5d0007b 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <asm/machdep.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
@@ -129,8 +130,6 @@ void q40_mksound(unsigned int hz, unsigned int ticks)
static irqreturn_t q40_timer_int(int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
-
ql_ticks = ql_ticks ? 0 : 1;
if (sound_ticks) {
unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
@@ -143,19 +142,20 @@ static irqreturn_t q40_timer_int(int irq, void *dev_id)
unsigned long flags;
local_irq_save(flags);
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
+ timer_heartbeat();
local_irq_restore(flags);
}
return IRQ_HANDLED;
}
-void q40_sched_init (irq_handler_t timer_routine)
+void q40_sched_init (void)
{
int timer_irq;
timer_irq = Q40_IRQ_FRAME;
- if (request_irq(timer_irq, q40_timer_int, 0, "timer", timer_routine))
+ if (request_irq(timer_irq, q40_timer_int, 0, "timer", NULL))
panic("Couldn't register timer int");
master_outb(-1, FRAME_CLEAR_REG);
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 7204c0ea0dc7..f7dd47232b6c 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -36,7 +36,7 @@
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
-static void sun3_sched_init(irq_handler_t handler);
+static void sun3_sched_init(void);
extern void sun3_get_model (char* model);
extern int sun3_hwclk(int set, struct rtc_time *t);
@@ -151,7 +151,7 @@ void __init config_sun3(void)
sun3_bootmem_alloc(memory_start, memory_end);
}
-static void __init sun3_sched_init(irq_handler_t timer_routine)
+static void __init sun3_sched_init(void)
{
sun3_disable_interrupts();
intersil_clock->cmd_reg=(INTERSIL_RUN|INTERSIL_INT_DISABLE|INTERSIL_24H_MODE);
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index a5824abb4a39..41ae422119d3 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -73,8 +73,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
#ifdef CONFIG_SUN3
intersil_clear();
#endif
- xtime_update(1);
- update_process_times(user_mode(get_irq_regs()));
+ legacy_timer_tick(1);
cnt = kstat_irqs_cpu(irq, 0);
if (!(cnt % 20))
sun3_leds(led_pattern[cnt % 160 / 20]);
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index d806dee71a9c..37121a0f1253 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -44,8 +44,6 @@ void __init config_sun3x(void)
sun3x_prom_init();
- mach_max_dma_address = 0xffffffff; /* we can DMA anywhere, whee */
-
mach_sched_init = sun3x_sched_init;
mach_init_IRQ = sun3_init_IRQ;
diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
index 9163294b0fb6..a2c97821faf2 100644
--- a/arch/m68k/sun3x/time.c
+++ b/arch/m68k/sun3x/time.c
@@ -77,21 +77,20 @@ int sun3x_hwclk(int set, struct rtc_time *t)
#if 0
static irqreturn_t sun3x_timer_tick(int irq, void *dev_id)
{
- irq_handler_t timer_routine = dev_id;
unsigned long flags;
local_irq_save(flags);
/* Clear the pending interrupt - pulse the enable line low */
disable_irq(5);
enable_irq(5);
- timer_routine(0, NULL);
+ legacy_timer_tick(1);
local_irq_restore(flags);
return IRQ_HANDLED;
}
#endif
-void __init sun3x_sched_init(irq_handler_t vector)
+void __init sun3x_sched_init(void)
{
sun3_disable_interrupts();
diff --git a/arch/m68k/sun3x/time.h b/arch/m68k/sun3x/time.h
index 86ce78bb3c28..7cfff22e4986 100644
--- a/arch/m68k/sun3x/time.h
+++ b/arch/m68k/sun3x/time.h
@@ -3,7 +3,7 @@
#define SUN3X_TIME_H
extern int sun3x_hwclk(int set, struct rtc_time *t);
-void sun3x_sched_init(irq_handler_t vector);
+void sun3x_sched_init(void);
struct mostek_dt {
volatile unsigned char csr;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 33925ffed68f..f82795592ce5 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -3,21 +3,18 @@ config MICROBLAZE
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_NO_SWAP
- select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_DMA_SET_UNCACHED if !MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
- select DMA_DIRECT_REMAP if MMU
+ select DMA_DIRECT_REMAP
select GENERIC_ATOMIC64
- select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
@@ -45,7 +42,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
- select MMU_GATHER_NO_RANGE if MMU
+ select MMU_GATHER_NO_RANGE
select SPARSE_IRQ
select SET_FS
@@ -96,8 +93,7 @@ menu "Processor type and features"
source "kernel/Kconfig.hz"
config MMU
- bool "MMU support"
- default n
+ def_bool y
comment "Boot options"
@@ -143,18 +139,9 @@ config ADVANCED_OPTIONS
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
-config XILINX_UNCACHED_SHADOW
- bool "Are you using uncached shadow for RAM ?"
- depends on ADVANCED_OPTIONS && !MMU
- default n
- help
- This is needed to be able to allocate uncachable memory regions.
- The feature requires the design to define the RAM memory controller
- window to be twice as large as the actual physical memory.
-
config HIGHMEM
bool "High memory support"
- depends on MMU
+ select KMAP_LOCAL
help
The address space of Microblaze processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
@@ -167,7 +154,7 @@ config HIGHMEM
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
- depends on ADVANCED_OPTIONS && MMU
+ depends on ADVANCED_OPTIONS
help
This option allows you to set the maximum amount of memory which
will be used as "low memory", that is, memory which the kernel can
@@ -205,12 +192,11 @@ config KERNEL_START_BOOL
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
- default "0xc0000000" if MMU
- default KERNEL_BASE_ADDR if !MMU
+ default "0xc0000000"
config TASK_SIZE_BOOL
bool "Set custom user task size"
- depends on ADVANCED_OPTIONS && MMU
+ depends on ADVANCED_OPTIONS
help
This option allows you to set the amount of virtual address space
allocated to user tasks. This can be useful in optimizing the
@@ -222,33 +208,6 @@ config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000"
-choice
- prompt "Page size"
- default MICROBLAZE_4K_PAGES
- depends on ADVANCED_OPTIONS && !MMU
- help
- Select the kernel logical page size. Increasing the page size
- will reduce software overhead at each page boundary, allow
- hardware prefetch mechanisms to be more effective, and allow
- larger dma transfers increasing IO efficiency and reducing
- overhead. However the utilization of memory will increase.
- For example, each cached file will using a multiple of the
- page size to hold its contents and the difference between the
- end of file and the end of page is wasted.
-
- If unsure, choose 4K_PAGES.
-
-config MICROBLAZE_4K_PAGES
- bool "4k page size"
-
-config MICROBLAZE_16K_PAGES
- bool "16k page size"
-
-config MICROBLAZE_64K_PAGES
- bool "64k page size"
-
-endchoice
-
endmenu
menu "Bus Options"
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 7b340a35b194..bb980891816d 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -1,11 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
KBUILD_DEFCONFIG := mmu_defconfig
-ifeq ($(CONFIG_MMU),y)
UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
-else
-UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
-endif
# What CPU version are we building for, and crack it open
# as major.minor.rev
@@ -67,12 +63,7 @@ DTB:=$(subst simpleImage.,,$(filter simpleImage.%, $(MAKECMDGOALS)))
core-y += $(boot)/dts/
-# defines filename extension depending memory management type
-ifeq ($(CONFIG_MMU),)
-MMU := -nommu
-endif
-
-export MMU DTB
+export DTB
all: linux.bin
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 9b8a50f30662..51337fffb947 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -16,7 +16,6 @@ CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
CONFIG_HZ_100=y
-CONFIG_MMU=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE_FORCE=y
CONFIG_HIGHMEM=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
deleted file mode 100644
index 8c420782d6e4..000000000000
--- a/arch/microblaze/configs/nommu_defconfig
+++ /dev/null
@@ -1,90 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_BASE_FULL is not set
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
-CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
-CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
-CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
-CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
-CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
-CONFIG_HZ_100=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE_FORCE=y
-CONFIG_PCI_XILINX=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_EFI_PARTITION is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-CONFIG_PCI=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_RAM=y
-CONFIG_MTD_UCLINUX=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_XILINX_EMACLITE=y
-CONFIG_XILINX_LL_TEMAC=y
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_XILINX_HWICAP=y
-CONFIG_I2C=y
-CONFIG_I2C_XILINX=y
-CONFIG_SPI=y
-CONFIG_SPI_XILINX=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_XILINX=y
-CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_XILINX_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_XILINX=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT3_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NLS=y
-CONFIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_ARC4=y
-CONFIG_CRYPTO_DES=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h
index e6cb6d0725af..f801582be912 100644
--- a/arch/microblaze/include/asm/dma.h
+++ b/arch/microblaze/include/asm/dma.h
@@ -6,14 +6,8 @@
#ifndef _ASM_MICROBLAZE_DMA_H
#define _ASM_MICROBLAZE_DMA_H
-#ifndef CONFIG_MMU
-/* we don't have dma address limit. define it as zero to be
- * unlimited. */
-#define MAX_DMA_ADDRESS (0)
-#else
/* Virtual address corresponding to last available physical memory address. */
#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
-#endif
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h
index d67e65b72215..967f175173e1 100644
--- a/arch/microblaze/include/asm/exceptions.h
+++ b/arch/microblaze/include/asm/exceptions.h
@@ -11,11 +11,6 @@
#define _ASM_MICROBLAZE_EXCEPTIONS_H
#ifdef __KERNEL__
-
-#ifndef CONFIG_MMU
-#define EX_HANDLER_STACK_SIZ (4*19)
-#endif
-
#ifndef __ASSEMBLY__
/* Macros to enable and disable HW exceptions in the MSR */
diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h
index 0379ce5229e3..e6e9288bff76 100644
--- a/arch/microblaze/include/asm/fixmap.h
+++ b/arch/microblaze/include/asm/fixmap.h
@@ -20,7 +20,7 @@
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
@@ -47,7 +47,7 @@ enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * num_possible_cpus()) - 1,
#endif
__end_of_fixed_addresses
};
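The fixmap window is now sized by KM_MAX_IDX from <asm/kmap_size.h> instead of the removed kmap_types.h enum. The generic kmap_local code hands out one slot per nesting level per CPU; the address computation is along these lines (a sketch under that assumption):

    /* hypothetical helper: address of kmap slot 'lvl' on this CPU */
    static unsigned long kmap_slot_vaddr(int lvl)
    {
            int idx = lvl + KM_MAX_IDX * smp_processor_id();

            return __fix_to_virt(FIX_KMAP_BEGIN + idx);
    }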
diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h
index 284ca8fb54c1..4418633fb163 100644
--- a/arch/microblaze/include/asm/highmem.h
+++ b/arch/microblaze/include/asm/highmem.h
@@ -25,7 +25,6 @@
#include <linux/uaccess.h>
#include <asm/fixmap.h>
-extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
/*
@@ -52,6 +51,11 @@ extern pte_t *pkmap_page_table;
#define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_page(NULL, vaddr);
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_page(NULL, vaddr);
+
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
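These two hooks are all the generic kmap_local implementation needs from the architecture: flush the local TLB entry right after a temporary PTE is installed and right after it is torn down. Callers then stick to the generic API; an illustrative user:

    void *src = kmap_local_page(page);      /* temporary, CPU-local mapping */
    memcpy(buf, src, PAGE_SIZE);
    kunmap_local(src);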
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 1dd6fae41897..b6a57f8468f0 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -30,15 +30,12 @@ extern resource_size_t isa_mem_base;
#define PCI_IOBASE ((void __iomem *)_IO_BASE)
#define IO_SPACE_LIMIT (0xFFFFFFFF)
-#ifdef CONFIG_MMU
#define page_to_bus(page) (page_to_phys(page))
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-#endif /* CONFIG_MMU */
-
/* Big Endian */
#define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a))
#define out_be16(a, v) __raw_writew((v), (a))
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 97f1243101cc..b928a87c0076 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -8,9 +8,6 @@
#ifndef _ASM_MICROBLAZE_MMU_H
#define _ASM_MICROBLAZE_MMU_H
-# ifndef CONFIG_MMU
-# include <asm-generic/mmu.h>
-# else /* CONFIG_MMU */
# ifdef __KERNEL__
# ifndef __ASSEMBLY__
@@ -119,5 +116,4 @@ extern u32 tlb_skip;
# define TLB_G 0x00000001 /* Memory is guarded from prefetch */
# endif /* __KERNEL__ */
-# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_MMU_H */
diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h
index f74f9da07fdc..866e52da5eb9 100644
--- a/arch/microblaze/include/asm/mmu_context.h
+++ b/arch/microblaze/include/asm/mmu_context.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_MMU
# include <asm/mmu_context_mm.h>
-#else
-# include <asm-generic/mmu_context.h>
-#endif
diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h
index a1c7dd48454c..c2c77f708455 100644
--- a/arch/microblaze/include/asm/mmu_context_mm.h
+++ b/arch/microblaze/include/asm/mmu_context_mm.h
@@ -33,10 +33,6 @@
to represent all kernel pages as shared among all contexts.
*/
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
# define NO_CONTEXT 256
# define LAST_CONTEXT 255
# define FIRST_CONTEXT 1
@@ -105,6 +101,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
* We're finished using the context for an address space.
*/
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
@@ -126,6 +123,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm)
{
@@ -136,5 +134,7 @@ static inline void activate_mm(struct mm_struct *active_mm,
extern void mmu_context_init(void);
+#include <asm-generic/mmu_context.h>
+
# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
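Defining destroy_context and activate_mm as self-named macros before pulling in <asm-generic/mmu_context.h> is the standard opt-out: the generic header supplies a no-op for every hook the architecture did not claim, roughly:

    /* the asm-generic pattern, abridged */
    #ifndef destroy_context
    static inline void destroy_context(struct mm_struct *mm)
    {
    }
    #endif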
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index b13463d39b38..bf681f272f72 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -20,13 +20,7 @@
#ifdef __KERNEL__
/* PAGE_SHIFT determines the page size */
-#if defined(CONFIG_MICROBLAZE_64K_PAGES)
-#define PAGE_SHIFT 16
-#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
-#define PAGE_SHIFT 14
-#else
#define PAGE_SHIFT 12
-#endif
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
@@ -44,17 +38,6 @@
#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
-#ifndef CONFIG_MMU
-/*
- * PAGE_OFFSET -- the first address of the first page of memory. When not
- * using MMU this corresponds to the first free page in physical memory (aligned
- * on a page boundary).
- */
-extern unsigned int __page_offset;
-#define PAGE_OFFSET __page_offset
-
-#else /* CONFIG_MMU */
-
/*
* PAGE_OFFSET -- the first address of the first page of memory. With MMU
* it is set to the kernel start address (aligned on a page boundary).
@@ -70,8 +53,6 @@ extern unsigned int __page_offset;
typedef unsigned long pte_basic_t;
#define PTE_FMT "%.8lx"
-#endif /* CONFIG_MMU */
-
# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
@@ -86,25 +67,12 @@ typedef struct page *pgtable_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
/* FIXME this can depend on linux kernel version */
-# ifdef CONFIG_MMU
typedef struct { unsigned long pgd; } pgd_t;
-# else /* CONFIG_MMU */
-typedef struct { unsigned long ste[64]; } pmd_t;
-typedef struct { pmd_t pue[1]; } pud_t;
-typedef struct { pud_t p4e[1]; } p4d_t;
-typedef struct { p4d_t pge[1]; } pgd_t;
-# endif /* CONFIG_MMU */
# define pte_val(x) ((x).pte)
# define pgprot_val(x) ((x).pgprot)
-# ifdef CONFIG_MMU
# define pgd_val(x) ((x).pgd)
-# else /* CONFIG_MMU */
-# define pmd_val(x) ((x).ste[0])
-# define pud_val(x) ((x).pue[0])
-# define pgd_val(x) ((x).pge[0])
-# endif /* CONFIG_MMU */
# define __pte(x) ((pte_t) { (x) })
# define __pgd(x) ((pgd_t) { (x) })
@@ -142,28 +110,12 @@ extern int page_is_ram(unsigned long pfn);
# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
-# ifdef CONFIG_MMU
-
# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-# else /* CONFIG_MMU */
-# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
-# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
-# define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
-# define page_to_bus(page) (page_to_phys(page))
-# define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
-# endif /* CONFIG_MMU */
-
-# ifndef CONFIG_MMU
-# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && \
- ((pfn) <= (min_low_pfn + max_mapnr)))
-# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
-# else /* CONFIG_MMU */
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
-# endif /* CONFIG_MMU */
# endif /* __ASSEMBLY__ */
@@ -174,12 +126,6 @@ extern int page_is_ram(unsigned long pfn);
/* Convert between virtual and physical address for MMU. */
/* Handle MicroBlaze processor with virtual memory. */
-#ifndef CONFIG_MMU
-#define __virt_to_phys(addr) addr
-#define __phys_to_virt(addr) addr
-#define tophys(rd, rs) addik rd, rs, 0
-#define tovirt(rd, rs) addik rd, rs, 0
-#else
#define __virt_to_phys(addr) \
((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
#define __phys_to_virt(addr) \
@@ -188,14 +134,9 @@ extern int page_is_ram(unsigned long pfn);
addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
#define tovirt(rd, rs) \
addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
-#endif /* CONFIG_MMU */
#define TOPHYS(addr) __virt_to_phys(addr)
-#ifdef CONFIG_MMU
-
-#endif /* CONFIG_MMU */
-
#endif /* __KERNEL__ */
#include <asm-generic/memory_model.h>
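With only the MMU variant left, the virt/phys conversion is a constant offset between CONFIG_KERNEL_START and CONFIG_KERNEL_BASE_ADDR. A worked example, assuming the default KERNEL_START of 0xc0000000 and a hypothetical KERNEL_BASE_ADDR of 0x80000000:

    /* illustrative values only */
    __virt_to_phys(0xc0100000) == 0xc0100000 + 0x80000000 - 0xc0000000
                               == 0x80100000;
    __phys_to_virt(0x80100000) == 0xc0100000;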
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 8839ce00ea05..d56b9f670ad1 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -8,8 +8,6 @@
#ifndef _ASM_MICROBLAZE_PGALLOC_H
#define _ASM_MICROBLAZE_PGALLOC_H
-#ifdef CONFIG_MMU
-
#include <linux/kernel.h> /* For min/max macros */
#include <linux/highmem.h>
#include <linux/pgtable.h>
@@ -42,6 +40,4 @@ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
-#endif /* CONFIG_MMU */
-
#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 3fa1df90925e..9ae8d2c17dd5 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -14,47 +14,6 @@
extern int mem_init_done;
#endif
-#ifndef CONFIG_MMU
-
-#define pgd_present(pgd) (1) /* pages are always present on non MMU */
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_clear(pgdp)
-#define kern_addr_valid(addr) (1)
-
-#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */
-#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */
-
-#define pgprot_noncached(x) (x)
-#define pgprot_writecombine pgprot_noncached
-#define pgprot_device pgprot_noncached
-
-#define __swp_type(x) (0)
-#define __swp_offset(x) (0)
-#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
-
-#define swapper_pg_dir ((pgd_t *) NULL)
-
-#define arch_enter_lazy_cpu_mode() do {} while (0)
-
-#define pgprot_noncached_wc(prot) prot
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define VMALLOC_START 0
-#define VMALLOC_END 0xffffffff
-
-#else /* CONFIG_MMU */
-
#include <asm-generic/pgtable-nopmd.h>
#ifdef __KERNEL__
@@ -491,8 +450,6 @@ void __init *early_get_page(void);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* CONFIG_MMU */
-
#ifndef __ASSEMBLY__
extern unsigned long ioremap_bot, ioremap_base;
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 1ff5a82b76b6..06c6e493590a 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -31,42 +31,6 @@ extern void ret_from_kernel_thread(void);
# endif /* __ASSEMBLY__ */
-# ifndef CONFIG_MMU
-/*
- * User space process size: memory size
- *
- * TASK_SIZE on MMU cpu is usually 1GB. However, on no-MMU arch, both
- * user processes and the kernel is on the same memory region. They
- * both share the memory space and that is limited by the amount of
- * physical memory. thus, we set TASK_SIZE == amount of total memory.
- */
-# define TASK_SIZE (0x81000000 - 0x80000000)
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's. We won't be using it
- */
-# define TASK_UNMAPPED_BASE 0
-
-/* definition in include/linux/sched.h */
-struct task_struct;
-
-/* thread_struct is gone. use thread_info instead. */
-struct thread_struct { };
-# define INIT_THREAD { }
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-extern unsigned long get_wchan(struct task_struct *p);
-
-# define KSTK_EIP(tsk) (0)
-# define KSTK_ESP(tsk) (0)
-
-# else /* CONFIG_MMU */
-
/*
* This is used to define STACK_TOP, and with MMU it must be below
* kernel base to select the correct PGD when handling MMU exceptions.
@@ -122,9 +86,6 @@ unsigned long get_wchan(struct task_struct *p);
# define KSTK_EIP(task) (task_pc(task))
# define KSTK_ESP(task) (task_sp(task))
-/* FIXME */
-# define deactivate_mm(tsk, mm) do { } while (0)
-
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
@@ -133,5 +94,4 @@ extern struct dentry *of_debugfs_root;
#endif
# endif /* __ASSEMBLY__ */
-# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_PROCESSOR_H */
diff --git a/arch/microblaze/include/asm/registers.h b/arch/microblaze/include/asm/registers.h
index ee81e1cba008..6b36693fc621 100644
--- a/arch/microblaze/include/asm/registers.h
+++ b/arch/microblaze/include/asm/registers.h
@@ -27,7 +27,6 @@
#define FSR_UF (1<<1) /* Underflow */
#define FSR_DO (1<<0) /* Denormalized operand error */
-# ifdef CONFIG_MMU
/* Machine State Register (MSR) Fields */
# define MSR_UM (1<<11) /* User Mode */
# define MSR_UMS (1<<12) /* User Mode Save */
@@ -43,5 +42,4 @@
# define ESR_DIZ (1<<11) /* Zone Protection */
# define ESR_S (1<<10) /* Store instruction */
-# endif /* CONFIG_MMU */
#endif /* _ASM_MICROBLAZE_REGISTERS_H */
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index be10da9d87cb..a06cc1f97aa9 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -14,9 +14,7 @@ extern char cmd_line[COMMAND_LINE_SIZE];
extern char *klimit;
-# ifdef CONFIG_MMU
extern void mmu_reset(void);
-# endif /* CONFIG_MMU */
void time_init(void);
void init_IRQ(void);
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index ad8e8fcb90d3..44f5ca331862 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -107,6 +107,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
/* restore singlestep on return to user mode */
#define TIF_SINGLESTEP 4
+#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
@@ -119,6 +120,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
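TIF_NOTIFY_SIGNAL lets core code interrupt a task as if a signal were pending without queueing a real signal; task_work delivery with TWA_SIGNAL is the main user. A hedged sketch of the producer side (helper names from that kernel generation):

    /* poke 'task' so it takes the signal-exit path soon */
    if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL))
            wake_up_state(task, TASK_INTERRUPTIBLE);

The consumer side for this architecture is the do_notify_resume() change further down in this patch.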
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index 1200e2bf14bb..2038168ed128 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -8,8 +8,6 @@
#ifndef _ASM_MICROBLAZE_TLBFLUSH_H
#define _ASM_MICROBLAZE_TLBFLUSH_H
-#ifdef CONFIG_MMU
-
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h> /* For TASK_SIZE */
@@ -50,16 +48,4 @@ static inline void local_flush_tlb_range(struct vm_area_struct *vma,
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end) { }
-#else /* CONFIG_MMU */
-
-#define flush_tlb() BUG()
-#define flush_tlb_all() BUG()
-#define flush_tlb_mm(mm) BUG()
-#define flush_tlb_page(vma, addr) BUG()
-#define flush_tlb_range(mm, start, end) BUG()
-#define flush_tlb_pgtables(mm, start, end) BUG()
-#define flush_tlb_kernel_range(start, end) BUG()
-
-#endif /* CONFIG_MMU */
-
#endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 304b04ffea2f..c44b59470e45 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -30,35 +30,14 @@
*/
# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-# ifndef CONFIG_MMU
-# define KERNEL_DS MAKE_MM_SEG(0)
-# define USER_DS KERNEL_DS
-# else
# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-# endif
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(val) (current_thread_info()->addr_limit = (val))
# define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-#ifndef CONFIG_MMU
-
-/* Check against bounds of physical memory */
-static inline int ___range_ok(unsigned long addr, unsigned long size)
-{
- return ((addr < memory_start) ||
- ((addr + size - 1) > (memory_start + memory_size - 1)));
-}
-
-#define __range_ok(addr, size) \
- ___range_ok((unsigned long)(addr), (unsigned long)(size))
-
-#define access_ok(addr, size) (__range_ok((addr), (size)) == 0)
-
-#else
-
static inline int access_ok(const void __user *addr, unsigned long size)
{
if (!size)
@@ -77,15 +56,9 @@ ok:
(u32)get_fs().seg);
return 1;
}
-#endif
-#ifdef CONFIG_MMU
# define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
-#else
-# define __FIXUP_SECTION ".section .discard,\"ax\"\n"
-# define __EX_TABLE_SECTION ".section .discard,\"ax\"\n"
-#endif
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
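access_ok() remains the precondition for the raw user-copy routines; most callers get the check implicitly through copy_{from,to}_user(). An illustrative explicit use:

    if (!access_ok(ubuf, len))
            return -EFAULT;
    if (__copy_from_user(kbuf, ubuf, len))  /* raw copy, no recheck */
            return -EFAULT;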
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index dd71637437f4..15a20eb814ce 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -22,9 +22,9 @@ obj-y += dma.o exceptions.o \
obj-y += cpu/
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
-obj-$(CONFIG_MMU) += misc.o
+obj-y += misc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-y += entry$(MMU).o
+obj-y += entry.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index c1b459c97571..6c69ce7be2e8 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -70,7 +70,6 @@ int main(int argc, char *argv[])
/* struct task_struct */
DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
-#ifdef CONFIG_MMU
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
@@ -84,7 +83,6 @@ int main(int argc, char *argv[])
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
BLANK();
-#endif
/* struct thread_info */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
deleted file mode 100644
index 7e394fc2c439..000000000000
--- a/arch/microblaze/kernel/entry-nommu.S
+++ /dev/null
@@ -1,622 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/thread_info.h>
-#include <linux/errno.h>
-#include <asm/entry.h>
-#include <asm/asm-offsets.h>
-#include <asm/registers.h>
-#include <asm/unistd.h>
-#include <asm/percpu.h>
-#include <asm/signal.h>
-
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- .macro disable_irq
- msrclr r0, MSR_IE
- .endm
-
- .macro enable_irq
- msrset r0, MSR_IE
- .endm
-
- .macro clear_bip
- msrclr r0, MSR_BIP
- .endm
-#else
- .macro disable_irq
- mfs r11, rmsr
- andi r11, r11, ~MSR_IE
- mts rmsr, r11
- .endm
-
- .macro enable_irq
- mfs r11, rmsr
- ori r11, r11, MSR_IE
- mts rmsr, r11
- .endm
-
- .macro clear_bip
- mfs r11, rmsr
- andi r11, r11, ~MSR_BIP
- mts rmsr, r11
- .endm
-#endif
-
-ENTRY(_interrupt)
- swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
- swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
- lwi r11, r0, PER_CPU(KM) /* load mode indicator */
- beqid r11, 1f
- nop
- brid 2f /* jump over */
- addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
-1: /* switch to kernel stack */
- lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
- lwi r1, r1, TS_THREAD_INFO /* get the thread info */
- /* calculate kernel stack pointer */
- addik r1, r1, THREAD_SIZE - PT_SIZE
-2:
- swi r11, r1, PT_MODE /* store the mode */
- lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
- swi r2, r1, PT_R2
- swi r3, r1, PT_R3
- swi r4, r1, PT_R4
- swi r5, r1, PT_R5
- swi r6, r1, PT_R6
- swi r7, r1, PT_R7
- swi r8, r1, PT_R8
- swi r9, r1, PT_R9
- swi r10, r1, PT_R10
- swi r11, r1, PT_R11
- swi r12, r1, PT_R12
- swi r13, r1, PT_R13
- swi r14, r1, PT_R14
- swi r14, r1, PT_PC
- swi r15, r1, PT_R15
- swi r16, r1, PT_R16
- swi r17, r1, PT_R17
- swi r18, r1, PT_R18
- swi r19, r1, PT_R19
- swi r20, r1, PT_R20
- swi r21, r1, PT_R21
- swi r22, r1, PT_R22
- swi r23, r1, PT_R23
- swi r24, r1, PT_R24
- swi r25, r1, PT_R25
- swi r26, r1, PT_R26
- swi r27, r1, PT_R27
- swi r28, r1, PT_R28
- swi r29, r1, PT_R29
- swi r30, r1, PT_R30
- swi r31, r1, PT_R31
- /* special purpose registers */
- mfs r11, rmsr
- swi r11, r1, PT_MSR
- mfs r11, rear
- swi r11, r1, PT_EAR
- mfs r11, resr
- swi r11, r1, PT_ESR
- mfs r11, rfsr
- swi r11, r1, PT_FSR
- /* reload original stack pointer and save it */
- lwi r11, r0, PER_CPU(ENTRY_SP)
- swi r11, r1, PT_R1
- /* update mode indicator we are in kernel mode */
- addik r11, r0, 1
- swi r11, r0, PER_CPU(KM)
- /* restore r31 */
- lwi r31, r0, PER_CPU(CURRENT_SAVE)
- /* prepare the link register, the argument and jump */
- addik r15, r0, ret_from_intr - 8
- addk r6, r0, r15
- braid do_IRQ
- add r5, r0, r1
-
-ret_from_intr:
- lwi r11, r1, PT_MODE
- bneid r11, no_intr_resched
-
-3:
- lwi r6, r31, TS_THREAD_INFO /* get thread info */
- lwi r19, r6, TI_FLAGS /* get flags in thread info */
- /* do an extra work if any bits are set */
-
- andi r11, r19, _TIF_NEED_RESCHED
- beqi r11, 1f
- bralid r15, schedule
- nop
- bri 3b
-1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
- beqid r11, no_intr_resched
- addk r5, r1, r0
- bralid r15, do_notify_resume
- addk r6, r0, r0
- bri 3b
-
-no_intr_resched:
- /* Disable interrupts, we are now committed to the state restore */
- disable_irq
-
- /* save mode indicator */
- lwi r11, r1, PT_MODE
- swi r11, r0, PER_CPU(KM)
-
- /* save r31 */
- swi r31, r0, PER_CPU(CURRENT_SAVE)
-restore_context:
- /* special purpose registers */
- lwi r11, r1, PT_FSR
- mts rfsr, r11
- lwi r11, r1, PT_ESR
- mts resr, r11
- lwi r11, r1, PT_EAR
- mts rear, r11
- lwi r11, r1, PT_MSR
- mts rmsr, r11
-
- lwi r31, r1, PT_R31
- lwi r30, r1, PT_R30
- lwi r29, r1, PT_R29
- lwi r28, r1, PT_R28
- lwi r27, r1, PT_R27
- lwi r26, r1, PT_R26
- lwi r25, r1, PT_R25
- lwi r24, r1, PT_R24
- lwi r23, r1, PT_R23
- lwi r22, r1, PT_R22
- lwi r21, r1, PT_R21
- lwi r20, r1, PT_R20
- lwi r19, r1, PT_R19
- lwi r18, r1, PT_R18
- lwi r17, r1, PT_R17
- lwi r16, r1, PT_R16
- lwi r15, r1, PT_R15
- lwi r14, r1, PT_PC
- lwi r13, r1, PT_R13
- lwi r12, r1, PT_R12
- lwi r11, r1, PT_R11
- lwi r10, r1, PT_R10
- lwi r9, r1, PT_R9
- lwi r8, r1, PT_R8
- lwi r7, r1, PT_R7
- lwi r6, r1, PT_R6
- lwi r5, r1, PT_R5
- lwi r4, r1, PT_R4
- lwi r3, r1, PT_R3
- lwi r2, r1, PT_R2
- lwi r1, r1, PT_R1
- rtid r14, 0
- nop
-
-ENTRY(_reset)
- brai 0;
-
-ENTRY(_user_exception)
- swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
- swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
- lwi r11, r0, PER_CPU(KM) /* load mode indicator */
- beqid r11, 1f /* Already in kernel mode? */
- nop
- brid 2f /* jump over */
- addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
-1: /* Switch to kernel stack */
- lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
- lwi r1, r1, TS_THREAD_INFO /* get the thread info */
- /* calculate kernel stack pointer */
- addik r1, r1, THREAD_SIZE - PT_SIZE
-2:
- swi r11, r1, PT_MODE /* store the mode */
- lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
- /* save them on stack */
- swi r2, r1, PT_R2
- swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
- swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
- swi r5, r1, PT_R5
- swi r6, r1, PT_R6
- swi r7, r1, PT_R7
- swi r8, r1, PT_R8
- swi r9, r1, PT_R9
- swi r10, r1, PT_R10
- swi r11, r1, PT_R11
- /* r12: _always_ in clobber list; see unistd.h */
- swi r12, r1, PT_R12
- swi r13, r1, PT_R13
- /* r14: _always_ in clobber list; see unistd.h */
- swi r14, r1, PT_R14
- /* but we want to return to the next inst. */
- addik r14, r14, 0x4
- swi r14, r1, PT_PC /* increment by 4 and store in pc */
- swi r15, r1, PT_R15
- swi r16, r1, PT_R16
- swi r17, r1, PT_R17
- swi r18, r1, PT_R18
- swi r19, r1, PT_R19
- swi r20, r1, PT_R20
- swi r21, r1, PT_R21
- swi r22, r1, PT_R22
- swi r23, r1, PT_R23
- swi r24, r1, PT_R24
- swi r25, r1, PT_R25
- swi r26, r1, PT_R26
- swi r27, r1, PT_R27
- swi r28, r1, PT_R28
- swi r29, r1, PT_R29
- swi r30, r1, PT_R30
- swi r31, r1, PT_R31
-
- disable_irq
- nop /* make sure IE bit is in effect */
- clear_bip /* once IE is in effect it is safe to clear BIP */
- nop
-
- /* special purpose registers */
- mfs r11, rmsr
- swi r11, r1, PT_MSR
- mfs r11, rear
- swi r11, r1, PT_EAR
- mfs r11, resr
- swi r11, r1, PT_ESR
- mfs r11, rfsr
- swi r11, r1, PT_FSR
- /* reload original stack pointer and save it */
- lwi r11, r0, PER_CPU(ENTRY_SP)
- swi r11, r1, PT_R1
- /* update mode indicator we are in kernel mode */
- addik r11, r0, 1
- swi r11, r0, PER_CPU(KM)
- /* restore r31 */
- lwi r31, r0, PER_CPU(CURRENT_SAVE)
- /* re-enable interrupts now we are in kernel mode */
- enable_irq
-
- /* See if the system call number is valid. */
- addi r11, r12, -__NR_syscalls
- bgei r11, 1f /* return to user if not valid */
- /* Figure out which function to use for this system call. */
- /* Note Microblaze barrel shift is optional, so don't rely on it */
- add r12, r12, r12 /* convert num -> ptr */
- addik r30, r0, 1 /* restarts allowed */
- add r12, r12, r12
- lwi r12, r12, sys_call_table /* Get function pointer */
- addik r15, r0, ret_to_user-8 /* set return address */
- bra r12 /* Make the system call. */
- bri 0 /* won't reach here */
-1:
- brid ret_to_user /* jump to syscall epilogue */
- addi r3, r0, -ENOSYS /* set errno in delay slot */
-
-/*
- * Debug traps are like a system call, but entered via brki r14, 0x60
- * All we need to do is send the SIGTRAP signal to current, ptrace and
- * do_notify_resume will handle the rest
- */
-ENTRY(_debug_exception)
- swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
- lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
- lwi r1, r1, TS_THREAD_INFO /* get the thread info */
- addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
- swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
- lwi r11, r0, PER_CPU(KM) /* load mode indicator */
-//save_context:
- swi r11, r1, PT_MODE /* store the mode */
- lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
- /* save them on stack */
- swi r2, r1, PT_R2
- swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
- swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
- swi r5, r1, PT_R5
- swi r6, r1, PT_R6
- swi r7, r1, PT_R7
- swi r8, r1, PT_R8
- swi r9, r1, PT_R9
- swi r10, r1, PT_R10
- swi r11, r1, PT_R11
- /* r12: _always_ in clobber list; see unistd.h */
- swi r12, r1, PT_R12
- swi r13, r1, PT_R13
- /* r14: _always_ in clobber list; see unistd.h */
- swi r14, r1, PT_R14
- swi r14, r1, PT_PC /* Will return to interrupted instruction */
- swi r15, r1, PT_R15
- swi r16, r1, PT_R16
- swi r17, r1, PT_R17
- swi r18, r1, PT_R18
- swi r19, r1, PT_R19
- swi r20, r1, PT_R20
- swi r21, r1, PT_R21
- swi r22, r1, PT_R22
- swi r23, r1, PT_R23
- swi r24, r1, PT_R24
- swi r25, r1, PT_R25
- swi r26, r1, PT_R26
- swi r27, r1, PT_R27
- swi r28, r1, PT_R28
- swi r29, r1, PT_R29
- swi r30, r1, PT_R30
- swi r31, r1, PT_R31
-
- disable_irq
- nop /* make sure IE bit is in effect */
- clear_bip /* once IE is in effect it is safe to clear BIP */
- nop
-
- /* special purpose registers */
- mfs r11, rmsr
- swi r11, r1, PT_MSR
- mfs r11, rear
- swi r11, r1, PT_EAR
- mfs r11, resr
- swi r11, r1, PT_ESR
- mfs r11, rfsr
- swi r11, r1, PT_FSR
- /* reload original stack pointer and save it */
- lwi r11, r0, PER_CPU(ENTRY_SP)
- swi r11, r1, PT_R1
- /* update mode indicator we are in kernel mode */
- addik r11, r0, 1
- swi r11, r0, PER_CPU(KM)
- /* restore r31 */
- lwi r31, r0, PER_CPU(CURRENT_SAVE)
- /* re-enable interrupts now we are in kernel mode */
- enable_irq
-
- addi r5, r0, SIGTRAP /* sending the trap signal */
- add r6, r0, r31 /* to current */
- bralid r15, send_sig
- add r7, r0, r0 /* 3rd param zero */
-
- addik r30, r0, 1 /* restarts allowed ??? */
- /* Restore r3/r4 to work around how ret_to_user works */
- lwi r3, r1, PT_R3
- lwi r4, r1, PT_R4
- bri ret_to_user
-
-ENTRY(_break)
- bri 0
-
-/* struct task_struct *_switch_to(struct thread_info *prev,
- struct thread_info *next); */
-ENTRY(_switch_to)
- /* prepare return value */
- addk r3, r0, r31
-
- /* save registers in cpu_context */
- /* use r11 and r12, volatile registers, as temp register */
- addik r11, r5, TI_CPU_CONTEXT
- swi r1, r11, CC_R1
- swi r2, r11, CC_R2
- /* skip volatile registers.
- * they are saved on stack when we jumped to _switch_to() */
- /* dedicated registers */
- swi r13, r11, CC_R13
- swi r14, r11, CC_R14
- swi r15, r11, CC_R15
- swi r16, r11, CC_R16
- swi r17, r11, CC_R17
- swi r18, r11, CC_R18
- /* save non-volatile registers */
- swi r19, r11, CC_R19
- swi r20, r11, CC_R20
- swi r21, r11, CC_R21
- swi r22, r11, CC_R22
- swi r23, r11, CC_R23
- swi r24, r11, CC_R24
- swi r25, r11, CC_R25
- swi r26, r11, CC_R26
- swi r27, r11, CC_R27
- swi r28, r11, CC_R28
- swi r29, r11, CC_R29
- swi r30, r11, CC_R30
- /* special purpose registers */
- mfs r12, rmsr
- swi r12, r11, CC_MSR
- mfs r12, rear
- swi r12, r11, CC_EAR
- mfs r12, resr
- swi r12, r11, CC_ESR
- mfs r12, rfsr
- swi r12, r11, CC_FSR
-
- /* update r31, the current */
- lwi r31, r6, TI_TASK
- swi r31, r0, PER_CPU(CURRENT_SAVE)
-
- /* get new process' cpu context and restore */
- addik r11, r6, TI_CPU_CONTEXT
-
- /* special purpose registers */
- lwi r12, r11, CC_FSR
- mts rfsr, r12
- lwi r12, r11, CC_ESR
- mts resr, r12
- lwi r12, r11, CC_EAR
- mts rear, r12
- lwi r12, r11, CC_MSR
- mts rmsr, r12
- /* non-volatile registers */
- lwi r30, r11, CC_R30
- lwi r29, r11, CC_R29
- lwi r28, r11, CC_R28
- lwi r27, r11, CC_R27
- lwi r26, r11, CC_R26
- lwi r25, r11, CC_R25
- lwi r24, r11, CC_R24
- lwi r23, r11, CC_R23
- lwi r22, r11, CC_R22
- lwi r21, r11, CC_R21
- lwi r20, r11, CC_R20
- lwi r19, r11, CC_R19
- /* dedicated registers */
- lwi r18, r11, CC_R18
- lwi r17, r11, CC_R17
- lwi r16, r11, CC_R16
- lwi r15, r11, CC_R15
- lwi r14, r11, CC_R14
- lwi r13, r11, CC_R13
- /* skip volatile registers */
- lwi r2, r11, CC_R2
- lwi r1, r11, CC_R1
-
- rtsd r15, 8
- nop
-
-ENTRY(ret_from_fork)
- addk r5, r0, r3
- brlid r15, schedule_tail
- nop
- swi r31, r1, PT_R31 /* save r31 in user context. */
- /* will soon be restored to r31 in ret_to_user */
- addk r3, r0, r0
- brid ret_to_user
- nop
-
-ENTRY(ret_from_kernel_thread)
- brlid r15, schedule_tail
- addk r5, r0, r3
- brald r15, r20
- addk r5, r0, r19
- brid ret_to_user
- addk r3, r0, r0
-
-work_pending:
- lwi r11, r1, PT_MODE
- bneid r11, 2f
-3:
- enable_irq
- andi r11, r19, _TIF_NEED_RESCHED
- beqi r11, 1f
- bralid r15, schedule
- nop
- bri 4f
-1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
- beqi r11, no_work_pending
- addk r5, r30, r0
- bralid r15, do_notify_resume
- addik r6, r0, 1
- addk r30, r0, r0 /* no restarts from now on */
-4:
- disable_irq
- lwi r6, r31, TS_THREAD_INFO /* get thread info */
- lwi r19, r6, TI_FLAGS /* get flags in thread info */
- bri 3b
-
-ENTRY(ret_to_user)
- disable_irq
-
- swi r4, r1, PT_R4 /* return val */
- swi r3, r1, PT_R3 /* return val */
-
- lwi r6, r31, TS_THREAD_INFO /* get thread info */
- lwi r19, r6, TI_FLAGS /* get flags in thread info */
- bnei r19, work_pending /* do an extra work if any bits are set */
-no_work_pending:
- disable_irq
-
-2:
- /* save r31 */
- swi r31, r0, PER_CPU(CURRENT_SAVE)
- /* save mode indicator */
- lwi r18, r1, PT_MODE
- swi r18, r0, PER_CPU(KM)
-//restore_context:
- /* special purpose registers */
- lwi r18, r1, PT_FSR
- mts rfsr, r18
- lwi r18, r1, PT_ESR
- mts resr, r18
- lwi r18, r1, PT_EAR
- mts rear, r18
- lwi r18, r1, PT_MSR
- mts rmsr, r18
-
- lwi r31, r1, PT_R31
- lwi r30, r1, PT_R30
- lwi r29, r1, PT_R29
- lwi r28, r1, PT_R28
- lwi r27, r1, PT_R27
- lwi r26, r1, PT_R26
- lwi r25, r1, PT_R25
- lwi r24, r1, PT_R24
- lwi r23, r1, PT_R23
- lwi r22, r1, PT_R22
- lwi r21, r1, PT_R21
- lwi r20, r1, PT_R20
- lwi r19, r1, PT_R19
- lwi r18, r1, PT_R18
- lwi r17, r1, PT_R17
- lwi r16, r1, PT_R16
- lwi r15, r1, PT_R15
- lwi r14, r1, PT_PC
- lwi r13, r1, PT_R13
- lwi r12, r1, PT_R12
- lwi r11, r1, PT_R11
- lwi r10, r1, PT_R10
- lwi r9, r1, PT_R9
- lwi r8, r1, PT_R8
- lwi r7, r1, PT_R7
- lwi r6, r1, PT_R6
- lwi r5, r1, PT_R5
- lwi r4, r1, PT_R4 /* return val */
- lwi r3, r1, PT_R3 /* return val */
- lwi r2, r1, PT_R2
- lwi r1, r1, PT_R1
-
- rtid r14, 0
- nop
-
-sys_rt_sigreturn_wrapper:
- addk r30, r0, r0 /* no restarts for this one */
- brid sys_rt_sigreturn
- addk r5, r1, r0
-
- /* Interrupt vector table */
- .section .init.ivt, "ax"
- .org 0x0
- brai _reset
- brai _user_exception
- brai _interrupt
- brai _break
- brai _hw_exception_handler
- .org 0x60
- brai _debug_exception
-
-.section .rodata,"a"
-#include "syscall_table.S"
-
-syscall_table_size=(.-sys_call_table)
-
-type_SYSCALL:
- .ascii "SYSCALL\0"
-type_IRQ:
- .ascii "IRQ\0"
-type_IRQ_PREEMPT:
- .ascii "IRQ (PREEMPTED)\0"
-type_SYSCALL_PREEMPT:
- .ascii " SYSCALL (PREEMPTED)\0"
-
- /*
- * Trap decoding for stack unwinder
- * Tuples are (start addr, end addr, string)
- * If return address lies on [start addr, end addr],
- * unwinder displays 'string'
- */
-
- .align 4
-.global microblaze_trap_handlers
-microblaze_trap_handlers:
- /* Exact matches come first */
- .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
- .word ret_from_intr; .word ret_from_intr ; .word type_IRQ
- /* Fuzzy matches go here */
- .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
- .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
- /* End of table */
- .word 0 ; .word 0 ; .word 0
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index cf99c411503e..908788497b28 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -69,9 +69,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
int fsr, int addr)
{
-#ifdef CONFIG_MMU
addr = regs->pc;
-#endif
#if 0
pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
@@ -132,13 +130,10 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
fsr = FPE_FLTRES;
_exception(SIGFPE, regs, fsr, addr);
break;
-
-#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
pr_debug("Privileged exception\n");
_exception(SIGILL, regs, ILL_PRVOPC, addr);
break;
-#endif
default:
/* FIXME what to do in unexpected exception */
pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 14b276406153..ec2fcb545e64 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -34,7 +34,6 @@
#include <asm/page.h>
#include <linux/of_fdt.h> /* for OF_DT_HEADER */
-#ifdef CONFIG_MMU
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -48,8 +47,6 @@ empty_zero_page:
swapper_pg_dir:
.space PAGE_SIZE
-#endif /* CONFIG_MMU */
-
.section .rodata
.align 4
endian_check:
@@ -108,8 +105,6 @@ _copy_fdt:
addik r3, r3, -4 /* decrement loop */
no_fdt_arg:
-#ifdef CONFIG_MMU
-
#ifndef CONFIG_CMDLINE_BOOL
/*
* handling command line
@@ -329,7 +324,6 @@ turn_on_mmu:
nop
start_here:
-#endif /* CONFIG_MMU */
/* Initialize small data anchors */
addik r13, r0, _KERNEL_SDA_BASE_
@@ -345,11 +339,6 @@ start_here:
brald r15, r11
nop
-#ifndef CONFIG_MMU
- addik r15, r0, machine_halt
- braid start_kernel
- nop
-#else
/*
* Initialize the MMU.
*/
@@ -383,4 +372,3 @@ kernel_load_context:
nop
rted r17, 0 /* enable MMU and jump to start_kernel */
nop
-#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 54411de22fa6..07ea23965f81 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -80,7 +80,6 @@
/* Helpful Macros */
#define NUM_TO_REG(num) r ## num
-#ifdef CONFIG_MMU
#define RESTORE_STATE \
lwi r5, r1, 0; \
mts rmsr, r5; \
@@ -92,7 +91,6 @@
lwi r11, r1, PT_R11; \
lwi r31, r1, PT_R31; \
lwi r1, r1, PT_R1;
-#endif /* CONFIG_MMU */
#define LWREG_NOP \
bri ex_handler_unhandled; \
@@ -102,10 +100,6 @@
bri ex_handler_unhandled; \
nop;
-/* FIXME this is weird - for noMMU kernel is not possible to use brid
- * instruction which can shorten executed time
- */
-
/* r3 is the source */
#define R3_TO_LWREG_V(regnum) \
swi r3, r1, 4 * regnum; \
@@ -126,7 +120,6 @@
or r3, r0, NUM_TO_REG (regnum); \
bri ex_sw_tail;
-#ifdef CONFIG_MMU
#define R3_TO_LWREG_VM_V(regnum) \
brid ex_lw_end_vm; \
swi r3, r7, 4 * regnum;
@@ -193,7 +186,6 @@
.endm
#endif
-#endif /* CONFIG_MMU */
.extern other_exception_handler /* Defined in exception.c */
@@ -251,7 +243,6 @@
*/
/* wrappers to restore state before coming to entry.S */
-#ifdef CONFIG_MMU
.section .data
.align 4
pt_pool_space:
@@ -316,31 +307,24 @@ _MB_HW_ExceptionVectorTable:
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
-#endif
.global _hw_exception_handler
.section .text
.align 4
.ent _hw_exception_handler
_hw_exception_handler:
-#ifndef CONFIG_MMU
- addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
-#else
swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */
/* Save data to kernel memory. Here is the problem
* when you come from user space */
ori r1, r0, TOPHYS(pt_pool_space);
-#endif
swi r3, r1, PT_R3
swi r4, r1, PT_R4
swi r5, r1, PT_R5
swi r6, r1, PT_R6
-#ifdef CONFIG_MMU
swi r11, r1, PT_R11
swi r31, r1, PT_R31
lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
-#endif
mfs r5, rmsr;
nop
@@ -350,18 +334,8 @@ _hw_exception_handler:
mfs r3, rear;
nop
-#ifndef CONFIG_MMU
- andi r5, r4, 0x1000; /* Check ESR[DS] */
- beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
- mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
- nop
-not_in_delay_slot:
- swi r17, r1, PT_R17
-#endif
-
andi r5, r4, 0x1F; /* Extract ESR[EXC] */
-#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
@@ -383,73 +357,6 @@ not_in_delay_slot:
full_exception_trapw:
RESTORE_STATE
bri full_exception_trap
-#else
- /* Exceptions enabled here. This will allow nested exceptions */
- mfs r6, rmsr;
- nop
- swi r6, r1, 0; /* RMSR_OFFSET */
- ori r6, r6, 0x100; /* Turn ON the EE bit */
- andi r6, r6, ~2; /* Disable interrupts */
- mts rmsr, r6;
- nop
-
- xori r6, r5, 1; /* 00001 = Unaligned Exception */
- /* Jump to unalignment exception handler */
- beqi r6, handle_unaligned_ex;
-
-handle_other_ex: /* Handle Other exceptions here */
- /* Save other volatiles before we make procedure calls below */
- swi r7, r1, PT_R7
- swi r8, r1, PT_R8
- swi r9, r1, PT_R9
- swi r10, r1, PT_R10
- swi r11, r1, PT_R11
- swi r12, r1, PT_R12
- swi r14, r1, PT_R14
- swi r15, r1, PT_R15
- swi r18, r1, PT_R18
-
- or r5, r1, r0
- andi r6, r4, 0x1F; /* Load ESR[EC] */
- lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
- swi r7, r1, PT_MODE
- mfs r7, rfsr
- nop
- addk r8, r17, r0; /* Load exception address */
- bralid r15, full_exception; /* Branch to the handler */
- nop;
- mts rfsr, r0; /* Clear sticky fsr */
- nop
-
- /*
- * Trigger execution of the signal handler by enabling
- * interrupts and calling an invalid syscall.
- */
- mfs r5, rmsr;
- nop
- ori r5, r5, 2;
- mts rmsr, r5; /* enable interrupt */
- nop
- addi r12, r0, __NR_syscalls;
- brki r14, 0x08;
- mfs r5, rmsr; /* disable interrupt */
- nop
- andi r5, r5, ~2;
- mts rmsr, r5;
- nop
-
- lwi r7, r1, PT_R7
- lwi r8, r1, PT_R8
- lwi r9, r1, PT_R9
- lwi r10, r1, PT_R10
- lwi r11, r1, PT_R11
- lwi r12, r1, PT_R12
- lwi r14, r1, PT_R14
- lwi r15, r1, PT_R15
- lwi r18, r1, PT_R18
-
- bri ex_handler_done; /* Complete exception handling */
-#endif
/* 0x01 - Unaligned data access exception
* This occurs when a word access is not aligned on a word boundary,
@@ -463,7 +370,6 @@ handle_unaligned_ex:
* R4 = ESR
* R3 = EAR
*/
-#ifdef CONFIG_MMU
andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
@@ -472,7 +378,7 @@ _no_delayslot:
/* jump to high level unaligned handler */
RESTORE_STATE;
bri unaligned_data_trap
-#endif
+
andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
@@ -558,25 +464,10 @@ ex_shw:
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
-#ifndef CONFIG_MMU
- lwi r5, r1, 0 /* RMSR */
- mts rmsr, r5
- nop
- lwi r3, r1, PT_R3
- lwi r4, r1, PT_R4
- lwi r5, r1, PT_R5
- lwi r6, r1, PT_R6
- lwi r17, r1, PT_R17
-
- rted r17, 0
- addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
-#else
RESTORE_STATE;
rted r17, 0
nop
-#endif
-#ifdef CONFIG_MMU
/* Exception vector entry code. This code runs with address translation
* turned off (i.e. using physical addresses). */
@@ -882,13 +773,7 @@ ex_handler_done:
* bits 20 and 21 are zero.
*/
andi r3, r3, PAGE_MASK
-#ifdef CONFIG_MICROBLAZE_64K_PAGES
- ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K)
-#elif CONFIG_MICROBLAZE_16K_PAGES
- ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K)
-#else
ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
-#endif
mts rtlbhi, r3 /* Load TLB HI */
nop
@@ -926,10 +811,8 @@ ex_handler_done:
rtsd r15,8
nop
-#endif
.end _hw_exception_handler
-#ifdef CONFIG_MMU
/* Unaligned data access exception last on a 4k page for MMU.
* When this is called, we are in virtual mode with exceptions enabled
* and registers 1-13,15,17,18 saved.
@@ -1044,7 +927,6 @@ ex_unaligned_fixup:
.word store6,ex_unaligned_fixup;
.previous;
.end _unaligned_data_exception
-#endif /* CONFIG_MMU */
.global ex_handler_unhandled
ex_handler_unhandled:
@@ -1093,11 +975,7 @@ lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
-#ifdef CONFIG_MMU
lw_r31: R3_TO_LWREG_V (31);
-#else
-lw_r31: R3_TO_LWREG (31);
-#endif
sw_table:
sw_r0: SWREG_TO_R3 (0);
@@ -1131,13 +1009,8 @@ sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
-#ifdef CONFIG_MMU
sw_r31: SWREG_TO_R3_V (31);
-#else
-sw_r31: SWREG_TO_R3 (31);
-#endif
-#ifdef CONFIG_MMU
lw_table_vm:
lw_r0_vm: R3_TO_LWREG_VM (0);
lw_r1_vm: R3_TO_LWREG_VM_V (1);
@@ -1205,7 +1078,6 @@ sw_r28_vm: SWREG_TO_R3_VM_V (28);
sw_r29_vm: SWREG_TO_R3_VM_V (29);
sw_r30_vm: SWREG_TO_R3_VM_V (30);
sw_r31_vm: SWREG_TO_R3_VM_V (31);
-#endif /* CONFIG_MMU */
/* Temporary data structures used in the handler */
.section .data
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 51c43ee5e380..303aaf13573b 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -33,9 +33,7 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
#endif
-#ifdef CONFIG_MMU
EXPORT_SYMBOL(empty_zero_page);
-#endif
EXPORT_SYMBOL(mbc);
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a9e46e525cd0..657c2beb665e 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -69,9 +69,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
ti->cpu_context.r19 = (unsigned long)arg;
childregs->pt_mode = 1;
local_save_flags(childregs->msr);
-#ifdef CONFIG_MMU
ti->cpu_context.msr = childregs->msr & ~MSR_IE;
-#endif
ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
return 0;
}
@@ -81,9 +79,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
ti->cpu_context.r1 = (unsigned long)childregs;
-#ifndef CONFIG_MMU
- ti->cpu_context.msr = (unsigned long)childregs->msr;
-#else
childregs->msr |= MSR_UMS;
/* we should consider the fact that childregs is a copy of the parent
@@ -105,7 +100,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
ti->cpu_context.msr = (childregs->msr|MSR_VM);
ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
ti->cpu_context.msr &= ~MSR_IE;
-#endif
ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
/*
@@ -130,13 +124,10 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
regs->pc = pc;
regs->r1 = usp;
regs->pt_mode = 0;
-#ifdef CONFIG_MMU
regs->msr |= MSR_UMS;
regs->msr &= ~MSR_VM;
-#endif
}
-#ifdef CONFIG_MMU
#include <linux/elfcore.h>
/*
* Set up a thread for executing a new program
@@ -145,9 +136,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
return 0; /* MicroBlaze has no separate FPU registers */
}
-#endif /* CONFIG_MMU */
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
}
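The generic idle loop enters arch_cpu_idle() with interrupts disabled and irq-state tracing already balanced, so the arch must re-enable with the raw_ variant to avoid emitting a second trace event. The caller's expectation, roughly:

    /* sketch of the generic idle loop's contract */
    local_irq_disable();
    if (!need_resched())
            arch_cpu_idle();        /* must return with IRQs enabled */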
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 333b09658ca8..f417333eccae 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -9,7 +9,7 @@
*/
#include <linux/init.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <linux/string.h>
#include <linux/seq_file.h>
@@ -190,12 +190,10 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
-# ifdef CONFIG_MMU
static int __init debugfs_tlb(void)
{
debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
return 0;
}
device_initcall(debugfs_tlb);
-# endif
#endif
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index f11a0ccccabc..fc61eb0eb8dd 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -157,10 +157,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
unsigned long address = 0;
-#ifdef CONFIG_MMU
pmd_t *pmdp;
pte_t *ptep;
-#endif
frame = get_sigframe(ksig, regs, sizeof(*frame));
@@ -192,7 +190,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->r15 = ((unsigned long)frame->tramp)-8;
address = ((unsigned long)frame->tramp);
-#ifdef CONFIG_MMU
pmdp = pmd_off(current->mm, address);
preempt_disable();
@@ -208,10 +205,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
}
pte_unmap(ptep);
preempt_enable();
-#else
- flush_icache_range(address, address + 8);
- flush_dcache_range(address, address + 8);
-#endif
if (err)
return -EFAULT;
@@ -313,7 +306,8 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall)
{
- if (test_thread_flag(TIF_SIGPENDING))
+ if (test_thread_flag(TIF_SIGPENDING) ||
+ test_thread_flag(TIF_NOTIFY_SIGNAL))
do_signal(regs, in_syscall);
if (test_thread_flag(TIF_NOTIFY_RESUME))
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 778a761af0a7..a530a7a6be7d 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -161,22 +161,12 @@ static void microblaze_unwind_inner(struct task_struct *task,
* unwind_trap - Unwind through a system trap, that stored previous state
* on the stack.
*/
-#ifdef CONFIG_MMU
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
unsigned long fp, struct stack_trace *trace,
const char *loglvl)
{
/* To be implemented */
}
-#else
-static inline void unwind_trap(struct task_struct *task, unsigned long pc,
- unsigned long fp, struct stack_trace *trace,
- const char *loglvl)
-{
- const struct pt_regs *regs = (const struct pt_regs *) fp;
- microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace, loglvl);
-}
-#endif
/**
* microblaze_unwind_inner - Unwind the stack from the specified point
@@ -215,16 +205,7 @@ static void microblaze_unwind_inner(struct task_struct *task,
* HW exception handler doesn't save all registers,
* so we open-code a special case of unwind_trap()
*/
-#ifndef CONFIG_MMU
- const struct pt_regs *regs =
- (const struct pt_regs *) fp;
-#endif
printk("%sHW EXCEPTION\n", loglvl);
-#ifndef CONFIG_MMU
- microblaze_unwind_inner(task, regs->r17 - 4,
- fp + EX_HANDLER_STACK_SIZ,
- regs->r15, trace, loglvl);
-#endif
return;
}
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 1b16875cea70..75edfc110d3e 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -3,7 +3,4 @@
# Makefile
#
-obj-y := consistent.o init.o
-
-obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-y := consistent.o init.o pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 81dffe43b18c..b7ad4a98636d 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -21,32 +21,3 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
flush_dcache_range(paddr, paddr + size);
}
-
-#ifndef CONFIG_MMU
-/*
- * Consistent memory allocators. Used for DMA devices that want to share
- * uncached memory with the processor core. My crufty no-MMU approach is
- * simple. In the HW platform we can optionally mirror the DDR up above the
- * processor cacheable region. So, memory accessed in this mirror region will
- * not be cached. It's alloced from the same pool as normal memory, but the
- * handle we return is shifted up into the uncached region. This will no doubt
- * cause big problems if memory allocated here is not also freed properly. -- JW
- *
- * I have to use dcache values because I can't relate on ram size:
- */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
-#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#else
-#define UNCACHED_SHADOW_MASK 0
-#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
-
-void *arch_dma_set_uncached(void *ptr, size_t size)
-{
- unsigned long addr = (unsigned long)ptr;
-
- addr |= UNCACHED_SHADOW_MASK;
- if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
- pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
- return (void *)addr;
-}
-#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
deleted file mode 100644
index 92e0890416c9..000000000000
--- a/arch/microblaze/mm/highmem.c
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * PowerPC version, stolen from the i386 version.
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- *
- * Reworked for PowerPC by various contributors. Moved from
- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
- */
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-#include <asm/tlbflush.h>
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
- local_flush_tlb_page(NULL, vaddr);
-
- return (void *) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
- unsigned int idx;
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END))
- return;
-
- type = kmap_atomic_idx();
-
- idx = type + KM_TYPE_NR * smp_processor_id();
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
-
- kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 45da639bd22c..181e48782e6c 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -29,11 +29,6 @@
/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;
-#ifndef CONFIG_MMU
-unsigned int __page_offset;
-EXPORT_SYMBOL(__page_offset);
-#endif /* CONFIG_MMU */
-
char *klimit = _end;
/*
@@ -50,16 +45,11 @@ EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);
#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-
static void __init highmem_init(void)
{
pr_debug("%x\n", (u32)PKMAP_BASE);
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-
- kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
}
static void highmem_setup(void)
@@ -82,13 +72,11 @@ static void highmem_setup(void)
static void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_MMU
int idx;
/* Setup fixmaps */
for (idx = 0; idx < __end_of_fixed_addresses; idx++)
clear_fixmap(idx);
-#endif
/* Clean every zones */
memset(zones_size, 0, sizeof(zones_size));
@@ -108,40 +96,6 @@ static void __init paging_init(void)
void __init setup_memory(void)
{
-#ifndef CONFIG_MMU
- u32 kernel_align_start, kernel_align_size;
- phys_addr_t start, end;
- u64 i;
-
- /* Find main memory where is the kernel */
- for_each_mem_range(i, &start, &end) {
- memory_start = start;
- lowmem_size = end - start;
- if ((memory_start <= (u32)_text) &&
- ((u32)_text <= (memory_start + lowmem_size - 1))) {
- memory_size = lowmem_size;
- PAGE_OFFSET = memory_start;
- pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
- __func__, (u32) memory_start,
- (u32) memory_size);
- break;
- }
- }
-
- if (!memory_start || !memory_size) {
- panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
- __func__, (u32) memory_start, (u32) memory_size);
- }
-
- /* reservation of region where is the kernel */
- kernel_align_start = PAGE_DOWN((u32)_text);
- /* ALIGN can be remove because _end in vmlinux.lds.S is align */
- kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
- pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
- __func__, kernel_align_start, kernel_align_start
- + kernel_align_size, kernel_align_size);
- memblock_reserve(kernel_align_start, kernel_align_size);
-#endif
/*
* Kernel:
* start: base phys address of kernel - page align
@@ -181,12 +135,6 @@ void __init mem_init(void)
mem_init_done = 1;
}
-#ifndef CONFIG_MMU
-int page_is_ram(unsigned long pfn)
-{
- return __range_ok(pfn, 0);
-}
-#else
int page_is_ram(unsigned long pfn)
{
return pfn < max_low_pfn;
@@ -330,8 +278,6 @@ void __init *early_get_page(void)
NUMA_NO_NODE);
}
-#endif /* CONFIG_MMU */
-
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 60a58c0015f2..557585f1be41 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -325,12 +325,10 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
* memory, effectively behaving just like /dev/zero
*/
if ((offset + size) > hose->isa_mem_size) {
-#ifdef CONFIG_MMU
pr_debug("Process %s (pid:%d) mapped non-existing PCI",
current->comm, current->pid);
pr_debug("legacy memory for 0%04x:%02x\n",
pci_domain_nr(bus), bus->number);
-#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
return 0;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 85c7b0637734..0a17bedf4f0d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -23,7 +23,6 @@ config MIPS
select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
select CPU_PM if CPU_IDLE
select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_GETTIMEOFDAY
@@ -2724,6 +2723,7 @@ config WAR_MIPS34K_MISSED_ITLB
config HIGHMEM
bool "High Memory Support"
depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
+ select KMAP_LOCAL
config CPU_SUPPORTS_HIGHMEM
bool
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.h b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
index 7315cc307397..cb68f9e284bb 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.h
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.h
@@ -41,7 +41,7 @@ do { \
*/
#define read_octeon_64bit_hash_dword(index) \
({ \
- u64 __value; \
+ __be64 __value; \
\
__asm__ __volatile__ ( \
"dmfc2 %[rt],0x0048+" STR(index) \
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index 8c8ea139653e..5ee4ade99b99 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -68,10 +68,11 @@ static int octeon_md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = cpu_to_le32(MD5_H0);
- mctx->hash[1] = cpu_to_le32(MD5_H1);
- mctx->hash[2] = cpu_to_le32(MD5_H2);
- mctx->hash[3] = cpu_to_le32(MD5_H3);
+ mctx->hash[0] = MD5_H0;
+ mctx->hash[1] = MD5_H1;
+ mctx->hash[2] = MD5_H2;
+ mctx->hash[3] = MD5_H3;
+ cpu_to_le32_array(mctx->hash, 4);
mctx->byte_count = 0;
return 0;
@@ -139,8 +140,9 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out)
}
memset(p, 0, padding);
- mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
- mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
+ mctx->block[14] = mctx->byte_count << 3;
+ mctx->block[15] = mctx->byte_count >> 29;
+ cpu_to_le32_array(mctx->block + 14, 2);
octeon_md5_transform(mctx->block);
octeon_md5_read_hash(mctx);
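
cpu_to_le32_array() converts a u32 array to little-endian in place — a byte swap per word on big-endian kernels, a no-op on little-endian — which avoids the sparse endianness warnings the old per-element cpu_to_le32() assignments produced on plain u32 storage. Its shape, roughly (abridged from <linux/byteorder/generic.h>):

	static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
	{
		while (words--) {
			__cpu_to_le32s(buf);	/* in-place swap on big-endian */
			buf++;
		}
	}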
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 75e79b47abfe..30f1d75208a5 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -14,7 +14,7 @@
*/
#include <linux/mm.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index a682ce76716a..36cb92895d72 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -15,7 +15,7 @@
*/
#include <linux/mm.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index 50722a0cfb53..359f039820d8 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -14,7 +14,7 @@
*/
#include <linux/mm.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
diff --git a/arch/mips/configs/generic/board-ranchu.config b/arch/mips/configs/generic/board-ranchu.config
index fee9ad4c5598..640aac845ebe 100644
--- a/arch/mips/configs/generic/board-ranchu.config
+++ b/arch/mips/configs/generic/board-ranchu.config
@@ -5,7 +5,6 @@ CONFIG_FB=y
CONFIG_FB_GOLDFISH=y
CONFIG_GOLDFISH=y
CONFIG_STAGING=y
-CONFIG_GOLDFISH_AUDIO=y
CONFIG_GOLDFISH_PIC=y
CONFIG_GOLDFISH_PIPE=y
CONFIG_GOLDFISH_TTY=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 87e20f3391ed..5cb91509bb7c 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -228,9 +228,7 @@ CONFIG_FARSYNC=m
CONFIG_DSCC4=m
CONFIG_DSCC4_PCISYNC=y
CONFIG_DSCC4_PCI_RST=y
-CONFIG_DLCI=m
CONFIG_LAPBETHER=m
-CONFIG_X25_ASY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 0ef2373404e5..bbe0f39f8088 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -378,9 +378,7 @@ CONFIG_FARSYNC=m
CONFIG_DSCC4=m
CONFIG_DSCC4_PCISYNC=y
CONFIG_DSCC4_PCI_RST=y
-CONFIG_DLCI=m
CONFIG_LAPBETHER=m
-CONFIG_X25_ASY=m
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -564,7 +562,6 @@ CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 30d7c3db884e..3dc2da2bee0d 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -311,7 +311,6 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 743535be7528..beea14761cef 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -17,7 +17,7 @@
#include <spaces.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
/*
@@ -52,7 +52,7 @@ enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
__end_of_fixed_addresses
};
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index f1f788b57166..19edf8e69971 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -24,7 +24,7 @@
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/cpu-features.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
@@ -48,11 +48,11 @@ extern pte_t *pkmap_page_table;
#define ARCH_HAS_KMAP_FLUSH_TLB
extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
-extern void kmap_init(void);
+#define arch_kmap_local_post_map(vaddr, pteval) local_flush_tlb_one(vaddr)
+#define arch_kmap_local_post_unmap(vaddr) local_flush_tlb_one(vaddr)
#endif /* __KERNEL__ */
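
With KMAP_LOCAL selected, the generic mapping code in mm/highmem.c installs the fixmap PTE itself and then invokes these arch hooks, so MIPS shrinks to a local TLB flush per map/unmap. Call sites use the generic API; an illustrative (non-kernel) consumer:

	static void copy_from_highpage(struct page *page, void *dst, size_t len)
	{
		void *src = kmap_local_page(page);	/* CPU-local mapping,
							 * preemptible caller */
		memcpy(dst, src, len);
		kunmap_local(src);
	}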
diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h
deleted file mode 100644
index 16665dc2431b..000000000000
--- a/arch/mips/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index cddead91acd4..ed9f2d748f63 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -124,10 +124,6 @@ static inline void set_cpu_context(unsigned int cpu,
#define cpu_asid(cpu, mm) \
(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);
@@ -136,6 +132,7 @@ extern void check_switch_mmu_context(struct mm_struct *mm);
* Initialize the context related info for a new mm_struct
* instance.
*/
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -180,14 +177,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
dsemul_mm_cleanup(mm);
}
-#define activate_mm(prev, next) switch_mm(prev, next, current)
-#define deactivate_mm(tsk, mm) do { } while (0)
-
static inline void
drop_mmu_context(struct mm_struct *mm)
{
@@ -237,4 +232,6 @@ drop_mmu_context(struct mm_struct *mm)
local_irq_restore(flags);
}
+#include <asm-generic/mmu_context.h>
+
#endif /* _ASM_MMU_CONTEXT_H */
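
The `#define init_new_context init_new_context` idiom is how <asm-generic/mmu_context.h> arbitrates between arch overrides and its own no-op defaults: each generic hook is guarded on the macro of the same name, so an arch that defines the macro keeps its version and everything else falls through to the stub. Abridged from the generic header:

	/* <asm-generic/mmu_context.h>, abridged */
	#ifndef init_new_context
	static inline int init_new_context(struct task_struct *tsk,
					   struct mm_struct *mm)
	{
		return 0;
	}
	#endif

	#ifndef enter_lazy_tlb
	static inline void enter_lazy_tlb(struct mm_struct *mm,
					  struct task_struct *tsk)
	{
	}
	#endif

	#ifndef destroy_context
	static inline void destroy_context(struct mm_struct *mm)
	{
	}
	#endif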
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index a950fc1ddb4d..6c0532d7b211 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -154,6 +154,7 @@ static inline void pmd_clear(pmd_t *pmdp)
#if defined(CONFIG_XPA)
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -169,6 +170,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -183,6 +185,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
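
MAX_POSSIBLE_PHYSMEM_BITS is a compile-time bound on how wide a physical address can get for the chosen PTE layout; consumers size bitfields from it. zsmalloc is the motivating user — without a definition, 32-bit configs carrying a 64-bit phys_addr_t miscomputed its packed handle layout. Abridged from mm/zsmalloc.c:

	/* mm/zsmalloc.c, abridged: bits needed for the largest PFN; whatever
	 * is left over in an unsigned long goes to the object index */
	#define _PFN_BITS	(MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
	#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)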
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index 53104b10aae2..e6c78a15cb2f 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -62,18 +62,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SIGRTMAX _NSIG
/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- *
* SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
* supported its use and no libc was using it, so the entire sa-restorer
* functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index d0a9ed2ca2d6..2d949969313b 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -135,6 +135,9 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_PREFER_BUSY_POLL 69
+#define SO_BUSY_POLL_BUDGET 70
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
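
The per-arch uapi socket.h files only assign numbers (MIPS keeps its historical numbering, hence 69/70 here versus 0x4043/0x4044 on parisc further down); the semantics live in net/core. From userspace both knobs are ordinary SOL_SOCKET options, e.g. (sketch):

	int on = 1, budget = 8;

	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &on, sizeof(on));
	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));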
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5bc3b04693c7..18e69ebf5691 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -33,19 +33,19 @@ static void __cpuidle r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
- local_irq_enable();
+ raw_local_irq_enable();
}
static void __cpuidle r39xx_wait(void)
{
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
- local_irq_enable();
+ raw_local_irq_enable();
}
void __cpuidle r4k_wait(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
__r4k_wait();
}
@@ -64,7 +64,7 @@ void __cpuidle r4k_wait_irqoff(void)
" .set arch=r4000 \n"
" wait \n"
" .set pop \n");
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
@@ -84,7 +84,7 @@ static void __cpuidle rm7k_wait_irqoff(void)
" wait \n"
" mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n");
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
@@ -257,7 +257,7 @@ void arch_cpu_idle(void)
if (cpu_wait)
cpu_wait();
else
- local_irq_enable();
+ raw_local_irq_enable();
}
#ifdef CONFIG_CPU_IDLE
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 75ebd8d7bd5d..d7e288f3a1e7 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -702,7 +702,6 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}
-static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;
static void handle_backtrace(void *info)
@@ -711,6 +710,9 @@ static void handle_backtrace(void *info)
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
+ CSD_INIT(handle_backtrace, NULL);
+
static void raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd;
@@ -730,7 +732,6 @@ static void raise_backtrace(cpumask_t *mask)
}
csd = &per_cpu(backtrace_csd, cpu);
- csd->func = handle_backtrace;
smp_call_function_single_async(cpu, csd);
}
}
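
CSD_INIT() populates the call_single_data at build time, so the sender no longer pokes csd->func on every trip and the early-boot window where the csd was still blank disappears. The pattern in isolation (names here are illustrative, not from the patch):

	static void do_remote_work(void *info)
	{
		/* runs on the target CPU, in IPI context */
	}

	static DEFINE_PER_CPU(call_single_data_t, work_csd) =
		CSD_INIT(do_remote_work, NULL);

	static void kick(int cpu)
	{
		smp_call_function_single_async(cpu, &per_cpu(work_csd, cpu));
	}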
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 48d84d5fcc36..74b9102fd06e 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -687,36 +687,23 @@ EXPORT_SYMBOL(flush_tlb_one);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
-
-void tick_broadcast(const struct cpumask *mask)
-{
- call_single_data_t *csd;
- int cpu;
-
- for_each_cpu(cpu, mask) {
- csd = &per_cpu(tick_broadcast_csd, cpu);
- smp_call_function_single_async(cpu, csd);
- }
-}
-
static void tick_broadcast_callee(void *info)
{
tick_receive_broadcast();
}
-static int __init tick_broadcast_init(void)
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
+ CSD_INIT(tick_broadcast_callee, NULL);
+
+void tick_broadcast(const struct cpumask *mask)
{
call_single_data_t *csd;
int cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for_each_cpu(cpu, mask) {
csd = &per_cpu(tick_broadcast_csd, cpu);
- csd->func = tick_broadcast_callee;
+ smp_call_function_single_async(cpu, csd);
}
-
- return 0;
}
-early_initcall(tick_broadcast_init);
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 5fec7f45d79a..57e2f08f00d0 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -8,8 +8,6 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
-static pte_t *kmap_pte;
-
unsigned long highstart_pfn, highend_pfn;
void kmap_flush_tlb(unsigned long addr)
@@ -17,78 +15,3 @@ void kmap_flush_tlb(unsigned long addr)
flush_tlb_one(addr);
}
EXPORT_SYMBOL(kmap_flush_tlb);
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, prot));
- local_flush_tlb_one((unsigned long)vaddr);
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type __maybe_unused;
-
- if (vaddr < FIXADDR_START)
- return;
-
- type = kmap_atomic_idx();
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- int idx = type + KM_TYPE_NR * smp_processor_id();
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_one(vaddr);
- }
-#endif
- kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- unsigned long vaddr;
- int idx, type;
-
- preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = virt_to_kpte(kmap_vstart);
-}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 07e84a774938..bc80893e5c0f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -36,7 +36,6 @@
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
-#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
@@ -402,9 +401,6 @@ void __init paging_init(void)
pagetable_init();
-#ifdef CONFIG_HIGHMEM
- kmap_init();
-#endif
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
index abb06ae04b40..09e30eb4be86 100644
--- a/arch/mips/vdso/genvdso.c
+++ b/arch/mips/vdso/genvdso.c
@@ -263,10 +263,6 @@ int main(int argc, char **argv)
fprintf(out_file, " const struct vm_special_mapping *sm,\n");
fprintf(out_file, " struct vm_area_struct *new_vma)\n");
fprintf(out_file, "{\n");
- fprintf(out_file, " unsigned long new_size =\n");
- fprintf(out_file, " new_vma->vm_end - new_vma->vm_start;\n");
- fprintf(out_file, " if (vdso_image.size != new_size)\n");
- fprintf(out_file, " return -EINVAL;\n");
fprintf(out_file, " current->mm->context.vdso =\n");
fprintf(out_file, " (void *)(new_vma->vm_start);\n");
fprintf(out_file, " return 0;\n");
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index e8e541fd2267..62313902d75d 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -17,7 +17,6 @@ config NDS32
select DMA_DIRECT_REMAP
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
select GENERIC_IOREMAP
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu
index f88a12fdf0f3..c10759952485 100644
--- a/arch/nds32/Kconfig.cpu
+++ b/arch/nds32/Kconfig.cpu
@@ -157,6 +157,7 @@ config HW_SUPPORT_UNALIGNMENT_ACCESS
config HIGHMEM
bool "High Memory Support"
depends on MMU && !CPU_CACHE_ALIASING
+ select KMAP_LOCAL
help
The address space of Andes processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h
index 1c8e56d7013d..1853dc89b8ac 100644
--- a/arch/nds32/include/asm/elf.h
+++ b/arch/nds32/include/asm/elf.h
@@ -126,7 +126,6 @@ struct elf32_hdr;
#define ELF_DATA ELFDATA2LSB
#endif
#define ELF_ARCH EM_NDS32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h
index 5a4bf11e5800..2fa09a2de428 100644
--- a/arch/nds32/include/asm/fixmap.h
+++ b/arch/nds32/include/asm/fixmap.h
@@ -6,7 +6,7 @@
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
enum fixed_addresses {
@@ -14,7 +14,7 @@ enum fixed_addresses {
FIX_KMAP_RESERVED,
FIX_KMAP_BEGIN,
#ifdef CONFIG_HIGHMEM
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS),
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
FIX_EARLYCON_MEM_BASE,
__end_of_fixed_addresses
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
index fe986d0e6e3f..16159a8716f2 100644
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
@@ -5,7 +5,6 @@
#define _ASM_HIGHMEM_H
#include <asm/proc-fns.h>
-#include <asm/kmap_types.h>
#include <asm/fixmap.h>
/*
@@ -45,11 +44,22 @@ extern pte_t *pkmap_page_table;
extern void kmap_init(void);
/*
- * The following functions are already defined by <linux/highmem.h>
- * when CONFIG_HIGHMEM is not set.
+ * FIXME: The below looks broken vs. a kmap_atomic() in task context which
+ * is interrupted and another kmap_atomic() happens in interrupt context.
+ * But what do I know about nds32. -- tglx
*/
-#ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_pfn(unsigned long pfn);
-#endif
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ do { \
+ __nds32__tlbop_inv(vaddr); \
+ __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN); \
+ __nds32__tlbop_rwr(pteval); \
+ __nds32__isb(); \
+ } while (0)
+
+#define arch_kmap_local_pre_unmap(vaddr) \
+ do { \
+ __nds32__tlbop_inv(vaddr); \
+ __nds32__isb(); \
+ } while (0)
#endif
diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h
index b8fd3d189fdc..c651bc8cacdc 100644
--- a/arch/nds32/include/asm/mmu_context.h
+++ b/arch/nds32/include/asm/mmu_context.h
@@ -9,6 +9,7 @@
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -16,8 +17,6 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
-#define destroy_context(mm) do { } while(0)
-
#define CID_BITS 9
extern spinlock_t cid_lock;
extern unsigned int cpu_last_cid;
@@ -47,10 +46,6 @@ static inline void check_context(struct mm_struct *mm)
__new_context(mm);
}
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
@@ -62,7 +57,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
}
-#define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+#include <asm-generic/mmu_context.h>
#endif
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
index 897ecaf5cf54..14fb2e8eb036 100644
--- a/arch/nds32/mm/Makefile
+++ b/arch/nds32/mm/Makefile
@@ -3,7 +3,6 @@ obj-y := extable.o tlb.o fault.o init.o mmap.o \
mm-nds32.o cacheflush.o proc.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c
deleted file mode 100644
index 4284cd59e21a..000000000000
--- a/arch/nds32/mm/highmem.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/memblock.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned int idx;
- unsigned long vaddr, pte;
- int type;
- pte_t *ptep;
-
- type = kmap_atomic_idx_push();
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- pte = (page_to_pfn(page) << PAGE_SHIFT) | prot;
- ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
- set_pte(ptep, pte);
-
- __nds32__tlbop_inv(vaddr);
- __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN);
- __nds32__tlbop_rwr(pte);
- __nds32__isb();
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- if (kvaddr >= (void *)FIXADDR_START) {
- unsigned long vaddr = (unsigned long)kvaddr;
- pte_t *ptep;
- kmap_atomic_idx_pop();
- __nds32__tlbop_inv(vaddr);
- __nds32__isb();
- ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
- set_pte(ptep, 0);
- }
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
diff --git a/arch/nds32/mm/mm-nds32.c b/arch/nds32/mm/mm-nds32.c
index 55bec50ccc03..f2778f2b39f6 100644
--- a/arch/nds32/mm/mm-nds32.c
+++ b/arch/nds32/mm/mm-nds32.c
@@ -34,8 +34,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
cpu_dcache_wb_range((unsigned long)new_pgd,
(unsigned long)new_pgd +
PTRS_PER_PGD * sizeof(pgd_t));
- inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),
- NR_PAGETABLE);
+ inc_lruvec_page_state(virt_to_page((unsigned long *)new_pgd),
+ NR_PAGETABLE);
return new_pgd;
}
@@ -59,7 +59,7 @@ void pgd_free(struct mm_struct *mm, pgd_t * pgd)
pte = pmd_page(*pmd);
pmd_clear(pmd);
- dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
+ dec_lruvec_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
pte_free(mm, pte);
mm_dec_nr_ptes(mm);
pmd_free(mm, pmd);
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index c7c6ba6bec9d..c24955c81c92 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -10,7 +10,6 @@ config NIOS2
select COMMON_CLK
select TIMER_OF
select GENERIC_ATOMIC64
- select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
diff --git a/arch/nios2/include/asm/mmu_context.h b/arch/nios2/include/asm/mmu_context.h
index 78ab3dacf579..4f99ed09b5a7 100644
--- a/arch/nios2/include/asm/mmu_context.h
+++ b/arch/nios2/include/asm/mmu_context.h
@@ -26,16 +26,13 @@ extern unsigned long get_pid_from_context(mm_context_t *ctx);
*/
extern pgd_t *pgd_current;
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
/*
* Initialize the context related info for a new mm_struct instance.
*
* Set all new contexts to 0, that way the generation will never match
* the currently running generation when this context is switched in.
*/
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -43,26 +40,16 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-}
-
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
-static inline void deactivate_mm(struct task_struct *tsk,
- struct mm_struct *mm)
-{
-}
-
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
+#define activate_mm activate_mm
void activate_mm(struct mm_struct *prev, struct mm_struct *next);
+#include <asm-generic/mmu_context.h>
+
#endif /* _ASM_NIOS2_MMU_CONTEXT_H */
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 4ffe857e6ada..50b4eb19a6cc 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(pm_power_off);
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 6233c6293180..591acc5990dc 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -24,7 +24,6 @@ config OPENRISC
select GENERIC_CPU_DEVICES
select HAVE_UID16
select GENERIC_ATOMIC64
- select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
index ced577542e29..a6702384c77d 100644
--- a/arch/openrisc/include/asm/mmu_context.h
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -17,13 +17,13 @@
#include <asm-generic/mm_hooks.h>
+#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
-#define deactivate_mm(tsk, mm) do { } while (0)
-
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
/* current active pgd - this is similar to other processors pgd
@@ -32,8 +32,6 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
+#include <asm-generic/mmu_context.h>
#endif
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 0ff391f00334..3c98728cce24 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -79,7 +79,7 @@ void machine_power_off(void)
*/
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
if (mfspr(SPR_UPR) & SPR_UPR_PMP)
mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
}
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 8348feaaf46e..bf9b2310fc93 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -33,7 +33,6 @@
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
-#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index a978590d802d..5aed97a18bac 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <linux/sched.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b234e8154cbd..78b17621ee4a 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -52,7 +52,7 @@ config PARISC
select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
- select GENERIC_CLOCKEVENTS
+ select LEGACY_TIMER_TICK
select CPU_NO_EFFICIENT_FFS
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index e3ee5c0bfe80..f16c4db80116 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -5,5 +5,4 @@ generated-y += syscall_table_c32.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += seccomp.h
generic-y += user.h
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 7f7039516e53..fad29aa6f45f 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -32,7 +32,6 @@ typedef struct {
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
-#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h
deleted file mode 100644
index 3e70b5cd1123..000000000000
--- a/arch/parisc/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index cb5f2f730421..46f8c22c5977 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -7,16 +7,13 @@
#include <linux/atomic.h>
#include <asm-generic/mm_hooks.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
/* on PA-RISC, we actually have enough contexts to justify an allocator
* for them. prumpf */
extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -26,6 +23,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
+#define destroy_context destroy_context
static inline void
destroy_context(struct mm_struct *mm)
{
@@ -71,8 +69,7 @@ static inline void switch_mm(struct mm_struct *prev,
}
#define switch_mm_irqs_off switch_mm_irqs_off
-#define deactivate_mm(tsk,mm) do { } while (0)
-
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/*
@@ -90,4 +87,7 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
switch_mm(prev,next,current);
}
+
+#include <asm-generic/mmu_context.h>
+
#endif
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h
new file mode 100644
index 000000000000..b058b2220322
--- /dev/null
+++ b/arch/parisc/include/asm/seccomp.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#ifdef CONFIG_64BIT
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_PARISC64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "parisc64"
+# ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_PARISC
+# define SECCOMP_ARCH_COMPAT_NR NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "parisc"
+# endif
+#else /* !CONFIG_64BIT */
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_PARISC
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "parisc"
+#endif
+
+#endif /* _ASM_SECCOMP_H */
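
Supplying SECCOMP_ARCH_NATIVE (plus the compat trio where relevant) is what opts an architecture into seccomp's constant-action bitmap cache: kernel/seccomp.c keeps one bitmap of NR_syscalls bits per declared arch and short-circuits filters that always allow a given syscall. The consuming structure, simplified:

	/* kernel/seccomp.c side, simplified */
	struct action_cache {
		DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
	#ifdef SECCOMP_ARCH_COMPAT
		DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
	#endif
	};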
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index e605197b462c..e5a2657477ac 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -41,19 +41,6 @@
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
#define SA_ONSTACK 0x00000001
#define SA_RESETHAND 0x00000004
#define SA_NOCLDSTOP 0x00000008
@@ -68,14 +55,7 @@
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
-
-#define SIG_BLOCK 0 /* for blocking signals */
-#define SIG_UNBLOCK 1 /* for unblocking signals */
-#define SIG_SETMASK 2 /* for setting the signal mask */
-
-#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
-#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
-#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+#include <asm-generic/signal-defs.h>
# ifndef __ASSEMBLY__
@@ -84,18 +64,6 @@
/* Avoid too many header ordering problems. */
struct siginfo;
-/* Type of a signal handler. */
-#if defined(__LP64__)
-/* function pointers on 64-bit parisc are pointers to little structs and the
- * compiler doesn't support code which changes or tests the address of
- * the function in the little struct. This is really ugly -PB
- */
-typedef char __user *__sighandler_t;
-#else
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
-#endif
-
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 10173c32195e..f60904329bbc 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -116,6 +116,9 @@
#define SO_DETACH_REUSEPORT_BPF 0x4042
+#define SO_PREFER_BUSY_POLL 0x4043
+#define SO_BUSY_POLL_BUDGET 0x4044
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index f196d96e2f9f..a92a23d6acd9 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -169,7 +169,7 @@ void __cpuidle arch_cpu_idle_dead(void)
void __cpuidle arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
/* nop on real hardware, qemu will idle sleep. */
asm volatile("or %%r10,%%r10,%%r10\n":::);
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 13d94f0f94a0..08e4d480abe1 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -70,8 +70,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
- profile_tick(CPU_PROFILING);
-
/* Initialize next_tick to the old expected tick time. */
next_tick = cpuinfo->it_value;
@@ -86,10 +84,9 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
cpuinfo->it_value = next_tick;
/* Go do system house keeping. */
- if (cpu == 0)
- xtime_update(ticks_elapsed);
-
- update_process_times(user_mode(get_irq_regs()));
+ if (cpu != 0)
+ ticks_elapsed = 0;
+ legacy_timer_tick(ticks_elapsed);
/* Skip clockticks on purpose if we know we would miss those.
* The new CR16 must be "later" than current CR16 otherwise
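
legacy_timer_tick() is the new helper that bundles the old xtime_update() + update_process_times() + profile_tick() trio for architectures still driven by a periodic timer; passing 0 from secondary CPUs (the `cpu != 0` branch above) keeps jiffies accounting on the boot CPU only. Abridged:

	/* kernel/time/tick-legacy.c, abridged */
	void legacy_timer_tick(unsigned long ticks)
	{
		if (ticks) {
			raw_spin_lock(&jiffies_lock);
			do_timer(ticks);	/* advance jiffies by 'ticks' */
			raw_spin_unlock(&jiffies_lock);
		}
		update_process_times(user_mode(get_irq_regs()));
		profile_tick(CPU_PROFILING);
	}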
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e9f13fe08492..e307f777d942 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -146,12 +146,14 @@ config PPC
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
@@ -163,7 +165,6 @@ config PPC
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
select GENERIC_ATOMIC64 if PPC32
- select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
@@ -355,10 +356,6 @@ config PPC_OF_PLATFORM_PCI
depends on PCI
depends on PPC64 # not supported on 32 bits yet
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- depends on PPC32 || PPC_BOOK3S_64
- def_bool y
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -409,6 +406,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
depends on PPC32
+ select KMAP_LOCAL
source "kernel/Kconfig.hz"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a4d56f0a41d9..5c8c06215dd4 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -123,7 +123,6 @@ endif
LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
-LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)
ifdef CONFIG_PPC64
ifeq ($(call cc-option-yn,-mcmodel=medium),y)
@@ -248,7 +247,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string)
cpu-as-$(CONFIG_40x) += -Wa,-m405
cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-cpu-as-$(CONFIG_E200) += -Wa,-me200
cpu-as-$(CONFIG_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index f8ce6d2dde7b..1659963a8f1d 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -31,6 +31,7 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
+ -include $(srctree)/include/linux/compiler_attributes.h \
$(LINUXINCLUDE)
ifdef CONFIG_PPC64_BOOT_WRAPPER
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
index 8bf39ef7d2df..6098b879ac97 100644
--- a/arch/powerpc/boot/decompress.c
+++ b/arch/powerpc/boot/decompress.c
@@ -21,7 +21,6 @@
#define STATIC static
#define INIT
-#define __always_inline inline
/*
* The build process will copy the required zlib source files and headers
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1c674c4c1d86..1de0dbf6cbba 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -194,7 +194,6 @@ CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_APPLEDISPLAY=m
CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 66e9a0fd64ff..ef09f3cce1fa 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -579,7 +579,6 @@ CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TABLET=y
CONFIG_TABLET_USB_ACECAD=m
CONFIG_TABLET_USB_AIPTEK=m
-CONFIG_TABLET_USB_GTCO=m
CONFIG_TABLET_USB_KBTAB=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
@@ -598,8 +597,6 @@ CONFIG_GAMEPORT_FM801=m
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_ROCKETPORT=m
CONFIG_CYCLADES=m
-CONFIG_SYNCLINK=m
-CONFIG_SYNCLINKMP=m
CONFIG_SYNCLINK_GT=m
CONFIG_NOZOMI=m
CONFIG_N_HDLC=m
@@ -911,7 +908,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_SERIAL_DEBUG=m
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index cb57be4ada61..b1e577cbf00c 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index b40dc50a6908..7a55d790cdb1 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <asm/byteorder.h>
void powerpc_sha_transform(u32 *state, const u8 *src);
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index ceb0b6c980b3..a6e650a97d8f 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>
@@ -177,7 +177,7 @@ static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
{
- u32 D[SHA256_DIGEST_SIZE >> 2];
+ __be32 D[SHA256_DIGEST_SIZE >> 2];
__be32 *dst = (__be32 *)out;
ppc_spe_sha256_final(desc, (u8 *)D);
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 36443cda8dcf..1376be95e975 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -36,8 +36,10 @@ static inline bool pte_user(pte_t pte)
*/
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 3ee1ec60be84..a39e2d193fdc 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -27,6 +27,7 @@
#endif
.endm
+#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
BEGIN_MMU_FTR_SECTION_NESTED(67)
@@ -38,6 +39,7 @@
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
+#endif
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
@@ -61,6 +63,10 @@
#else /* !__ASSEMBLY__ */
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
#ifdef CONFIG_PPC_KUAP
#include <asm/mmu.h>
@@ -103,8 +109,16 @@ static inline void kuap_check_amr(void)
static inline unsigned long get_kuap(void)
{
+ /*
+ * We return AMR_KUAP_BLOCKED when we don't support KUAP because
+ * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
+ * cause restore_user_access to do a flush.
+ *
+ * This has no effect in terms of actually blocking things on hash,
+ * so it doesn't break anything.
+ */
if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
- return 0;
+ return AMR_KUAP_BLOCKED;
return mfspr(SPRN_AMR);
}
@@ -123,6 +137,29 @@ static inline void set_kuap(unsigned long value)
isync();
}
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+ (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+ "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+}
+#else /* CONFIG_PPC_KUAP */
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+ return 0UL;
+}
+
+static inline unsigned long get_kuap(void)
+{
+ return AMR_KUAP_BLOCKED;
+}
+
+static inline void set_kuap(unsigned long value) { }
+#endif /* !CONFIG_PPC_KUAP */
+
static __always_inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
@@ -142,6 +179,8 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
}
static inline unsigned long prevent_user_access_return(void)
@@ -149,6 +188,8 @@ static inline unsigned long prevent_user_access_return(void)
unsigned long flags = get_kuap();
set_kuap(AMR_KUAP_BLOCKED);
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
return flags;
}
@@ -156,30 +197,9 @@ static inline unsigned long prevent_user_access_return(void)
static inline void restore_user_access(unsigned long flags)
{
set_kuap(flags);
+ if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
+ do_uaccess_flush();
}
-
-static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
-{
- return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
- (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
- "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
-}
-#else /* CONFIG_PPC_KUAP */
-static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
-{
-}
-
-static inline void kuap_check_amr(void)
-{
-}
-
-static inline unsigned long kuap_get_and_check_amr(void)
-{
- return 0;
-}
-#endif /* CONFIG_PPC_KUAP */
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
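
The whole uaccess-flush mitigation hangs off a static key, so kernels on unaffected hardware pay a patched-out nop rather than a taken branch; only when firmware reports the need is uaccess_flush_key flipped on and the L1D flush run on every return from user access. The guard as a freestanding sketch (not the literal powerpc code):

	DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);

	static inline void flush_if_vulnerable(void)
	{
		/* compiles to a nop until setup code calls
		 * static_branch_enable(&uaccess_flush_key) */
		if (static_branch_unlikely(&uaccess_flush_key))
			do_uaccess_flush();
	}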
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index e0b52940e43c..750918451dd2 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -242,6 +242,18 @@ extern void radix_init_pseries(void);
static inline void radix_init_pseries(void) { };
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+#define arch_clear_mm_cpumask_cpu(cpu, mm) \
+ do { \
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \
+ atomic_dec(&(mm)->context.active_cpus); \
+ cpumask_clear_cpu(cpu, mm_cpumask(mm)); \
+ } \
+ } while (0)
+
+void cleanup_cpu_mmu_context(void);
+#endif
+
static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
int index = ea >> MAX_EA_BITS_PER_CONTEXT;
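
arch_clear_mm_cpumask_cpu() is the override point used by clear_tasks_mm_cpumask() when a CPU goes down; defining it here lets Book3S-64 decrement context.active_cpus in lock-step with clearing the mm_cpumask bit instead of leaking the count across hot-unplug. The caller, heavily simplified (locking and task lookup elided):

	/* kernel/cpu.c side, heavily simplified */
	void clear_tasks_mm_cpumask_sketch(int cpu)
	{
		struct task_struct *p;

		for_each_process(p) {
			if (p->mm)
				arch_clear_mm_cpumask_cpu(cpu, p->mm);
		}
	}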
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index ebe95aa04d53..1d32b174ab6a 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -57,11 +57,18 @@
nop; \
nop
+#define ENTRY_FLUSH_SLOT \
+ ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
/*
* r10 must be free to use, r13 must be paca
*/
#define INTERRUPT_TO_KERNEL \
- STF_ENTRY_BARRIER_SLOT
+ STF_ENTRY_BARRIER_SLOT; \
+ ENTRY_FLUSH_SLOT
/*
* Macros for annotating the expected destination of (h)rfid
@@ -137,6 +144,9 @@
RFSCV; \
b rfscv_flush_fallback
+#else /* __ASSEMBLY__ */
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_EXCEPTION_H */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index b0af97add751..fbd406cd6916 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -205,6 +205,22 @@ label##3: \
FTR_ENTRY_OFFSET 955b-956b; \
.popsection;
+#define UACCESS_FLUSH_FIXUP_SECTION \
+959: \
+ .pushsection __uaccess_flush_fixup,"a"; \
+ .align 2; \
+960: \
+ FTR_ENTRY_OFFSET 959b-960b; \
+ .popsection;
+
+#define ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
@@ -237,8 +253,11 @@ label##3: \
#include <linux/types.h>
extern long stf_barrier_fallback;
+extern long entry_flush_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
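Each *_FIXUP_SECTION macro records a self-relative offset (site label minus entry label) in a dedicated ELF section, so boot-time code can locate every patch site from the __start/__stop symbols. A sketch of the walk, matching the `dest = (void *)start + *start` arithmetic used later in feature-fixups.c:

```c
/* Sketch: recover each patch site from its self-relative fixup entry. */
extern long __start___entry_flush_fixup[], __stop___entry_flush_fixup[];

static void for_each_entry_flush_site(void (*patch_site)(unsigned int *dest))
{
	long *entry;

	for (entry = __start___entry_flush_fixup;
	     entry < __stop___entry_flush_fixup; entry++)
		patch_site((unsigned int *)((void *)entry + *entry));
}
```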
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 6bfc87915d5d..8d03c16a3663 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -20,7 +20,7 @@
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
#ifdef CONFIG_KASAN
@@ -55,7 +55,7 @@ enum fixed_addresses {
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
#ifdef CONFIG_PPC_8xx
/* For IMMR we need an aligned 512K area */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 104026f7d6bc..80a5ae771c65 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -24,12 +24,10 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
-#include <asm/kmap_types.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
-extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
/*
@@ -60,6 +58,11 @@ extern pte_t *pkmap_page_table;
#define flush_cache_kmaps() flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_page(NULL, vaddr)
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_page(NULL, vaddr)
+
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
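These two hooks are the arch glue for the generic kmap_local code that replaces the powerpc kmap_atomic implementation removed further down. A rough sketch of the generic map path they plug into; slot_pte() is a hypothetical stand-in for the fixmap pte lookup:

```c
/* Rough sketch of the generic kmap_local map path (mm/highmem.c). */
void *kmap_local_page_sketch(struct page *page, pgprot_t prot)
{
	int idx = kmap_local_idx_push() + KM_MAX_IDX * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte_t pteval = mk_pte(page, prot);

	set_pte_at(&init_mm, vaddr, slot_pte(idx), pteval); /* hypothetical helper */
	arch_kmap_local_post_map(vaddr, pteval);	/* powerpc: local TLB flush */
	return (void *)vaddr;
}
```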
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
deleted file mode 100644
index c8fa182d48c8..000000000000
--- a/arch/powerpc/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _ASM_POWERPC_KMAP_TYPES_H
-#define _ASM_POWERPC_KMAP_TYPES_H
-
-#ifdef __KERNEL__
-
-/*
- */
-
-#define KM_TYPE_NR 16
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 1d0f7d838b2e..0d93331d0fab 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -14,7 +14,7 @@
#define KUAP_CURRENT_WRITE 8
#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup-radix.h>
#endif
#ifdef CONFIG_PPC_8xx
@@ -35,6 +35,9 @@
.macro kuap_check current, gpr
.endm
+.macro kuap_check_amr gpr1, gpr2
+.endm
+
#endif
#else /* !__ASSEMBLY__ */
@@ -53,17 +56,28 @@ static inline void setup_kuep(bool disabled) { }
void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return false;
+}
+
+static inline void kuap_check_amr(void) { }
+
+/*
+ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
+ * the L1D cache after user accesses. Only include the empty stubs for other
+ * platforms.
+ */
+#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
-static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
-{
- return false;
-}
+#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_KUAP */
static inline void allow_read_from_user(const void __user *from, unsigned long size)
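bad_kuap_fault() moves here so every platform has a definition even when the uaccess-flush stubs come from kup-radix.h. For orientation, the page-fault path consults it roughly like this (a sketch of the logic in arch/powerpc/mm/fault.c, not a verbatim copy):

```c
/* Sketch: kernel faults on user addresses are cross-checked with KUAP. */
static bool kernel_access_blocked_sketch(struct pt_regs *regs,
					 unsigned long addr, bool is_write)
{
	/* a KUAP hit means a kernel access lacked allow_user_access() */
	return !user_mode(regs) && bad_kuap_fault(regs, addr, is_write);
}
```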
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index e02aa793420b..b42813359f49 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -14,7 +14,9 @@
/*
* Most if the context management is out of line
*/
+#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
@@ -235,27 +237,26 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
#define switch_mm_irqs_off switch_mm_irqs_off
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
switch_mm_irqs_off(prev, next, current);
}
/* We don't currently use enter_lazy_tlb() for anything */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
/* 64-bit Book3E keeps track of current PGD in the PACA */
-#ifdef CONFIG_PPC_BOOK3E_64
get_paca()->pgd = NULL;
-#endif
}
+#endif
extern void arch_exit_mmap(struct mm_struct *mm);
@@ -298,5 +299,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
return 0;
}
+#include <asm-generic/mmu_context.h>
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
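The new `#define foo foo` lines follow the asm-generic convention: asm-generic/mmu_context.h, now included at the end, supplies a default for every hook the arch did not claim. The mechanism is just a preprocessor guard, sketched here after the generic header's pattern:

```c
/* Sketch of the asm-generic fallback pattern enabled by the #defines. */
#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	return 0;	/* no-op default when the arch does not override */
}
#endif
```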
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 91c69ff53a8a..6cda76b57c5d 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -46,5 +46,10 @@ u64 memory_hotplug_max(void);
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 85ed2390fb99..567cdc557402 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -63,7 +63,7 @@ static inline void restore_user_access(unsigned long flags)
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
+ return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xff000000),
"Bug: fault blocked by AP register !");
}
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 1d9ac0f9c794..0bd1b144eb76 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -33,19 +33,18 @@
* respectively NA for All or X for Supervisor and no access for User.
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-15 => Not Used
- */
-#define MI_APG_INIT 0x40000000
-
-/*
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-15 => Not Used
- */
-#define MI_APG_KUEP 0x60000000
+ * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, we
+ * apply the "all User" rules, which lead to NA for all accesses.
+ * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
+ * 0 => Kernel => 11 (all accesses performed as user iaw page definition)
+ * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
+ * 2 => User => 11 (all accesses performed as user iaw page definition)
+ * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
+ * => 10 (all accesses performed according to swapped page definition) for KUEP
+ * 4-15 => Not Used
+ */
+#define MI_APG_INIT 0xdc000000
+#define MI_APG_KUEP 0xde000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -106,25 +105,9 @@
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
-/*
- * All pages' PP data bits are set to either 000 or 011 or 001, which means
- * respectively RW for Supervisor and no access for User, or RO for
- * Supervisor and no access for user and NA for ALL.
- * Then we use the APG to say whether accesses are according to Page rules or
- * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-15 => Not Used
- */
-#define MD_APG_INIT 0x40000000
-
-/*
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-15 => Not Used
- */
-#define MD_APG_KUAP 0x60000000
+/* See explanation above at the definition of MI_APG_INIT */
+#define MD_APG_INIT 0xdc000000
+#define MD_APG_KUAP 0xde000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index ee2243ba96cf..96522f7f0618 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -153,8 +153,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 66f403a7da44..fcc48d590d88 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -39,9 +39,9 @@
* into the TLB.
*/
#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
-#define _PAGE_SPECIAL 0x0020 /* SW entry */
+#define _PAGE_ACCESSED 0x0020 /* Copied to L1 APG 1 entry in I/DTLB */
#define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */
-#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+#define _PAGE_SPECIAL 0x0080 /* SW entry */
#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
@@ -59,11 +59,12 @@
#define _PMD_PRESENT 0x0001
#define _PMD_PRESENT_MASK _PMD_PRESENT
-#define _PMD_BAD 0x0fd0
+#define _PMD_BAD 0x0f90
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
#define _PMD_PAGE_512K 0x0004
-#define _PMD_USER 0x0020 /* APG 1 */
+#define _PMD_ACCESSED 0x0020 /* APG 1 */
+#define _PMD_USER 0x0040 /* APG 2 */
#define _PTE_NONE_MASK 0
@@ -134,6 +135,29 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#define pte_mkhuge pte_mkhuge
+
+static inline unsigned long pgd_leaf_size(pgd_t pgd)
+{
+ if (pgd_val(pgd) & _PMD_PAGE_8M)
+ return SZ_8M;
+ return SZ_4M;
+}
+
+#define pgd_leaf_size pgd_leaf_size
+
+static inline unsigned long pte_leaf_size(pte_t pte)
+{
+ pte_basic_t val = pte_val(pte);
+
+ if (val & _PAGE_HUGE)
+ return SZ_512K;
+ if (val & _PAGE_SPS)
+ return SZ_16K;
+ return SZ_4K;
+}
+
+#define pte_leaf_size pte_leaf_size
+
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
index 51209f6071c5..ac2033f134f0 100644
--- a/arch/powerpc/include/asm/seccomp.h
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -8,4 +8,27 @@
#include <asm-generic/seccomp.h>
+#ifdef __LITTLE_ENDIAN__
+#define __SECCOMP_ARCH_LE __AUDIT_ARCH_LE
+#define __SECCOMP_ARCH_LE_NAME "le"
+#else
+#define __SECCOMP_ARCH_LE 0
+#define __SECCOMP_ARCH_LE_NAME
+#endif
+
+#ifdef CONFIG_PPC64
+# define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_PPC64 | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ppc64" __SECCOMP_ARCH_LE_NAME
+# ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT (AUDIT_ARCH_PPC | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_COMPAT_NR NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "ppc" __SECCOMP_ARCH_LE_NAME
+# endif
+#else /* !CONFIG_PPC64 */
+# define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_PPC | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ppc" __SECCOMP_ARCH_LE_NAME
+#endif
+
#endif /* _ASM_POWERPC_SECCOMP_H */
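These constants tell the generic seccomp action cache which audit arches and syscall counts this kernel can see. On the userspace side, a filter should pin the same arch value before trusting syscall numbers; a minimal sketch for little-endian ppc64:

```c
/* Userspace sketch: reject anything that is not AUDIT_ARCH_PPC64LE. */
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>

struct sock_filter ppc64le_guard[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
		 offsetof(struct seccomp_data, arch)),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_PPC64LE, 1, 0),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};
```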
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index fbb8fa32150f..b774a4477d5f 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -86,12 +86,19 @@ static inline bool security_ftr_enabled(u64 feature)
// Software required to flush link stack on context switch
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+// The L1-D cache should be flushed when entering the kernel
+#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull
+
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
SEC_FTR_L1D_FLUSH_PR | \
SEC_FTR_BNDS_CHK_SPEC_BAR | \
+ SEC_FTR_L1D_FLUSH_ENTRY | \
+ SEC_FTR_L1D_FLUSH_UACCESS | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 9efbddee2bca..a466749703f1 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,12 +52,16 @@ enum l1d_flush_type {
};
void setup_rfi_flush(enum l1d_flush_type, bool enable);
+void setup_entry_flush(bool enable);
+void setup_uaccess_flush(bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
#else
static inline void setup_barrier_nospec(void) { };
#endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
+void do_entry_flush_fixups(enum l1d_flush_type types);
void do_barrier_nospec_fixups(bool enable);
extern bool barrier_nospec_enabled;
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 1e6fa371cc38..d072866842e4 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -13,9 +13,9 @@
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_MEMORY_HOTPLUG
-extern int create_section_mapping(unsigned long start, unsigned long end,
- int nid, pgprot_t prot);
extern int remove_section_mapping(unsigned long start, unsigned long end);
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
#ifdef CONFIG_NUMA
extern int hot_add_scn_to_nid(unsigned long scn_addr);
@@ -26,6 +26,5 @@ static inline int hot_add_scn_to_nid(unsigned long scn_addr)
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8728590f514a..3beeb030cd78 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -6,6 +6,7 @@
struct device;
struct device_node;
+struct drmem_lmb;
#ifdef CONFIG_NUMA
@@ -61,6 +62,9 @@ static inline int early_cpu_to_node(int cpu)
*/
return (nid < 0) ? 0 : nid;
}
+
+int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+
#else
static inline int early_cpu_to_node(int cpu) { return 0; }
@@ -84,10 +88,12 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
return 0;
}
-#endif /* CONFIG_NUMA */
+static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
+{
+ return first_online_node;
+}
-struct drmem_lmb;
-int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+#endif /* CONFIG_NUMA */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int find_and_online_cpu_nid(int cpu);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index ef5bbb705c08..501c9a79038c 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -178,7 +178,7 @@ do { \
* are no aliasing issues.
*/
#define __put_user_asm_goto(x, addr, label, op) \
- asm volatile goto( \
+ asm_volatile_goto( \
"1: " op "%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@@ -191,7 +191,7 @@ do { \
__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
- asm volatile goto( \
+ asm_volatile_goto( \
"1: stw%X1 %0, %1\n" \
"2: stw%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
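The substitution above swaps a raw `asm volatile goto` for the asm_volatile_goto() wrapper from the compiler headers. Its definition (as found in linux/compiler-gcc.h in kernels of this era) appends an empty asm statement to work around an old GCC "asm goto" miscompilation; asm goto is already implicitly volatile:

```c
/* Definition being substituted in, per the compiler headers of this era. */
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
```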
diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h
index 85b0a7aa43e7..04873dd311c2 100644
--- a/arch/powerpc/include/uapi/asm/signal.h
+++ b/arch/powerpc/include/uapi/asm/signal.h
@@ -60,30 +60,6 @@ typedef struct {
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001U
-#define SA_NOCLDWAIT 0x00000002U
-#define SA_SIGINFO 0x00000004U
-#define SA_ONSTACK 0x08000000U
-#define SA_RESTART 0x10000000U
-#define SA_NODEFER 0x40000000U
-#define SA_RESETHAND 0x80000000U
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000U
#define MINSIGSTKSZ 2048
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 6b50bf15d8c1..bf3270426d82 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
{
struct pci_io_addr_range *piar;
struct rb_node *n;
+ unsigned long flags;
- spin_lock(&pci_io_addr_cache_root.piar_lock);
+ spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
@@ -273,7 +274,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
}
- spin_unlock(&pci_io_addr_cache_root.piar_lock);
+ spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
return 0;
}
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f7d748b88705..4d01f09ecf80 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1000,8 +1000,6 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
* Vectors for the FWNMI option. Share common code.
*/
TRAMP_REAL_BEGIN(system_reset_fwnmi)
- /* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
- __IKVM_REAL(system_reset)=0
GEN_INT_ENTRY system_reset, virt=0
#endif /* CONFIG_PPC_PSERIES */
@@ -1412,6 +1410,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* If none is found, do a Linux page fault. Linux page faults can happen in
* kernel mode due to user copy operations of course.
*
+ * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
+ * MMU context, which may cause a DSI in the host, which must go to the
+ * KVM handler. MSR[IR] is not enabled, so the real-mode handler will
+ * always be used regardless of AIL setting.
+ *
* - Radix MMU
* The hardware loads from the Linux page table directly, so a fault goes
* immediately to Linux page fault.
@@ -1422,10 +1425,8 @@ INT_DEFINE_BEGIN(data_access)
IVEC=0x300
IDAR=1
IDSISR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
-#endif
INT_DEFINE_END(data_access)
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
@@ -1464,6 +1465,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* ppc64_bolted_size (first segment). The kernel handler must avoid stomping
* on user-handler data structures.
*
+ * KVM: Same as 0x300, DSLB must test for KVM guest.
+ *
* A dedicated save area EXSLB is used (XXX: but it actually need not be
* these days, we could use EXGEN).
*/
@@ -1472,10 +1475,8 @@ INT_DEFINE_BEGIN(data_access_slb)
IAREA=PACA_EXSLB
IRECONCILE=0
IDAR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
-#endif
INT_DEFINE_END(data_access_slb)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
@@ -2951,15 +2952,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
.endr
blr
-TRAMP_REAL_BEGIN(rfi_flush_fallback)
- SET_SCRATCH0(r13);
- GET_PACA(r13);
- std r1,PACA_EXRFI+EX_R12(r13)
- ld r1,PACAKSAVE(r13)
- std r9,PACA_EXRFI+EX_R9(r13)
- std r10,PACA_EXRFI+EX_R10(r13)
- std r11,PACA_EXRFI+EX_R11(r13)
- mfctr r9
+/* Clobbers r10, r11, ctr */
+.macro L1D_DISPLACEMENT_FLUSH
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SIZE(r13)
srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
@@ -2970,7 +2964,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
sync
/*
- * The load adresses are at staggered offsets within cachelines,
+ * The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
@@ -2985,7 +2979,30 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r11,(0x80 + 8)*7(r10)
addi r10,r10,0x80*8
bdnz 1b
+.endm
+TRAMP_REAL_BEGIN(entry_flush_fallback)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ L1D_DISPLACEMENT_FLUSH
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ blr
+
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
@@ -3003,32 +3020,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
mfctr r9
- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
- ld r11,PACA_L1D_FLUSH_SIZE(r13)
- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
- mtctr r11
- DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
- /* order ld/st prior to dcbt stop all streams with flushing */
- sync
-
- /*
- * The load adresses are at staggered offsets within cachelines,
- * which suits some pipelines better (on others it should not
- * hurt).
- */
-1:
- ld r11,(0x80 + 8)*0(r10)
- ld r11,(0x80 + 8)*1(r10)
- ld r11,(0x80 + 8)*2(r10)
- ld r11,(0x80 + 8)*3(r10)
- ld r11,(0x80 + 8)*4(r10)
- ld r11,(0x80 + 8)*5(r10)
- ld r11,(0x80 + 8)*6(r10)
- ld r11,(0x80 + 8)*7(r10)
- addi r10,r10,0x80*8
- bdnz 1b
-
+ L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
@@ -3079,8 +3071,21 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
RFSCV
USE_TEXT_SECTION()
- MASKED_INTERRUPT
- MASKED_INTERRUPT hsrr=1
+
+_GLOBAL(do_uaccess_flush)
+ UACCESS_FLUSH_FIXUP_SECTION
+ nop
+ nop
+ nop
+ blr
+ L1D_DISPLACEMENT_FLUSH
+ blr
+_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
+EXPORT_SYMBOL(do_uaccess_flush)
+
+
+MASKED_INTERRUPT
+MASKED_INTERRUPT hsrr=1
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 44c9018aed1b..a1ae00689e0f 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -284,11 +284,7 @@ _ENTRY(saved_ksp_limit)
rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r11) /* Get Linux PTE */
-#ifdef CONFIG_SWAP
li r9, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
- li r9, _PAGE_PRESENT
-#endif
andc. r9, r9, r11 /* Check permission */
bne 5f
@@ -369,11 +365,7 @@ _ENTRY(saved_ksp_limit)
rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r11) /* Get Linux PTE */
-#ifdef CONFIG_SWAP
li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
- li r9, _PAGE_PRESENT | _PAGE_EXEC
-#endif
andc. r9, r9, r11 /* Check permission */
bne 5f
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 9f359d3fba74..ee0bfebc375f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -202,9 +202,7 @@ SystemCall:
InstructionTLBMiss:
mtspr SPRN_SPRG_SCRATCH0, r10
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
mtspr SPRN_SPRG_SCRATCH1, r11
-#endif
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -224,25 +222,13 @@ InstructionTLBMiss:
3:
mtcr r11
#endif
-#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
mtspr SPRN_MD_TWC, r11
-#else
- lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
- mtspr SPRN_MI_TWC, r10 /* Set segment attributes */
- mtspr SPRN_MD_TWC, r10
-#endif
mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
-#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
+ rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
mtspr SPRN_MI_TWC, r11
-#endif
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 32-5, _PAGE_PRESENT
- and r11, r11, r10
- rlwimi r10, r11, 0, _PAGE_PRESENT
-#endif
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 20 and 23 must be clear.
* Software indicator bits 22, 24, 25, 26, and 27 must be
@@ -256,9 +242,7 @@ InstructionTLBMiss:
/* Restore registers */
0: mfspr r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
mfspr r11, SPRN_SPRG_SCRATCH1
-#endif
rfi
patch_site 0b, patch__itlbmiss_exit_1
@@ -268,9 +252,7 @@ InstructionTLBMiss:
addi r10, r10, 1
stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
mfspr r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
mfspr r11, SPRN_SPRG_SCRATCH1
-#endif
rfi
#endif
@@ -297,30 +279,16 @@ DataStoreTLBMiss:
mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
- /* Insert the Guarded flag into the TWC from the Linux PTE.
+ /* Insert Guarded and Accessed flags into the TWC from the Linux PTE.
* It is bit 27 of both the Linux PTE and the TWC (at least
* I got that right :-). It will be better when we can put
* this into the Linux pgd/pmd and load it in the operation
* above.
*/
- rlwimi r11, r10, 0, _PAGE_GUARDED
+ rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
mtspr SPRN_MD_TWC, r11
- /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
- * We also need to know if the insn is a load/store, so:
- * Clear _PAGE_PRESENT and load that which will
- * trap into DTLB Error with store bit set accordinly.
- */
- /* PRESENT=0x1, ACCESSED=0x20
- * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
- * r10 = (r10 & ~PRESENT) | r11;
- */
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 32-5, _PAGE_PRESENT
- and r11, r11, r10
- rlwimi r10, r11, 0, _PAGE_PRESENT
-#endif
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
@@ -711,7 +679,7 @@ initial_mmu:
li r9, 4 /* up to 4 pages of 8M */
mtctr r9
lis r9, KERNELBASE@h /* Create vaddr for TLB */
- li r10, MI_PS8MEG | MI_SVALID /* Set 8M byte page */
+ li r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID
li r11, MI_BOOTINIT /* Create RPN for address 0 */
1:
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
@@ -775,7 +743,7 @@ _GLOBAL(mmu_pin_tlb)
#ifdef CONFIG_PIN_TLB_TEXT
LOAD_REG_IMMEDIATE(r5, 28 << 8)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
- LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+ LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
LOAD_REG_ADDR(r9, _sinittext)
li r0, 4
@@ -797,7 +765,7 @@ _GLOBAL(mmu_pin_tlb)
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
- LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+ LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
#ifdef CONFIG_PIN_TLB_IMMR
li r0, 3
#else
@@ -834,7 +802,7 @@ _GLOBAL(mmu_pin_tlb)
#endif
#ifdef CONFIG_PIN_TLB_IMMR
LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
- LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED)
+ LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED)
mfspr r8, SPRN_IMMR
rlwinm r8, r8, 0, 0xfff80000
ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 5eb9eedac920..a0dda2a1f2df 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -156,6 +156,7 @@ __after_mmu_off:
bl initial_bats
bl load_segment_registers
BEGIN_MMU_FTR_SECTION
+ bl reloc_offset
bl early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
@@ -457,11 +458,7 @@ InstructionTLBMiss:
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
- li r1,_PAGE_PRESENT | _PAGE_EXEC
-#endif
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
@@ -523,11 +520,7 @@ DataLoadTLBMiss:
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
li r1, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
- li r1, _PAGE_PRESENT
-#endif
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
@@ -603,11 +596,7 @@ DataStoreTLBMiss:
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
-#else
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
-#endif
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
@@ -932,7 +921,7 @@ early_hash_table:
ori r6, r6, 3 /* 256kB table */
mtspr SPRN_SDR1, r6
lis r6, early_hash@h
- lis r3, Hash@ha
+ addis r3, r3, Hash@ha
stw r6, Hash@l(r3)
blr
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index ae0e2632393d..1f835539fda4 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -52,9 +52,9 @@ void arch_cpu_idle(void)
* interrupts enabled, some don't.
*/
if (irqs_disabled())
- local_irq_enable();
+ raw_local_irq_enable();
} else {
- local_irq_enable();
+ raw_local_irq_enable();
/*
* Go into low thread priority and possibly
* low power mode.
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 38ae5933d917..e9d4eb6144e1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -355,6 +355,7 @@ static int __init prom_strtobool(const char *s, bool *res)
default:
break;
}
+ break;
default:
break;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index bb9cab3641d7..74fd47f46fa5 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -945,7 +945,13 @@ early_initcall(disable_hardlockup_detector);
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
+static bool no_entry_flush;
+static bool no_uaccess_flush;
bool rfi_flush;
+bool entry_flush;
+bool uaccess_flush;
+DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
+EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
@@ -955,6 +961,22 @@ static int __init handle_no_rfi_flush(char *p)
}
early_param("no_rfi_flush", handle_no_rfi_flush);
+static int __init handle_no_entry_flush(char *p)
+{
+ pr_info("entry-flush: disabled on command line.");
+ no_entry_flush = true;
+ return 0;
+}
+early_param("no_entry_flush", handle_no_entry_flush);
+
+static int __init handle_no_uaccess_flush(char *p)
+{
+ pr_info("uaccess-flush: disabled on command line.");
+ no_uaccess_flush = true;
+ return 0;
+}
+early_param("no_uaccess_flush", handle_no_uaccess_flush);
+
/*
* The RFI flush is not KPTI, but because users will see doco that says to use
* nopti we hijack that option here to also disable the RFI flush.
@@ -986,6 +1008,32 @@ void rfi_flush_enable(bool enable)
rfi_flush = enable;
}
+void entry_flush_enable(bool enable)
+{
+ if (enable) {
+ do_entry_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ do_entry_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ entry_flush = enable;
+}
+
+void uaccess_flush_enable(bool enable)
+{
+ if (enable) {
+ do_uaccess_flush_fixups(enabled_flush_types);
+ static_branch_enable(&uaccess_flush_key);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ static_branch_disable(&uaccess_flush_key);
+ do_uaccess_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ uaccess_flush = enable;
+}
+
static void __ref init_fallback_flush(void)
{
u64 l1d_size, limit;
@@ -1044,10 +1092,28 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
enabled_flush_types = types;
- if (!no_rfi_flush && !cpu_mitigations_off())
+ if (!cpu_mitigations_off() && !no_rfi_flush)
rfi_flush_enable(enable);
}
+void setup_entry_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_entry_flush)
+ entry_flush_enable(enable);
+}
+
+void setup_uaccess_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_uaccess_flush)
+ uaccess_flush_enable(enable);
+}
+
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
@@ -1075,9 +1141,63 @@ static int rfi_flush_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+static int entry_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != entry_flush)
+ entry_flush_enable(enable);
+
+ return 0;
+}
+
+static int entry_flush_get(void *data, u64 *val)
+{
+ *val = entry_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
+
+static int uaccess_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != uaccess_flush)
+ uaccess_flush_enable(enable);
+
+ return 0;
+}
+
+static int uaccess_flush_get(void *data, u64 *val)
+{
+ *val = uaccess_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
+
static __init int rfi_flush_debugfs_init(void)
{
debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+ debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
+ debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
return 0;
}
device_initcall(rfi_flush_debugfs_init);
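The two new debugfs files mirror rfi_flush: they accept 0/1 writes that call the *_enable() paths above, repatching the kernel at runtime. A hedged userspace sketch for toggling them (paths assume debugfs is mounted at /sys/kernel/debug):

```c
/* Userspace sketch: flip the runtime mitigation switches. */
#include <stdio.h>

static int set_knob(const char *path, int on)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on);
	return fclose(f);
}

int main(void)
{
	set_knob("/sys/kernel/debug/powerpc/entry_flush", 1);
	set_knob("/sys/kernel/debug/powerpc/uaccess_flush", 1);
	return 0;
}
```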
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 3c6b9822f978..8c2857cbd960 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1393,13 +1393,14 @@ static void add_cpu_to_masks(int cpu)
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
mmgrab(&init_mm);
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
+ rcu_cpu_starting(cpu);
preempt_disable();
cpu_callin_map[cpu] = 1;
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 8e50818aa50b..310bcd768cd5 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -2,7 +2,7 @@
#include <linux/err.h>
#include <asm/asm-prototypes.h>
-#include <asm/book3s/64/kup-radix.h>
+#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/kprobes.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 74efe46f5532..cf3f8db7e0e3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -311,12 +311,11 @@ static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
return stime_scaled;
}
-static unsigned long vtime_delta(struct task_struct *tsk,
+static unsigned long vtime_delta(struct cpu_accounting_data *acct,
unsigned long *stime_scaled,
unsigned long *steal_time)
{
unsigned long now, stime;
- struct cpu_accounting_data *acct = get_accounting(tsk);
WARN_ON_ONCE(!irqs_disabled());
@@ -331,29 +330,30 @@ static unsigned long vtime_delta(struct task_struct *tsk,
return stime;
}
+static void vtime_delta_kernel(struct cpu_accounting_data *acct,
+ unsigned long *stime, unsigned long *stime_scaled)
+{
+ unsigned long steal_time;
+
+ *stime = vtime_delta(acct, stime_scaled, &steal_time);
+ *stime -= min(*stime, steal_time);
+ acct->steal_time += steal_time;
+}
+
void vtime_account_kernel(struct task_struct *tsk)
{
- unsigned long stime, stime_scaled, steal_time;
struct cpu_accounting_data *acct = get_accounting(tsk);
+ unsigned long stime, stime_scaled;
- stime = vtime_delta(tsk, &stime_scaled, &steal_time);
-
- stime -= min(stime, steal_time);
- acct->steal_time += steal_time;
+ vtime_delta_kernel(acct, &stime, &stime_scaled);
- if ((tsk->flags & PF_VCPU) && !irq_count()) {
+ if (tsk->flags & PF_VCPU) {
acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->utime_scaled += stime_scaled;
#endif
} else {
- if (hardirq_count())
- acct->hardirq_time += stime;
- else if (in_serving_softirq())
- acct->softirq_time += stime;
- else
- acct->stime += stime;
-
+ acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->stime_scaled += stime_scaled;
#endif
@@ -366,10 +366,34 @@ void vtime_account_idle(struct task_struct *tsk)
unsigned long stime, stime_scaled, steal_time;
struct cpu_accounting_data *acct = get_accounting(tsk);
- stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+ stime = vtime_delta(acct, &stime_scaled, &steal_time);
acct->idle_time += stime + steal_time;
}
+static void vtime_account_irq_field(struct cpu_accounting_data *acct,
+ unsigned long *field)
+{
+ unsigned long stime, stime_scaled;
+
+ vtime_delta_kernel(acct, &stime, &stime_scaled);
+ *field += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ acct->stime_scaled += stime_scaled;
+#endif
+}
+
+void vtime_account_softirq(struct task_struct *tsk)
+{
+ struct cpu_accounting_data *acct = get_accounting(tsk);
+ vtime_account_irq_field(acct, &acct->softirq_time);
+}
+
+void vtime_account_hardirq(struct task_struct *tsk)
+{
+ struct cpu_accounting_data *acct = get_accounting(tsk);
+ vtime_account_irq_field(acct, &acct->hardirq_time);
+}
+
static void vtime_flush_scaled(struct task_struct *tsk,
struct cpu_accounting_data *acct)
{
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index d200e7df7167..e8a63713e655 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -141,6 +141,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
+ break;
default:
break;
}
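This hunk, like the prom_init.c and imc-pmu.c ones, adds a `break` where a case silently fell into `default`. When a fall-through is actually intended, current kernel style marks it explicitly so -Wimplicit-fallthrough stays clean; a sketch with a hypothetical handler:

```c
/* Sketch: an intentional fall-through is annotated, never left bare. */
switch (die_val) {
case DIE_SSTEP:
	handle_single_step();	/* hypothetical handler */
	fallthrough;		/* deliberate: also run the default path */
default:
	break;
}
```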
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index e0548b4950de..e184d17387f6 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,20 @@ SECTIONS
}
. = ALIGN(8);
+ __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
+ __start___uaccess_flush_fixup = .;
+ *(__uaccess_flush_fixup)
+ __stop___uaccess_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
+ __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
+ __start___entry_flush_fixup = .;
+ *(__entry_flush_fixup)
+ __stop___entry_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
__start___stf_exit_barrier_fixup = .;
*(__stf_exit_barrier_fixup)
@@ -299,6 +313,10 @@ SECTIONS
#else
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
+#ifdef CONFIG_UBSAN
+ *(.data..Lubsan_data*)
+ *(.data..Lubsan_type*)
+#endif
*(.data.rel*)
*(.toc1)
*(.branch_lt)
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 85215e79db42..a0ebc29f30b2 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1214,12 +1214,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
/* We have a block of xive->nr_servers VPs. We just need to check
- * raw vCPU ids are below the expected limit for this guest's
- * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
- * index that can be safely used to compute a VP id that belongs
- * to the VP block.
+ * packed vCPU ids are below that limit.
*/
- return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
+ return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index d0c2db0e07fa..a59a94f02733 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -251,6 +251,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
}
state = &sb->irq_state[src];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_devel("%s: source %lx invalid !\n", __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
kvmppc_xive_select_irq(state, &hw_num, &xd);
arch_spin_lock(&sb->lock);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 4c0a7ee9fa00..321c12a9ef6b 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -234,6 +234,110 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
do_stf_exit_barrier_fixups(types);
}
+void do_uaccess_flush_fixups(enum l1d_flush_type types)
+{
+ unsigned int instrs[4], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___uaccess_flush_fixup);
+ end = PTRRELOC(&__stop___uaccess_flush_fixup);
+
+ instrs[0] = 0x60000000; /* nop */
+ instrs[1] = 0x60000000; /* nop */
+ instrs[2] = 0x60000000; /* nop */
+ instrs[3] = 0x4e800020; /* blr */
+
+ i = 0;
+ if (types == L1D_FLUSH_FALLBACK) {
+ instrs[3] = 0x60000000; /* nop */
+ /* fallthrough to fallback flush */
+ }
+
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
+ }
+
+ printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
+ ? "ori+mttrig type"
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+ unsigned int instrs[3], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___entry_flush_fixup);
+ end = PTRRELOC(&__stop___entry_flush_fixup);
+
+ instrs[0] = 0x60000000; /* nop */
+ instrs[1] = 0x60000000; /* nop */
+ instrs[2] = 0x60000000; /* nop */
+
+ i = 0;
+ if (types == L1D_FLUSH_FALLBACK) {
+ instrs[i++] = 0x7d4802a6; /* mflr r10 */
+ instrs[i++] = 0x60000000; /* branch patched below */
+ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ }
+
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+ if (types == L1D_FLUSH_FALLBACK)
+ patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
+ BRANCH_SET_LINK);
+ else
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ }
+
+ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
+ ? "ori+mttrig type"
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+}
+
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
unsigned int instrs[3], *dest;
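A note on the magic numbers in these patch arrays: 0x60000000 is `nop` (`ori 0,0,0`) and 0x4e800020 is `blr`; the speculation-barrier and flush instructions are ordinary `ori rN,rN,0` encodings. A quick encoder showing where 0x63ff0000 and 0x63de0000 come from (primary opcode 24):

```c
/* Sketch: encode "ori rA,rS,UI"; ppc_ori(0, 0, 0) == 0x60000000 == nop. */
static unsigned int ppc_ori(unsigned int rs, unsigned int ra, unsigned int ui)
{
	return (24u << 26) | (rs << 21) | (ra << 16) | (ui & 0xffff);
}
/* ppc_ori(31, 31, 0) == 0x63ff0000; ppc_ori(30, 30, 0) == 0x63de0000 */
```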
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5e147986400d..3b4e9e4e25ea 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,7 +5,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o \
+obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o
@@ -16,7 +16,6 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
obj-$(CONFIG_PPC_PTDUMP) += ptdump/
obj-$(CONFIG_KASAN) += kasan/
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 0203cdf48c54..52e170bd95ae 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -68,7 +68,7 @@ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned in
rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
- : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
+ : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
: "memory");
}
@@ -92,16 +92,15 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
asm volatile("ptesync": : :"memory");
/*
- * Flush the first set of the TLB, and any caching of partition table
- * entries. Then flush the remaining sets of the TLB. Hash mode uses
- * partition scoped TLB translations.
+ * Flush the partition table cache if this is HV mode.
*/
- tlbiel_hash_set_isa300(0, is, 0, 2, 0);
- for (set = 1; set < num_sets; set++)
- tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+ if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ tlbiel_hash_set_isa300(0, is, 0, 2, 0);
/*
- * Now invalidate the process table cache.
+ * Now invalidate the process table cache. UPRT=0 HPT modes (what
+ * current hardware implements) do not use the process table, but
+ * add the flushes anyway.
*
* From ISA v3.0B p. 1078:
* The following forms are invalid.
@@ -110,6 +109,14 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
*/
tlbiel_hash_set_isa300(0, is, 0, 2, 1);
+ /*
+ * Then flush the sets of the TLB proper. Hash mode uses
+ * partition scoped TLB translations, which may be flushed
+ * in !HV mode.
+ */
+ for (set = 0; set < num_sets; set++)
+ tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+
ppc_after_tlbiel_barrier();
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 1c54821de7bf..0c8557220ae2 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@@ -307,3 +308,22 @@ void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
isync();
}
#endif
+
+/**
+ * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
+ *
+ * This clears the CPU from mm_cpumask for all processes, and then flushes the
+ * local TLB to ensure TLB coherency in case the CPU is onlined again.
+ *
+ * KVM guest translations are not necessarily flushed here. If KVM started
+ * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+void cleanup_cpu_mmu_context(void)
+{
+ int cpu = smp_processor_id();
+
+ clear_tasks_mm_cpumask(cpu);
+ tlbiel_all();
+}
+#endif
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
deleted file mode 100644
index 624b4438aff9..000000000000
--- a/arch/powerpc/mm/highmem.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * PowerPC version, stolen from the i386 version.
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- *
- * Reworked for PowerPC by various contributors. Moved from
- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
- local_flush_tlb_page(NULL, vaddr);
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END))
- return;
-
- if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
- int type = kmap_atomic_idx();
- unsigned int idx;
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
- }
-
- kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
new file mode 100644
index 000000000000..fa9a7a718fc6
--- /dev/null
+++ b/arch/powerpc/mm/maccess.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ return is_kernel_addr((unsigned long)unsafe_src);
+}
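This new hook lets the generic nofault accessor veto source ranges before any fault-disabled copy is attempted; on powerpc only kernel addresses pass. The generic caller in mm/maccess.c works roughly like this (sketch, copy loop elided):

```c
/* Sketch of the generic caller the hook above serves. */
long copy_from_kernel_nofault_sketch(void *dst, const void *src, size_t size)
{
	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;	/* powerpc: user addresses rejected here */

	pagefault_disable();
	/* ... exception-table-protected copy elided ... */
	pagefault_enable();
	return 0;
}
```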
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 01ec2a252f09..25284fdb300c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -50,6 +50,7 @@
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
+#include <asm/mmzone.h>
#include <mm/mmu_decl.h>
@@ -61,11 +62,6 @@
unsigned long long memory_limit;
bool init_mem_is_free;
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-#endif
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -235,8 +231,6 @@ void __init paging_init(void)
map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-
- kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
#endif /* CONFIG_HIGHMEM */
printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 63f61d8b55e5..f2bf98bdcea2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -742,8 +742,7 @@ static int __init parse_numa_properties(void)
of_node_put(cpu);
}
- if (likely(nid > 0))
- node_set_online(nid);
+ node_set_online(nid);
}
get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 08643cba1494..6586f7e71cfb 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2074,6 +2074,9 @@ static struct pmu power_pmu = {
.sched_task = power_pmu_sched_task,
};
+#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
+ PERF_SAMPLE_PHYS_ADDR | \
+ PERF_SAMPLE_DATA_PAGE_SIZE)
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -2129,8 +2132,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
- if (event->attr.sample_type &
- (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
+ if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE)
perf_get_data_addr(event, regs, &data.addr);
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
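PERF_SAMPLE_ADDR_TYPE groups the three sample flags that all need a data address, so the new PERF_SAMPLE_DATA_PAGE_SIZE requests also trigger perf_get_data_addr(). From userspace the new sample type is requested like any other; a minimal attr sketch:

```c
/* Userspace sketch: sample data addresses plus their page size. */
#include <linux/perf_event.h>
#include <string.h>

static void init_attr_sketch(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CACHE_MISSES;
	attr->sample_period = 10000;
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
			    PERF_SAMPLE_DATA_PAGE_SIZE;
}
```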
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ed4fcccf8a9..e106909ff9c3 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -1336,7 +1336,7 @@ static void dump_trace_imc_data(struct perf_event *event)
/* If this is a valid record, create the sample */
struct perf_output_handle handle;
- if (perf_output_begin(&handle, event, header.size))
+ if (perf_output_begin(&handle, &data, event, header.size))
return;
perf_output_sample(&handle, &header, &data, event);
@@ -1500,6 +1500,7 @@ static int update_pmu_ops(struct imc_pmu *pmu)
pmu->pmu.stop = trace_imc_event_stop;
pmu->pmu.read = trace_imc_event_read;
pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
+ break;
default:
break;
}
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 8e53f2fc3fe0..6f681b105eec 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -144,8 +144,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 026c181a98c5..60b5583e9eaf 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -74,7 +74,7 @@ static struct spu_context *coredump_next_context(int *fd)
*fd = n - 1;
rcu_read_lock();
- file = fcheck(*fd);
+ file = lookup_fd_rcu(*fd);
ctx = SPUFS_I(file_inode(file))->i_ctx;
get_spu_context(ctx);
rcu_read_unlock();
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 74ebe664b016..adae2a6712e1 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -911,6 +911,8 @@ static int smp_core99_cpu_disable(void)
mpic_cpu_set_priority(0xf);
+ cleanup_cpu_mmu_context();
+
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 9acaa0f131b9..4426a109ec2f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -98,7 +98,7 @@ static void init_fw_feat_flags(struct device_node *np)
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
-static void pnv_setup_rfi_flush(void)
+static void pnv_setup_security_mitigations(void)
{
struct device_node *np, *fw_features;
enum l1d_flush_type type;
@@ -122,12 +122,31 @@ static void pnv_setup_rfi_flush(void)
type = L1D_FLUSH_ORI;
}
+ /*
+ * If we are bare metal on a non-Power9 CPU, we don't need to flush on
+ * kernel entry or after user access: those flushes address a
+ * Power9-specific vulnerability.
+ */
+ if (!pvr_version_is(PVR_POWER9)) {
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+ }
+
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
(security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable);
setup_count_cache_flush();
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+ setup_entry_flush(enable);
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+ setup_uaccess_flush(enable);
+
+ setup_stf_barrier();
}
static void __init pnv_check_guarded_cores(void)
@@ -156,8 +175,7 @@ static void __init pnv_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
- pnv_setup_rfi_flush();
- setup_stf_barrier();
+ pnv_setup_security_mitigations();
/* Initialize SMP */
pnv_smp_init();
@@ -193,11 +211,16 @@ static void __init pnv_init(void)
add_preferred_console("hvc", 0, NULL);
if (!radix_enabled()) {
+ size_t size = sizeof(struct slb_entry) * mmu_slb_size;
int i;
/* Allocate per cpu area to save old slb contents during MCE */
- for_each_possible_cpu(i)
- paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
+ for_each_possible_cpu(i) {
+ paca_ptrs[i]->mce_faulty_slbs =
+ memblock_alloc_node(size,
+ __alignof__(struct slb_entry),
+ cpu_to_node(i));
+ }
}
}
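
The last hunk above also fixes the allocation size: the old code passed mmu_slb_size, an entry count, where a byte count was expected, undersizing the per-CPU MCE save area. A hedged restatement of the corrected pattern:

/* Hedged sketch: allocate count * sizeof(element), never the bare
 * count; mmu_slb_size is a number of SLB entries, not bytes. */
size_t size = sizeof(struct slb_entry) * mmu_slb_size;

paca_ptrs[i]->mce_faulty_slbs =
	memblock_alloc_node(size, __alignof__(struct slb_entry),
			    cpu_to_node(i));
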
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 54c4ba45c7ce..cbb67813cd5d 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -143,6 +143,9 @@ static int pnv_smp_cpu_disable(void)
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
+
+ cleanup_cpu_mmu_context();
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index f2837e33bf5d..a02012f1b04a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -90,6 +90,9 @@ static int pseries_cpu_disable(void)
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
+
+ cleanup_cpu_mmu_context();
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index d6f4162478a5..2f73cb5bf12d 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -349,8 +349,8 @@ void post_mobility_fixup(void)
cpus_read_unlock();
- /* Possibly switch to a new RFI flush type */
- pseries_setup_rfi_flush();
+ /* Possibly switch to a new L1 flush type */
+ pseries_setup_security_mitigations();
/* Reinitialise system information for hv-24x7 */
read_24x7_sys_info();
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 133f6adcb39c..b3ac2455faad 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -458,7 +458,8 @@ again:
return hwirq;
}
- virq = irq_create_mapping(NULL, hwirq);
+ virq = irq_create_mapping_affinity(NULL, hwirq,
+ entry->affinity);
if (!virq) {
pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 13fa370a87e4..593840847cd3 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -111,7 +111,7 @@ static inline unsigned long cmo_get_page_size(void)
int dlpar_workqueue_init(void);
-void pseries_setup_rfi_flush(void);
+void pseries_setup_security_mitigations(void);
void pseries_lpar_read_hblkrm_characteristics(void);
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 633c45ec406d..090c13f6c881 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -542,7 +542,7 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
-void pseries_setup_rfi_flush(void)
+void pseries_setup_security_mitigations(void)
{
struct h_cpu_char_result result;
enum l1d_flush_type types;
@@ -579,6 +579,16 @@ void pseries_setup_rfi_flush(void)
setup_rfi_flush(types, enable);
setup_count_cache_flush();
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+ setup_entry_flush(enable);
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+ setup_uaccess_flush(enable);
+
+ setup_stf_barrier();
}
#ifdef CONFIG_PCI_IOV
@@ -768,8 +778,7 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
- pseries_setup_rfi_flush();
- setup_stf_barrier();
+ pseries_setup_security_mitigations();
pseries_lpar_read_hblkrm_characteristics();
/* By default, only probe PCI (can be overridden by rtas_pci) */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 44377fd7860e..880c2b3b65d0 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -14,6 +14,7 @@ config RISCV
def_bool y
select ARCH_CLOCKSOURCE_INIT
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
@@ -37,7 +38,6 @@ config RISCV
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_CLOCKEVENTS
select GENERIC_EARLY_IOREMAP
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
select GENERIC_IOREMAP
@@ -153,9 +153,6 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
config SYS_SUPPORTS_HUGETLBFS
depends on MMU
def_bool y
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 67c463812e2d..250defa06f3a 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -13,34 +13,16 @@
#include <linux/mm.h>
#include <linux/sched.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *task)
-{
-}
-
-/* Initialize context-related info for a new mm_struct */
-static inline int init_new_context(struct task_struct *task,
- struct mm_struct *mm)
-{
- return 0;
-}
-
-static inline void destroy_context(struct mm_struct *mm)
-{
-}
-
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *task);
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, NULL);
}
-static inline void deactivate_mm(struct task_struct *task,
- struct mm_struct *mm)
-{
-}
+#include <asm-generic/mmu_context.h>
#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
index b0ab66e5fdb1..5b2e79e5bfa5 100644
--- a/arch/riscv/include/asm/pgtable-32.h
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -14,4 +14,6 @@
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 34
+
#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 183f1f4b2ae6..41a72861987c 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -461,8 +461,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
#define VMALLOC_START 0
#define VMALLOC_END TASK_SIZE
-static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
-
#endif /* !CONFIG_MMU */
#define kern_addr_valid(addr) (1) /* FIXME */
diff --git a/arch/riscv/include/asm/seccomp.h b/arch/riscv/include/asm/seccomp.h
index bf7744ee3b3d..c7ee6a3507be 100644
--- a/arch/riscv/include/asm/seccomp.h
+++ b/arch/riscv/include/asm/seccomp.h
@@ -7,4 +7,14 @@
#include <asm-generic/seccomp.h>
+#ifdef CONFIG_64BIT
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "riscv64"
+#else /* !CONFIG_64BIT */
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV32
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "riscv32"
+#endif
+
#endif /* _ASM_SECCOMP_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 4c5bae7ca01c..d690b08dff2a 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -24,6 +24,7 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index ab104905d4db..81de51e6aa32 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -60,6 +60,8 @@ static inline u32 get_cycles_hi(void)
}
#define get_cycles_hi get_cycles_hi
+#endif /* !CONFIG_RISCV_M_MODE */
+
#ifdef CONFIG_64BIT
static inline u64 get_cycles64(void)
{
@@ -79,8 +81,6 @@ static inline u64 get_cycles64(void)
}
#endif /* CONFIG_64BIT */
-#endif /* !CONFIG_RISCV_M_MODE */
-
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index c47e6b35c551..824b2c9da75b 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -476,7 +476,7 @@ do { \
do { \
long __kr_err; \
\
- __put_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
if (unlikely(__kr_err)) \
goto err_label; \
} while (0)
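
The one-line fix above swaps the reversed operands: __put_kernel_nofault(dst, src, type, err_label) must load the value from src and store it at dst, while the old code did the opposite and clobbered the source. A hedged sketch of the intended data flow, with the fault handling elided:

/* Hedged sketch (fault handling omitted): the value travels from
 * src to dst, which is what the corrected macro arguments express. */
#include <linux/string.h>

static void put_kernel_word(void *dst, const void *src)
{
	unsigned long tmp;

	memcpy(&tmp, src, sizeof(tmp));	/* load *src ... */
	memcpy(dst, &tmp, sizeof(tmp));	/* ... store it to *dst */
}
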
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index 82a5693b1861..134388cbaaa1 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -4,6 +4,8 @@
#ifndef __ASSEMBLY__
+#include <asm/barrier.h>
+
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 99e12faa5498..765b62434f30 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 11e2a4fe66e0..7e849797c9c3 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -35,6 +35,10 @@ ENTRY(_start)
.word 0
#endif
.balign 8
+#ifdef CONFIG_RISCV_M_MODE
+ /* Image load offset (0MB) from start of RAM for M-mode */
+ .dword 0
+#else
#if __riscv_xlen == 64
/* Image load offset(2MB) from start of RAM */
.dword 0x200000
@@ -42,6 +46,7 @@ ENTRY(_start)
/* Image load offset(4MB) from start of RAM */
.dword 0x400000
#endif
+#endif
/* Effective size of kernel image */
.dword _end - _start
.dword __HEAD_FLAGS
diff --git a/arch/riscv/kernel/perf_regs.c b/arch/riscv/kernel/perf_regs.c
index 04a38fbeb9c7..fd304a248de6 100644
--- a/arch/riscv/kernel/perf_regs.c
+++ b/arch/riscv/kernel/perf_regs.c
@@ -36,8 +36,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 19225ec65db6..dd5f985b1f40 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -36,7 +36,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
void arch_cpu_idle(void)
{
wait_for_interrupt();
- local_irq_enable();
+ raw_local_irq_enable();
}
void show_regs(struct pt_regs *regs)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index c424cc6dd833..117f3212a8e4 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -75,6 +75,7 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
early_ioremap_setup();
+ jump_label_init();
parse_early_param();
efi_init();
diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore
index 11ebee9e4c1d..3a19def868ec 100644
--- a/arch/riscv/kernel/vdso/.gitignore
+++ b/arch/riscv/kernel/vdso/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
*.tmp
+vdso-syms.S
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 7d6a94d45ec9..0cfd6da784f8 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so
SYSCFLAGS_vdso.so.dbg = $(c_flags)
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--build-id=sha1 -Wl,--hash-style=both
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld --just-symbols we can then
# refer to these symbols in the kernel code rather than hand-coded addresses.
-
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- -Wl,--build-id=sha1 -Wl,--hash-style=both
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
- $(call if_changed,vdsold)
-
-LDFLAGS_vdso-syms.o := -r --just-symbols
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
- $(call if_changed,ld)
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+ $(call if_changed,so2s)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD $@
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S $@
+ cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh
new file mode 100755
index 000000000000..e64cb6d9440e
--- /dev/null
+++ b/arch/riscv/kernel/vdso/so2s.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 1359e21c0c62..3c8b9e433c67 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -86,6 +86,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
int index;
+ unsigned long pfn;
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs))
@@ -100,7 +101,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* of a task switch.
*/
index = pgd_index(addr);
- pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
+ pfn = csr_read(CSR_SATP) & SATP_PPN;
+ pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k)) {
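
The masking added above matters because satp is not a bare frame number: the CSR packs MODE and ASID fields above the PPN, so converting the raw register with pfn_to_virt() yields a bogus root-table address whenever either field is non-zero. A hedged sketch of the extraction, field layout per the RISC-V privileged spec:

/* Hedged sketch: satp = MODE | ASID | PPN, with the PPN in the low
 * bits. Only the PPN may be turned into the root table's address. */
unsigned long pfn = csr_read(CSR_SATP) & SATP_PPN;
pgd_t *pgd = (pgd_t *)pfn_to_virt(pfn) + pgd_index(addr);
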
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index ea933b789a88..8e577f14f120 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -154,9 +154,8 @@ disable:
void __init setup_bootmem(void)
{
- phys_addr_t mem_size = 0;
- phys_addr_t total_mem = 0;
- phys_addr_t mem_start, start, end = 0;
+ phys_addr_t mem_start = 0;
+ phys_addr_t start, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
u64 i;
@@ -164,21 +163,18 @@ void __init setup_bootmem(void)
/* Find the memory region containing the kernel */
for_each_mem_range(i, &start, &end) {
phys_addr_t size = end - start;
- if (!total_mem)
+ if (!mem_start)
mem_start = start;
if (start <= vmlinux_start && vmlinux_end <= end)
BUG_ON(size == 0);
- total_mem = total_mem + size;
}
/*
- * Remove memblock from the end of usable area to the
- * end of region
+ * The maximum physical memory size is -PAGE_OFFSET.
+ * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed,
+ * as it is unusable by the kernel.
*/
- mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
- if (mem_start + mem_size < end)
- memblock_remove(mem_start + mem_size,
- end - mem_start - mem_size);
+ memblock_enforce_memory_limit(mem_start - PAGE_OFFSET);
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
@@ -297,6 +293,7 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
+pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
@@ -494,6 +491,18 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
load_pa + (va - PAGE_OFFSET),
map_size, PAGE_KERNEL_EXEC);
+#ifndef __PAGETABLE_PMD_FOLDED
+ /* Setup early PMD for DTB */
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+ (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
+ /* Create two consecutive PMD mappings for FDT early scan */
+ pa = dtb_pa & ~(PMD_SIZE - 1);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+ pa, PMD_SIZE, PAGE_KERNEL);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+ pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
/* Create two consecutive PGD mappings for FDT early scan */
pa = dtb_pa & ~(PGDIR_SIZE - 1);
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
@@ -501,6 +510,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
+#endif
dtb_early_pa = dtb_pa;
/*
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 19fecb362d81..87ba5a68bbb8 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -184,6 +184,7 @@ int set_direct_map_default_noflush(struct page *page)
return ret;
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!debug_pagealloc_enabled())
@@ -196,3 +197,33 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
__set_memory((unsigned long)page_address(page), numpages,
__pgprot(0), __pgprot(_PAGE_PRESENT));
}
+#endif
+
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgd_t *pgd;
+ pud_t *pud;
+ p4d_t *p4d;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset_k(addr);
+ if (!pgd_present(*pgd))
+ return false;
+
+ p4d = p4d_offset(pgd, addr);
+ if (!p4d_present(*p4d))
+ return false;
+
+ pud = pud_offset(p4d, addr);
+ if (!pud_present(*pud))
+ return false;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return false;
+
+ pte = pte_offset_kernel(pmd, addr);
+ return pte_present(*pte);
+}
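
With DEBUG_PAGEALLOC enabled, pages freed back to the allocator are unmapped from the direct map, so generic code such as hibernation must ask before reading through page_address(). A hedged sketch of a caller of the walker above (the copy helper is an invented name):

/* Hedged sketch: skip direct-map pages that are currently unmapped. */
if (!kernel_page_present(page))
	return;	/* reading via page_address() would fault */
copy_page_for_snapshot(dst, page_address(page));	/* hypothetical */
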
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4a2a12be04c9..f795eebf648f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -35,9 +35,6 @@ config GENERIC_LOCKBREAK
config PGSTE
def_bool y if KVM
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
config AUDIT_ARCH
def_bool y
@@ -53,8 +50,7 @@ config ARCH_SUPPORTS_UPROBES
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0x18000000000000 if KASAN_S390_4_LEVEL_PAGING
- default 0x30000000000
+ default 0x18000000000000
config S390
def_bool y
@@ -106,6 +102,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
@@ -116,7 +113,6 @@ config S390
select CLONE_BACKWARDS2
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
- select GENERIC_CLOCKEVENTS
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_FIND_FIRST_BIT
@@ -181,6 +177,7 @@ config S390
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_VIRT_CPU_ACCOUNTING_IDLE
select IOMMU_HELPER if PCI
select IOMMU_SUPPORT if PCI
select MODULES_USE_ELF_RELA
@@ -191,7 +188,6 @@ config S390
select PCI_DOMAINS if PCI
select PCI_MSI if PCI
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
- select SET_FS
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
@@ -714,7 +710,7 @@ if PCI
config PCI_NR_FUNCTIONS
int "Maximum number of PCI functions (1-4096)"
range 1 4096
- default "128"
+ default "512"
help
This allows you to specify the maximum number of PCI functions which
this kernel will support.
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index ab48b694ade8..6bfaceebbbc0 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -5,3 +5,11 @@ config TRACE_IRQFLAGS_SUPPORT
config EARLY_PRINTK
def_bool y
+
+config DEBUG_USER_ASCE
+ bool "Debug User ASCE"
+ help
+ Check on exit to user space that address space control
+	  elements are set up correctly.
+
+ If unsure, say N.
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index ba94b03c8b2f..8db267d2a543 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -25,7 +25,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
-KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
+KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 2ea603f70c3b..8b50967f5804 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -2,20 +2,32 @@
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H
+#include <linux/types.h>
+
+#define BOOT_STACK_OFFSET 0x8000
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
void startup_kernel(void);
-void detect_memory(void);
+unsigned long detect_memory(void);
+bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
void setup_boot_command_line(void);
void parse_boot_command_line(void);
-void setup_memory_end(void);
void verify_facilities(void);
void print_missing_facilities(void);
void print_pgm_check_info(void);
unsigned long get_random_base(unsigned long safe_addr);
+void __printf(1, 2) decompressor_printk(const char *fmt, ...);
-extern int kaslr_enabled;
extern const char kernel_version[];
+extern unsigned long memory_limit;
+extern int vmalloc_size_set;
+extern int kaslr_enabled;
unsigned long read_ipl_report(unsigned long safe_offset);
+#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index 765a08f1bd77..01d93832cf4a 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
vmlinux
vmlinux.lds
+vmlinux.syms
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index b235ed95a3d8..de18dab518bb 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,21 +10,39 @@ GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
-obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) piggy.o info.o
+obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+obj-all := $(obj-y) piggy.o syms.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += info.bin $(obj-y)
+targets += info.bin syms.bin vmlinux.syms $(obj-all)
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
OBJCOPYFLAGS :=
OBJECTS := $(addprefix $(obj)/,$(obj-y))
+OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all))
-LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
+LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS_ALL) FORCE
$(call if_changed,ld)
+LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T
+$(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
+ $(call if_changed,ld)
+
+quiet_cmd_dumpsyms = DUMPSYMS $<
+define cmd_dumpsyms
+ $(NM) -n -S --format=bsd "$<" | $(PERL) -ne '/(\w+)\s+(\w+)\s+[tT]\s+(\w+)/ and printf "%x %x %s\0",hex $$1,hex $$2,$$3' > "$@"
+endef
+
+$(obj)/syms.bin: $(obj)/vmlinux.syms FORCE
+ $(call if_changed,dumpsyms)
+
+OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms
+$(obj)/syms.o: $(obj)/syms.bin FORCE
+ $(call if_changed,objcopy)
+
OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
$(obj)/info.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
index c15eb7114d83..41f0ad97a4db 100644
--- a/arch/s390/boot/compressed/decompressor.h
+++ b/arch/s390/boot/compressed/decompressor.h
@@ -2,8 +2,10 @@
#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
#define BOOT_COMPRESSED_DECOMPRESSOR_H
+#include <linux/stddef.h>
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
-static inline void *decompress_kernel(void) {}
+static inline void *decompress_kernel(void) { return NULL; }
#else
void *decompress_kernel(void);
#endif
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 9427e2cd0c15..27a09c1c78f6 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -27,6 +27,7 @@ SECTIONS
*(.rodata.*)
_erodata = . ;
}
+ NOTES
.data : {
_data = . ;
*(.data)
@@ -82,6 +83,14 @@ SECTIONS
*(.vmlinux.info)
}
+ .decompressor.syms : {
+ . += 1; /* make sure we have \0 before the first entry */
+ . = ALIGN(2);
+ _decompressor_syms_start = .;
+ *(.decompressor.syms)
+ _decompressor_syms_end = .;
+ }
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = 0x100000;
#else
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 1a2c2b1ed964..dacb7813f982 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -28,6 +28,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
+#include "boot.h"
#define ARCH_OFFSET 4
@@ -62,8 +63,12 @@ __HEAD
.org __LC_RST_NEW_PSW # 0x1a0
.quad 0,iplstart
+ .org __LC_EXT_NEW_PSW # 0x1b0
+ .quad 0x0002000180000000,0x1b0 # disabled wait
.org __LC_PGM_NEW_PSW # 0x1d0
.quad 0x0000000180000000,startup_pgm_check_handler
+ .org __LC_IO_NEW_PSW # 0x1f0
+ .quad 0x0002000180000000,0x1f0 # disabled wait
.org 0x200
@@ -275,8 +280,8 @@ iplstart:
# or linload or SALIPL
#
.org 0x10000
-ENTRY(startup)
- j .Lep_startup_normal
+SYM_CODE_START(startup)
+ j startup_normal
.org EP_OFFSET
#
# This is a list of s390 kernel entry points. At address 0x1000f the number of
@@ -290,9 +295,9 @@ ENTRY(startup)
# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
#
.org 0x10010
-ENTRY(startup_kdump)
- j .Lep_startup_kdump
-.Lep_startup_normal:
+ j startup_kdump
+SYM_CODE_END(startup)
+SYM_CODE_START_LOCAL(startup_normal)
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -303,6 +308,9 @@ ENTRY(startup_kdump)
sam64 # switch to 64 bit addressing mode
basr %r13,0 # get base
.LPG0:
+ mvc __LC_EXT_NEW_PSW(16),.Lext_new_psw-.LPG0(%r13)
+ mvc __LC_PGM_NEW_PSW(16),.Lpgm_new_psw-.LPG0(%r13)
+ mvc __LC_IO_NEW_PSW(16),.Lio_new_psw-.LPG0(%r13)
xc 0x200(256),0x200 # partially clear lowcore
xc 0x300(256),0x300
xc 0xe00(256),0xe00
@@ -315,12 +323,18 @@ ENTRY(startup_kdump)
l %r15,.Lstack-.LPG0(%r13)
brasl %r14,verify_facilities
brasl %r14,startup_kernel
+SYM_CODE_END(startup_normal)
.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+BOOT_STACK_ORDER)) - STACK_FRAME_OVERHEAD
+ .long BOOT_STACK_OFFSET + BOOT_STACK_SIZE - STACK_FRAME_OVERHEAD
.align 8
6: .long 0x7fffffff,0xffffffff
-
+.Lext_new_psw:
+ .quad 0x0002000180000000,0x1b0 # disabled wait
+.Lpgm_new_psw:
+ .quad 0x0000000180000000,startup_pgm_check_handler
+.Lio_new_psw:
+ .quad 0x0002000180000000,0x1f0 # disabled wait
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
@@ -359,7 +373,7 @@ ENTRY(startup_kdump)
# It simply saves general/control registers and psw in
# the save area and does disabled wait with a faulty address.
#
-ENTRY(startup_pgm_check_handler)
+SYM_CODE_START_LOCAL(startup_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
la %r8,4095
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
@@ -378,9 +392,9 @@ ENTRY(startup_pgm_check_handler)
la %r8,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
lpswe __LC_RETURN_PSW # disabled wait
+SYM_CODE_END(startup_pgm_check_handler)
.Ldump_info_stack:
.long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
-ENDPROC(startup_pgm_check_handler)
#
# params at 10400 (setup.h)
diff --git a/arch/s390/boot/head_kdump.S b/arch/s390/boot/head_kdump.S
index 174d6959bf5b..f015469e7db9 100644
--- a/arch/s390/boot/head_kdump.S
+++ b/arch/s390/boot/head_kdump.S
@@ -19,8 +19,7 @@
# Note: This code has to be position independent
#
-.align 2
-.Lep_startup_kdump:
+SYM_CODE_START_LOCAL(startup_kdump)
lhi %r1,2 # mode 2 = esame (dump)
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode
sam64 # Switch to 64 bit addressing
@@ -87,14 +86,15 @@
startup_kdump_relocated:
basr %r13,0
0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
+SYM_CODE_END(startup_kdump)
.align 8
.Lrestart_psw:
.quad 0x0000000080000000,0x0000000000000000 + startup
#else
-.align 2
-.Lep_startup_kdump:
+SYM_CODE_START_LOCAL(startup_kdump)
larl %r13,startup_kdump_crash
lpswe 0(%r13)
+SYM_CODE_END(startup_kdump)
.align 8
startup_kdump_crash:
.quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index f94b91d72620..d372a45fe10e 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -17,10 +17,10 @@ int __bootdata_preserved(ipl_block_valid);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
-unsigned long __bootdata(memory_end);
-int __bootdata(memory_end_set);
int __bootdata(noexec_disabled);
+unsigned long memory_limit;
+int vmalloc_size_set;
int kaslr_enabled;
static inline int __diag308(unsigned long subcode, void *addr)
@@ -57,6 +57,17 @@ void store_ipl_parmblock(void)
ipl_block_valid = 1;
}
+bool is_ipl_block_dump(void)
+{
+ if (ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
+ ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
+ return true;
+ if (ipl_block.pb0_hdr.pbt == IPL_PBT_NVME &&
+ ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP)
+ return true;
+ return false;
+}
+
static size_t scpdata_length(const u8 *buf, size_t count)
{
while (count) {
@@ -237,13 +248,13 @@ void parse_boot_command_line(void)
while (*args) {
args = next_arg(args, &param, &val);
- if (!strcmp(param, "mem") && val) {
- memory_end = round_down(memparse(val, NULL), PAGE_SIZE);
- memory_end_set = 1;
- }
+ if (!strcmp(param, "mem") && val)
+ memory_limit = round_down(memparse(val, NULL), PAGE_SIZE);
- if (!strcmp(param, "vmalloc") && val)
+ if (!strcmp(param, "vmalloc") && val) {
vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);
+ vmalloc_size_set = 1;
+ }
if (!strcmp(param, "dfltcc") && val) {
if (!strcmp(val, "off"))
@@ -279,27 +290,3 @@ void parse_boot_command_line(void)
#endif
}
}
-
-static inline bool is_ipl_block_dump(void)
-{
- if (ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
- ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
- return true;
- if (ipl_block.pb0_hdr.pbt == IPL_PBT_NVME &&
- ipl_block.nvme.opt == IPL_PB0_NVME_OPT_DUMP)
- return true;
- return false;
-}
-
-void setup_memory_end(void)
-{
-#ifdef CONFIG_CRASH_DUMP
- if (OLDMEM_BASE) {
- kaslr_enabled = 0;
- } else if (ipl_block_valid && is_ipl_block_dump()) {
- kaslr_enabled = 0;
- if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
- memory_end_set = 1;
- }
-#endif
-}
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index d844a5ef9089..0dd48fbdbaa4 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -7,6 +7,7 @@
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
+#include <asm/kasan.h>
#include "compressed/decompressor.h"
#include "boot.h"
@@ -176,8 +177,14 @@ unsigned long get_random_base(unsigned long safe_addr)
unsigned long kasan_needs;
int i;
- if (memory_end_set)
- memory_limit = min(memory_limit, memory_end);
+ memory_limit = min(memory_limit, ident_map_size);
+
+ /*
+	 * Avoid placing the kernel at the end of physical memory,
+	 * which kasan will use for shadow memory and early pgtable
+	 * mapping allocations.
+ */
+ memory_limit -= kasan_estimate_memory_needs(memory_limit);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
if (safe_addr < INITRD_START + INITRD_SIZE)
@@ -185,28 +192,6 @@ unsigned long get_random_base(unsigned long safe_addr)
}
safe_addr = ALIGN(safe_addr, THREAD_SIZE);
- if ((IS_ENABLED(CONFIG_KASAN))) {
- /*
- * Estimate kasan memory requirements, which it will reserve
- * at the very end of available physical memory. To estimate
- * that, we take into account that kasan would require
- * 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the whole memory + shadow memory
- * region (1 + 1/8). To keep page tables estimates simple take
- * the double of combined ptes size.
- */
- memory_limit = get_mem_detect_end();
- if (memory_end_set && memory_limit > memory_end)
- memory_limit = memory_end;
-
- /* for shadow memory */
- kasan_needs = memory_limit / 8;
- /* for paging structures */
- kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
- _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
- memory_limit -= kasan_needs;
- }
-
kernel_size = vmlinux.image_size + vmlinux.bss_size;
if (safe_addr + kernel_size > memory_limit)
return 0;
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
index 62e7c13ce85c..40168e59abd3 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/mem_detect.c
@@ -8,7 +8,6 @@
#include "compressed/decompressor.h"
#include "boot.h"
-unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
/* up to 256 storage elements, 1020 subincrements each */
@@ -149,27 +148,29 @@ static void search_mem_end(void)
add_mem_detect_block(0, (offset + 1) << 20);
}
-void detect_memory(void)
+unsigned long detect_memory(void)
{
+ unsigned long max_physmem_end;
+
sclp_early_get_memsize(&max_physmem_end);
if (!sclp_early_read_storage_info()) {
mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
- return;
+ return max_physmem_end;
}
if (!diag260()) {
mem_detect.info_source = MEM_DETECT_DIAG260;
- return;
+ return max_physmem_end;
}
if (max_physmem_end) {
add_mem_detect_block(0, max_physmem_end);
mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
- return;
+ return max_physmem_end;
}
search_mem_end();
mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
- max_physmem_end = get_mem_detect_end();
+ return get_mem_detect_end();
}
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
index a3c9862bcede..3a46abed2549 100644
--- a/arch/s390/boot/pgm_check_info.c
+++ b/arch/s390/boot/pgm_check_info.c
@@ -1,99 +1,181 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/ctype.h>
+#include <asm/stacktrace.h>
+#include <asm/boot_data.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/sclp.h>
+#include <asm/uv.h>
+#include <stdarg.h>
#include "boot.h"
const char hex_asc[] = "0123456789abcdef";
-#define add_val_as_hex(dst, val) \
- __add_val_as_hex(dst, (const unsigned char *)&val, sizeof(val))
+static char *as_hex(char *dst, unsigned long val, int pad)
+{
+ char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
+
+ for (*p-- = 0; p >= dst; val >>= 4)
+ *p-- = hex_asc[val & 0x0f];
+ return end;
+}
-static char *__add_val_as_hex(char *dst, const unsigned char *src, size_t count)
+static char *symstart(char *p)
{
- while (count--)
- dst = hex_byte_pack(dst, *src++);
- return dst;
+ while (*p)
+ p--;
+ return p + 1;
}
-static char *add_str(char *dst, char *src)
+extern char _decompressor_syms_start[], _decompressor_syms_end[];
+static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
{
- strcpy(dst, src);
- return dst + strlen(dst);
+	/* symbol entries are in the form "10000 c4 startup\0" */
+ char *a = _decompressor_syms_start;
+ char *b = _decompressor_syms_end;
+ unsigned long start;
+ unsigned long size;
+ char *pivot;
+ char *endp;
+
+ while (a < b) {
+ pivot = symstart(a + (b - a) / 2);
+ start = simple_strtoull(pivot, &endp, 16);
+ size = simple_strtoull(endp + 1, &endp, 16);
+ if (ip < start) {
+ b = pivot;
+ continue;
+ }
+ if (ip > start + size) {
+ a = pivot + strlen(pivot) + 1;
+ continue;
+ }
+ *off = ip - start;
+ *len = size;
+ return endp + 1;
+ }
+ return NULL;
}
-void print_pgm_check_info(void)
+static noinline char *strsym(void *ip)
{
- struct psw_bits *psw = &psw_bits(S390_lowcore.psw_save_area);
- unsigned short ilc = S390_lowcore.pgm_ilc >> 1;
- char buf[256];
- int row, col;
+ static char buf[64];
+ unsigned short off;
+ unsigned short len;
char *p;
- add_str(buf, "Linux version ");
- strlcat(buf, kernel_version, sizeof(buf) - 1);
- strlcat(buf, "\n", sizeof(buf));
- sclp_early_printk(buf);
+ p = findsym((unsigned long)ip, &off, &len);
+ if (p) {
+ strncpy(buf, p, sizeof(buf));
+ /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
+ p = buf + strnlen(buf, sizeof(buf) - 15);
+ strcpy(p, "+0x");
+ p = as_hex(p + 3, off, 0);
+ strcpy(p, "/0x");
+ as_hex(p + 3, len, 0);
+ } else {
+ as_hex(buf, (unsigned long)ip, 16);
+ }
+ return buf;
+}
- p = add_str(buf, "Kernel fault: interruption code ");
- p = add_val_as_hex(buf + strlen(buf), S390_lowcore.pgm_code);
- p = add_str(p, " ilc:");
- *p++ = hex_asc_lo(ilc);
- add_str(p, "\n");
- sclp_early_printk(buf);
+void decompressor_printk(const char *fmt, ...)
+{
+ char buf[1024] = { 0 };
+ char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
+ unsigned long pad;
+ char *p = buf;
+ va_list args;
- if (kaslr_enabled) {
- p = add_str(buf, "Kernel random base: ");
- p = add_val_as_hex(p, __kaslr_offset);
- add_str(p, "\n");
- sclp_early_printk(buf);
+ va_start(args, fmt);
+ for (; p < end && *fmt; fmt++) {
+ if (*fmt != '%') {
+ *p++ = *fmt;
+ continue;
+ }
+ pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0;
+ switch (*fmt) {
+ case 's':
+ p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf));
+ break;
+ case 'p':
+ if (*++fmt != 'S')
+ goto out;
+ p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf));
+ break;
+ case 'l':
+ if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad))
+ goto out;
+ p = as_hex(p, va_arg(args, unsigned long), pad);
+ break;
+ case 'x':
+ if (end - p <= max(sizeof(int) * 2, pad))
+ goto out;
+ p = as_hex(p, va_arg(args, unsigned int), pad);
+ break;
+ default:
+ goto out;
+ }
}
-
- p = add_str(buf, "PSW : ");
- p = add_val_as_hex(p, S390_lowcore.psw_save_area.mask);
- p = add_str(p, " ");
- p = add_val_as_hex(p, S390_lowcore.psw_save_area.addr);
- add_str(p, "\n");
+out:
+ va_end(args);
sclp_early_printk(buf);
+}
- p = add_str(buf, " R:");
- *p++ = hex_asc_lo(psw->per);
- p = add_str(p, " T:");
- *p++ = hex_asc_lo(psw->dat);
- p = add_str(p, " IO:");
- *p++ = hex_asc_lo(psw->io);
- p = add_str(p, " EX:");
- *p++ = hex_asc_lo(psw->ext);
- p = add_str(p, " Key:");
- *p++ = hex_asc_lo(psw->key);
- p = add_str(p, " M:");
- *p++ = hex_asc_lo(psw->mcheck);
- p = add_str(p, " W:");
- *p++ = hex_asc_lo(psw->wait);
- p = add_str(p, " P:");
- *p++ = hex_asc_lo(psw->pstate);
- p = add_str(p, " AS:");
- *p++ = hex_asc_lo(psw->as);
- p = add_str(p, " CC:");
- *p++ = hex_asc_lo(psw->cc);
- p = add_str(p, " PM:");
- *p++ = hex_asc_lo(psw->pm);
- p = add_str(p, " RI:");
- *p++ = hex_asc_lo(psw->ri);
- p = add_str(p, " EA:");
- *p++ = hex_asc_lo(psw->eaba);
- add_str(p, "\n");
- sclp_early_printk(buf);
+static noinline void print_stacktrace(void)
+{
+ struct stack_info boot_stack = { STACK_TYPE_TASK, BOOT_STACK_OFFSET,
+ BOOT_STACK_OFFSET + BOOT_STACK_SIZE };
+ unsigned long sp = S390_lowcore.gpregs_save_area[15];
+ bool first = true;
- for (row = 0; row < 4; row++) {
- p = add_str(buf, row == 0 ? "GPRS:" : " ");
- for (col = 0; col < 4; col++) {
- p = add_str(p, " ");
- p = add_val_as_hex(p, S390_lowcore.gpregs_save_area[row * 4 + col]);
- }
- add_str(p, "\n");
- sclp_early_printk(buf);
+ decompressor_printk("Call Trace:\n");
+ while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
+ struct stack_frame *sf = (struct stack_frame *)sp;
+
+ decompressor_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" :
+ " sp:%016lx [<%016lx>] %pS\n",
+ sp, sf->gprs[8], (void *)sf->gprs[8]);
+ if (sf->back_chain <= sp)
+ break;
+ sp = sf->back_chain;
+ first = false;
}
}
+
+void print_pgm_check_info(void)
+{
+ unsigned long *gpregs = (unsigned long *)S390_lowcore.gpregs_save_area;
+ struct psw_bits *psw = &psw_bits(S390_lowcore.psw_save_area);
+
+ decompressor_printk("Linux version %s\n", kernel_version);
+ if (!is_prot_virt_guest() && early_command_line[0])
+ decompressor_printk("Kernel command line: %s\n", early_command_line);
+ decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
+ S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
+ if (kaslr_enabled)
+ decompressor_printk("Kernel random base: %lx\n", __kaslr_offset);
+ decompressor_printk("PSW : %016lx %016lx (%pS)\n",
+ S390_lowcore.psw_save_area.mask,
+ S390_lowcore.psw_save_area.addr,
+ (void *)S390_lowcore.psw_save_area.addr);
+ decompressor_printk(
+ " R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
+ psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
+ psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri,
+ psw->eaba);
+ decompressor_printk("GPRS: %016lx %016lx %016lx %016lx\n",
+ gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
+ decompressor_printk(" %016lx %016lx %016lx %016lx\n",
+ gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
+ decompressor_printk(" %016lx %016lx %016lx %016lx\n",
+ gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
+ decompressor_printk(" %016lx %016lx %016lx %016lx\n",
+ gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
+ print_stacktrace();
+ decompressor_printk("Last Breaking-Event-Address:\n");
+ decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.breaking_event_addr,
+ (void *)S390_lowcore.breaking_event_addr);
+}
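
The format parser in decompressor_printk() is deliberately tiny: it understands %s, %x and %lx (with an optional decimal zero-pad) plus %pS, and stops formatting at anything else. A hedged usage sketch within those limits:

/* Hedged sketch: only the conversions implemented above are safe;
 * %pS resolves an address against the embedded decompressor symbol
 * table built from .decompressor.syms. */
decompressor_printk("Kernel fault: interruption code %04x ilc:%x\n",
		    S390_lowcore.pgm_code, S390_lowcore.pgm_ilc >> 1);
decompressor_printk(" [<%016lx>] %pS\n", ip, (void *)ip);
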
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index cc96b04cc0ba..05f8eefa3dcf 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
+#include <asm/boot_data.h>
#include <asm/sections.h>
+#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
@@ -13,6 +15,7 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
+unsigned long __bootdata(ident_map_size);
/*
* Some code and data needs to stay below 2 GB, even when the kernel would be
@@ -58,6 +61,14 @@ void error(char *x)
disabled_wait();
}
+static void setup_lpp(void)
+{
+ S390_lowcore.current_pid = 0;
+ S390_lowcore.lpp = LPP_MAGIC;
+ if (test_facility(40))
+ lpp(&S390_lowcore.lpp);
+}
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
@@ -119,6 +130,46 @@ static void handle_relocs(unsigned long offset)
}
/*
+ * Merge information from several sources into a single ident_map_size value.
+ * "ident_map_size" represents the upper limit of physical memory we may ever
+ * reach. It might not be all online memory, but also include standby (offline)
+ * memory. "ident_map_size" could be lower then actual standby or even online
+ * memory present, due to limiting factors. We should never go above this limit.
+ * It is the size of our identity mapping.
+ *
+ * Consider the following factors:
+ * 1. max_physmem_end - end of physical memory online or standby.
+ * Always <= end of the last online memory block (get_mem_detect_end()).
+ * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
+ * kernel is able to support.
+ * 3. "mem=" kernel command line option which limits physical memory usage.
+ * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed
+ *    as a crash kernel.
+ * 5. "hsa" size which is a memory limit when the kernel is executed during
+ * zfcp/nvme dump.
+ */
+static void setup_ident_map_size(unsigned long max_physmem_end)
+{
+ unsigned long hsa_size;
+
+ ident_map_size = max_physmem_end;
+ if (memory_limit)
+ ident_map_size = min(ident_map_size, memory_limit);
+ ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
+
+#ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE) {
+ kaslr_enabled = 0;
+ ident_map_size = min(ident_map_size, OLDMEM_SIZE);
+ } else if (ipl_block_valid && is_ipl_block_dump()) {
+ kaslr_enabled = 0;
+ if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
+ ident_map_size = min(ident_map_size, hsa_size);
+ }
+#endif
+}
+
+/*
* This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
*/
static void clear_bss_section(void)
@@ -126,12 +177,27 @@ static void clear_bss_section(void)
memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}
+/*
+ * Set the vmalloc area size to an eighth of the (potential) physical
+ * memory size, unless it has been set via kernel command line parameter.
+ */
+static void setup_vmalloc_size(void)
+{
+ unsigned long size;
+
+ if (vmalloc_size_set)
+ return;
+ size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
+ vmalloc_size = max(size, vmalloc_size);
+}
+
void startup_kernel(void)
{
unsigned long random_lma;
unsigned long safe_addr;
void *img;
+ setup_lpp();
store_ipl_parmblock();
safe_addr = mem_safe_offset();
safe_addr = read_ipl_report(safe_addr);
@@ -140,8 +206,8 @@ void startup_kernel(void)
sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
- setup_memory_end();
- detect_memory();
+ setup_ident_map_size(detect_memory());
+ setup_vmalloc_size();
random_lma = __kaslr_offset = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
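
Taken together, the hunks above replace the old memory_end machinery with a single clamp: every limiting factor can only shrink the identity map. A hedged restatement with example numbers:

/* Hedged restatement of setup_ident_map_size(): e.g. 16G of standby
 * memory, mem=8G and a far larger MAX_PHYSMEM_BITS give
 * min(16G, 8G, ...) = 8G for the identity mapping. */
ident_map_size = max_physmem_end;
if (memory_limit)
	ident_map_size = min(ident_map_size, memory_limit);
ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
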
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 0784bf3caf43..1be32fcf6f2e 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -1,3 +1,4 @@
+CONFIG_UAPI_HEADER_TEST=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_WATCH_QUEUE=y
@@ -93,14 +94,15 @@ CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
-CONFIG_GUP_BENCHMARK=y
+CONFIG_GUP_TEST=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -378,7 +380,6 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
-# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
@@ -386,7 +387,7 @@ CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
-CONFIG_ZRAM=m
+CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
@@ -689,6 +690,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
@@ -709,7 +711,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -753,6 +754,7 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_CRC32_SELFTEST=y
CONFIG_CRC4=m
@@ -824,11 +826,13 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
+CONFIG_DEBUG_USER_ASCE=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_USERCOPY=y
CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 905bc8c4cfaf..e2171a008809 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -87,14 +87,15 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
+CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
-CONFIG_GUP_BENCHMARK=y
+CONFIG_GUP_TEST=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -371,7 +372,6 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
-# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
# CONFIG_PCIEASPM is not set
CONFIG_HOTPLUG_PCI=y
@@ -379,7 +379,7 @@ CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
-CONFIG_ZRAM=m
+CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
@@ -680,6 +680,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
@@ -701,7 +702,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -745,6 +745,7 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 8f67c55625f9..a302630341ef 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -17,11 +17,11 @@ CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
-# CONFIG_SECCOMP is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
+# CONFIG_SECCOMP is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index dd95cdbd22ce..7b947728d57e 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -2,7 +2,7 @@
/*
* s390 arch random implementation.
*
- * Copyright IBM Corp. 2017, 2018
+ * Copyright IBM Corp. 2017, 2020
* Author(s): Harald Freudenberger
*
* The s390_arch_random_generate() function may be called from random.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
+#include <linux/moduleparam.h>
#include <asm/cpacf.h>
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
@@ -99,6 +100,113 @@ static void arch_rng_refill_buffer(struct work_struct *unused)
queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}
+/*
+ * Here follows the implementation of s390_arch_get_random_long().
+ *
+ * The random longs to be pulled by arch_get_random_long() are
+ * prepared in an 4K buffer which is filled from the NIST 800-90
+ * compliant s390 drbg. By default the random long buffer is refilled
+ * 256 times before the drbg itself needs a reseed. The reseed of the
+ * drbg is done with 32 bytes fetched from the high quality (but slow)
+ * trng which is assumed to deliver 100% entropy. So the 32 * 8 = 256
+ * bits of entropy are spread over 256 * 4KB = 1MB serving 131072
+ * arch_get_random_long() invocations before reseeded.
+ *
+ * How often the 4K random long buffer is refilled with the drbg
+ * before the drbg is reseeded can be adjusted. There is a module
+ * parameter 'rndlong_drbg_reseed' accessible via
+ * /sys/module/arch_random/parameters/rndlong_drbg_reseed
+ * or as kernel command line parameter
+ * arch_random.rndlong_drbg_reseed=<value>
+ * This parameter tells how often the drbg fills the 4K buffer before
+ * it is re-seeded by fresh entropy from the trng.
+ * A value of 16 results in reseeding the drbg every 16 * 4 KB = 64
+ * KB with 32 bytes of fresh entropy pulled from the trng. So a value
+ * of 16 would result in 256 bits of entropy per 64 KB.
+ * A value of 256 results in 1MB of drbg output before a reseed of the
+ * drbg is done. So this would spread the 256 bits of entropy among 1MB.
+ * Setting this parameter to 0 forces the reseed to take place every
+ * time the 4K buffer is depleted, so the entropy rises to 256 bits
+ * entropy per 4K or 0.5 bit entropy per arch_get_random_long(). With
+ * setting this parameter to negative values all this effort is
+ * disabled, arch_get_random long() returns false and thus indicating
+ * that the arch_get_random_long() feature is disabled at all.
+ */
+
+static unsigned long rndlong_buf[512];
+static DEFINE_SPINLOCK(rndlong_lock);
+static int rndlong_buf_index;
+
+static int rndlong_drbg_reseed = 256;
+module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600);
+MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed");
+
+static inline void refill_rndlong_buf(void)
+{
+ static u8 prng_ws[240];
+ static int drbg_counter;
+
+ if (--drbg_counter < 0) {
+ /* need to re-seed the drbg */
+ u8 seed[32];
+
+ /* fetch seed from trng */
+ cpacf_trng(NULL, 0, seed, sizeof(seed));
+ /* seed drbg */
+ memset(prng_ws, 0, sizeof(prng_ws));
+ cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
+ &prng_ws, NULL, 0, seed, sizeof(seed));
+ /* re-init counter for drbg */
+ drbg_counter = rndlong_drbg_reseed;
+ }
+
+ /* fill the arch_get_random_long buffer from drbg */
+ cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws,
+ (u8 *) rndlong_buf, sizeof(rndlong_buf),
+ NULL, 0);
+}
+
+bool s390_arch_get_random_long(unsigned long *v)
+{
+ bool rc = false;
+ unsigned long flags;
+
+ /* arch_get_random_long() disabled ? */
+ if (rndlong_drbg_reseed < 0)
+ return false;
+
+ /* try to lock the random long lock */
+ if (!spin_trylock_irqsave(&rndlong_lock, flags))
+ return false;
+
+ if (--rndlong_buf_index >= 0) {
+ /* deliver next long value from the buffer */
+ *v = rndlong_buf[rndlong_buf_index];
+ rc = true;
+ goto out;
+ }
+
+ /* buffer is depleted and needs refill */
+ if (in_interrupt()) {
+ /* delay refill in interrupt context to next caller */
+ rndlong_buf_index = 0;
+ goto out;
+ }
+
+ /* refill random long buffer */
+ refill_rndlong_buf();
+ rndlong_buf_index = ARRAY_SIZE(rndlong_buf);
+
+ /* and provide one random long */
+ *v = rndlong_buf[--rndlong_buf_index];
+ rc = true;
+
+out:
+ spin_unlock_irqrestore(&rndlong_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(s390_arch_get_random_long);
+
static int __init s390_arch_random_init(void)
{
/* all the needed PRNO subfunctions available ? */
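As a rough check of the arithmetic in the comment above, the following standalone user-space sketch (not part of the patch; all names are illustrative) computes how far the 256 bits of trng seed entropy are spread for a given reseed setting:

#include <stdio.h>

/*
 * Sketch only: the buffer holds 4KB = 512 longs; the drbg refills it
 * 'reseed' times per 32-byte (256-bit) trng reseed, as described above.
 */
static void entropy_spread(int reseed)
{
	const unsigned long buf_bytes = 4096;
	const unsigned long longs_per_buf = buf_bytes / 8;
	unsigned long n = reseed > 0 ? (unsigned long)reseed : 1;
	double bits_per_long = 256.0 / (double)(n * longs_per_buf);

	printf("reseed=%3d: 256 bits over %4lu KB, %.5f bits per long\n",
	       reseed, (n * buf_bytes) >> 10, bits_per_long);
}

int main(void)
{
	entropy_spread(0);	/* 0.5 bits per arch_get_random_long() */
	entropy_spread(16);	/* 256 bits of entropy per 64 KB */
	entropy_spread(256);	/* default: 256 bits spread over 1 MB */
	return 0;
}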
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 5057773f82e9..b2f219ec379c 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -674,20 +674,6 @@ static const struct file_operations prng_tdes_fops = {
.llseek = noop_llseek,
};
-static struct miscdevice prng_sha512_dev = {
- .name = "prandom",
- .minor = MISC_DYNAMIC_MINOR,
- .mode = 0644,
- .fops = &prng_sha512_fops,
-};
-static struct miscdevice prng_tdes_dev = {
- .name = "prandom",
- .minor = MISC_DYNAMIC_MINOR,
- .mode = 0644,
- .fops = &prng_tdes_fops,
-};
-
-
/* chunksize attribute (ro) */
static ssize_t prng_chunksize_show(struct device *dev,
struct device_attribute *attr,
@@ -801,18 +787,30 @@ static struct attribute *prng_sha512_dev_attrs[] = {
&dev_attr_strength.attr,
NULL
};
+ATTRIBUTE_GROUPS(prng_sha512_dev);
+
static struct attribute *prng_tdes_dev_attrs[] = {
&dev_attr_chunksize.attr,
&dev_attr_byte_counter.attr,
&dev_attr_mode.attr,
NULL
};
+ATTRIBUTE_GROUPS(prng_tdes_dev);
-static struct attribute_group prng_sha512_dev_attr_group = {
- .attrs = prng_sha512_dev_attrs
+static struct miscdevice prng_sha512_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
+ .mode = 0644,
+ .fops = &prng_sha512_fops,
+ .groups = prng_sha512_dev_groups,
};
-static struct attribute_group prng_tdes_dev_attr_group = {
- .attrs = prng_tdes_dev_attrs
+
+static struct miscdevice prng_tdes_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
+ .mode = 0644,
+ .fops = &prng_tdes_fops,
+ .groups = prng_tdes_dev_groups,
};
@@ -867,13 +865,6 @@ static int __init prng_init(void)
prng_sha512_deinstantiate();
goto out;
}
- ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj,
- &prng_sha512_dev_attr_group);
- if (ret) {
- misc_deregister(&prng_sha512_dev);
- prng_sha512_deinstantiate();
- goto out;
- }
} else {
@@ -898,14 +889,6 @@ static int __init prng_init(void)
prng_tdes_deinstantiate();
goto out;
}
- ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj,
- &prng_tdes_dev_attr_group);
- if (ret) {
- misc_deregister(&prng_tdes_dev);
- prng_tdes_deinstantiate();
- goto out;
- }
-
}
out:
@@ -916,13 +899,9 @@ out:
static void __exit prng_exit(void)
{
if (prng_mode == PRNG_MODE_SHA512) {
- sysfs_remove_group(&prng_sha512_dev.this_device->kobj,
- &prng_sha512_dev_attr_group);
misc_deregister(&prng_sha512_dev);
prng_sha512_deinstantiate();
} else {
- sysfs_remove_group(&prng_tdes_dev.this_device->kobj,
- &prng_tdes_dev_attr_group);
misc_deregister(&prng_tdes_dev);
prng_tdes_deinstantiate();
}
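The prng.c rework above relies on misc_register() creating the sysfs attributes itself once the miscdevice carries a non-NULL .groups pointer, which removes the error-prone manual sysfs_create_group()/sysfs_remove_group() calls. A minimal sketch of the same pattern for a hypothetical driver (names are illustrative, not from this patch):

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_dev_attrs[] = {
	&dev_attr_foo.attr,
	NULL
};
ATTRIBUTE_GROUPS(example_dev);		/* defines example_dev_groups */

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_dev = {
	.name	= "example",
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &example_fops,
	.groups	= example_dev_groups,	/* created/removed by the misc core */
};

module_misc_device(example_dev);
MODULE_LICENSE("GPL");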
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index ada2f98c27b7..65ea12fc87a1 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -11,7 +11,8 @@
#define _CRYPTO_ARCH_S390_SHA_H
#include <linux/crypto.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <crypto/sha3.h>
/* must be big enough for the largest SHA variant */
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 698b1e6d3c14..a3fabf310a38 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <asm/cpacf.h>
#include "sha.h"
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index b52c87e44939..24983f175676 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/cpacf.h>
#include "sha.h"
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index 460cbbbaa44a..30ac49b635bf 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include <crypto/sha.h>
#include <crypto/sha3.h>
#include <asm/cpacf.h>
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index 72cf460a53e5..e70d50f7620f 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include <crypto/sha.h>
#include <crypto/sha3.h>
#include <asm/cpacf.h>
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index ad29db085a18..29a6bd404c59 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -8,7 +8,7 @@
* Author(s): Jan Glauber (jang@de.ibm.com)
*/
#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index de61ce562052..5dc712fde3c7 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -2,7 +2,7 @@
/*
* Kernel interface for the s390 arch_random_* functions
*
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017, 2020
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@@ -19,10 +19,13 @@
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;
+bool s390_arch_get_random_long(unsigned long *v);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
+ if (static_branch_likely(&s390_arch_random_available))
+ return s390_arch_get_random_long(v);
return false;
}
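With the header change above, the fast path is reached only when the static key has been enabled at init time. A short sketch of how a generic caller consumes the hook (illustrative only; real consumers mix the result with other entropy sources rather than returning 0):

#include <linux/random.h>	/* pulls in asm/archrandom.h */

static unsigned long get_seed_long(void)
{
	unsigned long v;

	if (arch_get_random_long(&v))	/* s390: drbg-backed buffer */
		return v;
	return 0;	/* sketch: a real caller falls back to other entropy */
}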
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index c0be5fe1ddba..e3e2ab0acf83 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -115,7 +115,7 @@ enum uc_todo {
};
/**
- * struct ccw driver - device driver for channel attached devices
+ * struct ccw_driver - device driver for channel attached devices
* @ids: ids supported by this driver
* @probe: function called on probe
* @remove: function called on remove
@@ -124,11 +124,6 @@ enum uc_todo {
* @notify: notify driver of device state changes
* @path_event: notify driver of channel path events
* @shutdown: called at device shutdown
- * @prepare: prepare for pm state transition
- * @complete: undo work done in @prepare
- * @freeze: callback for freezing during hibernation snapshotting
- * @thaw: undo work done in @freeze
- * @restore: callback for restoring after hibernation
* @uc_handler: callback for unit check handler
* @driver: embedded device driver structure
* @int_class: interruption class to use for accounting interrupts
@@ -142,11 +137,6 @@ struct ccw_driver {
int (*notify) (struct ccw_device *, int);
void (*path_event) (struct ccw_device *, int *);
void (*shutdown) (struct ccw_device *);
- int (*prepare) (struct ccw_device *);
- void (*complete) (struct ccw_device *);
- int (*freeze)(struct ccw_device *);
- int (*thaw) (struct ccw_device *);
- int (*restore)(struct ccw_device *);
enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
struct device_driver driver;
enum interruption_class int_class;
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 5c58756d6476..23dceb8d0453 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -329,7 +329,7 @@ struct ccw_dev_id {
};
/**
- * ccw_device_id_is_equal() - compare two ccw_dev_ids
+ * ccw_dev_id_is_equal() - compare two ccw_dev_ids
* @dev_id1: a ccw_dev_id
* @dev_id2: another ccw_dev_id
* Returns:
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 898323fd93d2..4a08379cd1eb 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -13,6 +13,7 @@
#ifndef _S390_DELAY_H
#define _S390_DELAY_H
+void udelay_enable(void);
void __ndelay(unsigned long long nsecs);
void __udelay(unsigned long long usecs);
void udelay_simple(unsigned long long usecs);
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 68d362f8d6c1..695c61989f97 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -2,16 +2,9 @@
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
-
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
#define MCOUNT_INSN_SIZE 6
-#else
-#define MCOUNT_INSN_SIZE 24
-#define MCOUNT_RETURN_FIXUP 18
-#endif
-
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#ifndef __ASSEMBLY__
@@ -22,7 +15,6 @@
#define ftrace_return_address(n) __builtin_return_address(n)
#endif
-void _mcount(void);
void ftrace_caller(void);
extern char ftrace_graph_caller_end;
@@ -30,12 +22,20 @@ extern unsigned long ftrace_plt;
struct dyn_arch_ftrace { };
-#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_ADDR 0
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#define KPROBE_ON_FTRACE_NOP 0
#define KPROBE_ON_FTRACE_CALL 1
+struct module;
+struct dyn_ftrace;
+/*
+ * Either -mhotpatch or -mnop-mcount is used - no explicit init is required
+ */
+static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return 0; }
+#define ftrace_init_nop ftrace_init_nop
+
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@@ -49,28 +49,17 @@ struct ftrace_insn {
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
/* brcl 0,0 */
insn->opc = 0xc004;
insn->disp = 0;
-#else
- /* jg .+24 */
- insn->opc = 0xc0f4;
- insn->disp = MCOUNT_INSN_SIZE / 2;
-#endif
#endif
}
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
if (insn->disp == 0)
return 1;
-#else
- if (insn->disp == MCOUNT_INSN_SIZE / 2)
- return 1;
-#endif
#endif
return 0;
}
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 26f9144562c9..c22debfcebf1 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -26,9 +26,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
int oldval = 0, newval, ret;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n",
@@ -53,7 +51,6 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
default:
ret = -ENOSYS;
}
- disable_sacf_uaccess(old_fs);
if (!ret)
*oval = oldval;
@@ -64,10 +61,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- mm_segment_t old_fs;
int ret;
- old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
@@ -77,7 +72,6 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory");
- disable_sacf_uaccess(old_fs);
*uval = oldval;
return ret;
}
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index e9bf486de136..76f351bd6645 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,28 +2,51 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
+#include <asm/pgtable.h>
+
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
-#ifdef CONFIG_KASAN_S390_4_LEVEL_PAGING
#define KASAN_SHADOW_SIZE \
(_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
-#else
-#define KASAN_SHADOW_SIZE \
- (_AC(1, UL) << (_REGION2_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
-#endif
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
extern void kasan_early_init(void);
-extern void kasan_copy_shadow(pgd_t *dst);
+extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
extern unsigned long kasan_vmax;
+
+/*
+ * Estimate kasan memory requirements, which will be reserved
+ * at the very end of available physical memory. kasan requires
+ * 1/8 of available physical memory for the shadow memory, plus
+ * page tables covering the whole of memory and the shadow memory
+ * region (1 + 1/8). To keep the page table estimate simple, take
+ * double the combined pte size.
+ *
+ * The physmem parameter has to be adjusted beforehand if not all of
+ * physical memory will be used (e.g. due to the "mem=" option).
+ */
+static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
+{
+ unsigned long kasan_needs;
+ unsigned long pages;
+ /* for shadow memory */
+ kasan_needs = round_up(physmem / 8, PAGE_SIZE);
+ /* for paging structures */
+ pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
+ kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
+
+ return kasan_needs;
+}
#else
static inline void kasan_early_init(void) { }
-static inline void kasan_copy_shadow(pgd_t *dst) { }
+static inline void kasan_copy_shadow_mapping(void) { }
static inline void kasan_free_early_identity(void) { }
+static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
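A worked instance of the estimate above, as a standalone user-space sketch; the page table constants assume s390's 256-entry, 2KB lowest-level page tables (_PAGE_ENTRIES = 256, _PAGE_TABLE_SIZE = 2048), so treat the exact numbers as assumptions:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ENTRIES	256UL	/* assumed _PAGE_ENTRIES */
#define PAGE_TABLE_SIZE	2048UL	/* assumed _PAGE_TABLE_SIZE */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ROUND_UP(x, a)		(DIV_ROUND_UP(x, a) * (a))

static unsigned long kasan_estimate(unsigned long physmem)
{
	unsigned long kasan_needs, pages;

	/* 1/8 of physical memory for the shadow */
	kasan_needs = ROUND_UP(physmem / 8, PAGE_SIZE);
	/* double the pte size for memory + shadow paging structures */
	pages = DIV_ROUND_UP(physmem + kasan_needs, PAGE_SIZE);
	kasan_needs += DIV_ROUND_UP(pages, PAGE_ENTRIES) * PAGE_TABLE_SIZE * 2;
	return kasan_needs;
}

int main(void)
{
	unsigned long physmem = 4UL << 30;	/* 4 GB */

	printf("kasan reserve for %lu MB: %lu MB\n",
	       physmem >> 20, kasan_estimate(physmem) >> 20);
	return 0;
}

For 4 GB this yields roughly 512 MB of shadow plus about 18 MB for paging structures.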
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 612ed3c6d581..69ce9191eaf1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -116,7 +116,7 @@ struct lowcore {
/* Address space pointer. */
__u64 kernel_asce; /* 0x0380 */
__u64 user_asce; /* 0x0388 */
- __u64 vdso_asce; /* 0x0390 */
+ __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
/*
* The lpp and current_pid fields form a
@@ -134,7 +134,7 @@ struct lowcore {
__u32 spinlock_index; /* 0x03b0 */
__u32 fpu_flags; /* 0x03b4 */
__u64 percpu_offset; /* 0x03b8 */
- __u64 vdso_per_cpu_data; /* 0x03c0 */
+ __u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */
__u64 machine_flags; /* 0x03c8 */
__u64 gmap; /* 0x03d0 */
__u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c9f3d8a52756..e7cffc7b5c2f 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -69,41 +70,18 @@ static inline int init_new_context(struct task_struct *tsk,
return 0;
}
-#define destroy_context(mm) do { } while (0)
-
-static inline void set_user_asce(struct mm_struct *mm)
-{
- S390_lowcore.user_asce = mm->context.asce;
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- clear_cpu_flag(CIF_ASCE_PRIMARY);
-}
-
-static inline void clear_user_asce(void)
-{
- S390_lowcore.user_asce = S390_lowcore.kernel_asce;
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
-}
-
-mm_segment_t enable_sacf_uaccess(void);
-void disable_sacf_uaccess(mm_segment_t old_fs);
-
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce;
+ if (next == &init_mm)
+ S390_lowcore.user_asce = s390_invalid_asce;
+ else
+ S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- /* Clear previous user-ASCE from CR1 and CR7 */
- if (!test_cpu_flag(CIF_ASCE_PRIMARY)) {
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- }
- if (test_cpu_flag(CIF_ASCE_SECONDARY)) {
- __ctl_load(S390_lowcore.vdso_asce, 7, 7);
- clear_cpu_flag(CIF_ASCE_SECONDARY);
- }
+ /* Clear previous user-ASCE from CR7 */
+ __ctl_load(s390_invalid_asce, 7, 7);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -122,18 +100,18 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- set_fs(current->thread.mm_segment);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
}
-#define enter_lazy_tlb(mm,tsk) do { } while (0)
-#define deactivate_mm(tsk,mm) do { } while (0)
-
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- set_user_asce(next);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
}
+#include <asm-generic/mmu_context.h>
+
#endif /* __S390_MMU_CONTEXT_H */
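The key behavioural point in switch_mm() above is that kernel threads, which run on init_mm, get an invalid user ASCE so that any stray user-space access faults immediately instead of silently walking kernel page tables. A toy user-space model of just that selection logic (values and types are illustrative, not kernel code):

#include <stdio.h>

#define INVALID_ASCE	0xdeadUL	/* stand-in for s390_invalid_asce */

struct mm { unsigned long asce; };

static struct mm init_mm = { .asce = 0x1000 };

static unsigned long select_user_asce(struct mm *next)
{
	/* kernel threads must never see a usable user ASCE */
	return next == &init_mm ? INVALID_ASCE : next->asce;
}

int main(void)
{
	struct mm user = { .asce = 0x2000 };

	printf("kthread: %#lx\n", select_user_asce(&init_mm));
	printf("user:    %#lx\n", select_user_asce(&user));
	return 0;
}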
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6b8d8c69b1a1..794746a32806 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -23,6 +23,7 @@
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+extern unsigned long s390_invalid_asce;
enum {
PG_DIRECT_MAP_4K = 0,
@@ -79,15 +80,15 @@ extern unsigned long zero_page_mask;
/*
* The vmalloc and module area will always be on the topmost area of the
- * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
- * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
- * modules will reside. That makes sure that inter module branches always
- * happen without trampolines and in addition the placement within a 2GB frame
- * is branch prediction unit friendly.
+ * kernel mapping. 512GB are reserved for vmalloc by default.
+ * At the top of the vmalloc area a 2GB area is reserved where modules
+ * will reside. That makes sure that inter module branches always
+ * happen without trampolines and in addition the placement within a
+ * 2GB frame is branch prediction unit friendly.
*/
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
-#define VMALLOC_DEFAULT_SIZE ((128UL << 30) - MODULES_LEN)
+#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;
@@ -692,16 +693,6 @@ static inline int pud_large(pud_t pud)
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
-static inline unsigned long pud_pfn(pud_t pud)
-{
- unsigned long origin_mask;
-
- origin_mask = _REGION_ENTRY_ORIGIN;
- if (pud_large(pud))
- origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
- return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
-}
-
#define pmd_leaf pmd_large
static inline int pmd_large(pmd_t pmd)
{
@@ -747,16 +738,6 @@ static inline int pmd_none(pmd_t pmd)
return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
- unsigned long origin_mask;
-
- origin_mask = _SEGMENT_ENTRY_ORIGIN;
- if (pmd_large(pmd))
- origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
- return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
-}
-
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
@@ -1238,11 +1219,39 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
-#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
+static inline unsigned long pmd_deref(pmd_t pmd)
+{
+ unsigned long origin_mask;
+
+ origin_mask = _SEGMENT_ENTRY_ORIGIN;
+ if (pmd_large(pmd))
+ origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
+ return pmd_val(pmd) & origin_mask;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return pmd_deref(pmd) >> PAGE_SHIFT;
+}
+
+static inline unsigned long pud_deref(pud_t pud)
+{
+ unsigned long origin_mask;
+
+ origin_mask = _REGION_ENTRY_ORIGIN;
+ if (pud_large(pud))
+ origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
+ return pud_val(pud) & origin_mask;
+}
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return pud_deref(pud) >> PAGE_SHIFT;
+}
+
/*
* The pgd_offset function *always* adds the index for the top-level
* region/segment table. This is done to get a sequence like the
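The pmd/pud refactoring above folds the origin-mask selection into the deref helpers so that the pfn accessors become one-liners. A standalone illustration of the shared-mask pattern (the mask and flag constants below are made up for the example, not the real s390 definitions):

#include <stdio.h>
#include <stdint.h>

#define ENTRY_ORIGIN		0xfffffffffffff000ULL	/* illustrative */
#define ENTRY_ORIGIN_LARGE	0xfffffffffff00000ULL	/* illustrative */
#define ENTRY_LARGE		0x0000000000000400ULL	/* illustrative */
#define PAGE_SHIFT		12

static uint64_t pmd_deref(uint64_t pmd)
{
	/* select the mask once, in one place */
	uint64_t mask = (pmd & ENTRY_LARGE) ? ENTRY_ORIGIN_LARGE : ENTRY_ORIGIN;

	return pmd & mask;
}

static uint64_t pmd_pfn(uint64_t pmd)
{
	return pmd_deref(pmd) >> PAGE_SHIFT;	/* reuses the helper */
}

int main(void)
{
	uint64_t large_pmd = 0x40000000ULL | ENTRY_LARGE;

	printf("pfn = 0x%llx\n", (unsigned long long)pmd_pfn(large_pmd));
	return 0;
}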
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 962da04234af..6b7269f51f83 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,8 +14,6 @@
#include <linux/bits.h>
-#define CIF_ASCE_PRIMARY 0 /* primary asce needs fixup / uaccess */
-#define CIF_ASCE_SECONDARY 1 /* secondary asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define CIF_FPU 3 /* restore FPU registers */
#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
@@ -23,8 +21,6 @@
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
-#define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
-#define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
#define _CIF_FPU BIT(CIF_FPU)
#define _CIF_IGNORE_IRQ BIT(CIF_IGNORE_IRQ)
@@ -102,8 +98,6 @@ extern void __bpon(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-typedef unsigned int mm_segment_t;
-
/*
* Thread structure
*/
@@ -116,7 +110,6 @@ struct thread_struct {
unsigned long hardirq_timer; /* task cputime in hardirq context */
unsigned long softirq_timer; /* task cputime in softirq context */
unsigned long sys_call_table; /* system call table address */
- mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
@@ -318,14 +311,10 @@ static __always_inline void __noreturn disabled_wait(void)
}
/*
- * Basic Machine Check/Program Check Handler.
+ * Basic Program Check Handler.
*/
-
extern void s390_base_pgm_handler(void);
-extern void s390_base_ext_handler(void);
-
extern void (*s390_base_pgm_handler_fn)(void);
-extern void (*s390_base_ext_handler_fn)(void);
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 16b3e4396312..73ca7f7cac33 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -87,6 +87,7 @@ struct pt_regs
unsigned int int_parm;
unsigned long int_parm_long;
unsigned long flags;
+ unsigned long cr1;
};
/*
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index a7bdd128d85b..5763769a39b6 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -12,7 +12,12 @@
#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
-#define SCLP_MAX_CORES 256
+#define EARLY_SCCB_SIZE PAGE_SIZE
+#define SCLP_MAX_CORES 512
+/* 144 + 16 * SCLP_MAX_CORES + 2 * (SCLP_MAX_CORES - 1) */
+#define EXT_SCCB_READ_SCP (3 * PAGE_SIZE)
+/* 24 + 16 * SCLP_MAX_CORES */
+#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
struct sclp_chp_info {
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
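The formulas in the comments above can be checked against the new SCLP_MAX_CORES value to confirm that three pages are sufficient; a quick standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned long max_cores = 512, page = 4096;
	unsigned long read_scp = 144 + 16 * max_cores + 2 * (max_cores - 1);
	unsigned long read_cpu = 24 + 16 * max_cores;

	printf("READ_SCP: %5lu bytes, limit %lu\n", read_scp, 3 * page);
	printf("READ_CPU: %5lu bytes, limit %lu\n", read_cpu, 3 * page);
	return 0;
}

This prints 9358 and 8216 bytes respectively, both comfortably below the 12288 bytes of EXT_SCCB_READ_SCP / EXT_SCCB_READ_CPU.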
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
index 795bbe0d7ca6..71d46f0ba97b 100644
--- a/arch/s390/include/asm/seccomp.h
+++ b/arch/s390/include/asm/seccomp.h
@@ -16,4 +16,13 @@
#include <asm-generic/seccomp.h>
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_S390X
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "s390x"
+#ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_S390
+# define SECCOMP_ARCH_COMPAT_NR NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "s390"
+#endif
+
#endif /* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index a996d3990a02..0c2151451ba5 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -26,14 +26,14 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
* final .boot.data section, which should be identical in the decompressor and
* the decompressed kernel (that is checked during the build).
*/
-#define __bootdata(var) __section(".boot.data.var") var
+#define __bootdata(var) __section(".boot.data." #var) var
/*
* .boot.preserved.data is similar to .boot.data, but it is not part of the
* .init section and thus will be preserved for later use in the decompressed
* kernel.
*/
-#define __bootdata_preserved(var) __section(".boot.preserved.data.var") var
+#define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
extern unsigned long __sdma, __edma;
extern unsigned long __stext_dma, __etext_dma;
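The __bootdata fix above matters because, without the # stringize operator, every annotated variable ended up in the same literal ".boot.data.var" section instead of a per-variable one. A minimal user-space sketch of the preprocessor difference:

#include <stdio.h>

/* buggy: 'var' inside a string literal is never expanded */
#define SECTION_BUGGY(var)	".boot.data.var"
/* fixed: #var stringizes the argument, yielding a unique name */
#define SECTION_FIXED(var)	".boot.data." #var

int main(void)
{
	puts(SECTION_BUGGY(ident_map_size));	/* .boot.data.var */
	puts(SECTION_FIXED(ident_map_size));	/* .boot.data.ident_map_size */
	return 0;
}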
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index bdb242a1544e..3e388fa208d4 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -16,8 +16,6 @@
#define EARLY_SCCB_OFFSET 0x11000
#define HEAD_END 0x12000
-#define EARLY_SCCB_SIZE PAGE_SIZE
-
/*
* Machine features detected in early.c
*/
@@ -88,10 +86,8 @@ extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_FULL_DEBUG 4
extern int noexec_disabled;
-extern int memory_end_set;
-extern unsigned long memory_end;
+extern unsigned long ident_map_size;
extern unsigned long vmalloc_size;
-extern unsigned long max_physmem_end;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 13a04fcf7762..ce788f3e534d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -18,7 +18,7 @@
#else
#define THREAD_SIZE_ORDER 2
#endif
-#define BOOT_STACK_ORDER 2
+#define BOOT_STACK_SIZE (PAGE_SIZE << 2)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 289aaff4d365..c8e244ecdfde 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -49,6 +49,13 @@ static inline void set_clock_comparator(__u64 time)
asm volatile("sckc %0" : : "Q" (time));
}
+static inline void set_tod_programmable_field(u16 val)
+{
+ register unsigned long reg0 asm("0") = val;
+
+ asm volatile("sckpf" : : "d" (reg0));
+}
+
void clock_comparator_work(void);
void __init time_early_init(void);
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index c868e7ee49b3..c6707885e7c2 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -18,23 +18,7 @@
#include <asm/extable.h>
#include <asm/facility.h>
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define KERNEL_DS (0)
-#define KERNEL_DS_SACF (1)
-#define USER_DS (2)
-#define USER_DS_SACF (3)
-
-#define get_fs() (current->thread.mm_segment)
-#define uaccess_kernel() ((get_fs() & 2) == KERNEL_DS)
-
-void set_fs(mm_segment_t fs);
+void debug_user_asce(void);
static inline int __range_ok(unsigned long addr, unsigned long size)
{
@@ -88,7 +72,7 @@ int __get_user_bad(void) __attribute__((noreturn));
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x010000UL;
+ unsigned long spec = 0x810000UL;
int rc;
switch (size) {
@@ -121,7 +105,7 @@ static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned lon
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x01UL;
+ unsigned long spec = 0x81UL;
int rc;
switch (size) {
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 29b44a930e71..f65590889054 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -12,32 +12,9 @@
#ifndef __ASSEMBLY__
-/*
- * Note about the vdso_data and vdso_per_cpu_data structures:
- *
- * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
- * structure is supposed to be known only to the function in the vdso
- * itself and may change without notice.
- */
-
-struct vdso_per_cpu_data {
- /*
- * Note: node_id and cpu_nr must be at adjacent memory locations.
- * VDSO userspace must read both values with a single instruction.
- */
- union {
- __u64 getcpu_val;
- struct {
- __u32 node_id;
- __u32 cpu_nr;
- };
- };
-};
-
extern struct vdso_data *vdso_data;
-int vdso_alloc_per_cpu(struct lowcore *lowcore);
-void vdso_free_per_cpu(struct lowcore *lowcore);
+void vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/include/asm/vdso/vdso.h b/arch/s390/include/asm/vdso/vdso.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/s390/include/asm/vdso/vdso.h
+++ /dev/null
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
index 3622d4ebc73a..fac6a67988eb 100644
--- a/arch/s390/include/asm/vtime.h
+++ b/arch/s390/include/asm/vtime.h
@@ -2,7 +2,6 @@
#ifndef _S390_VTIME_H
#define _S390_VTIME_H
-#define __ARCH_HAS_VTIME_ACCOUNT
#define __ARCH_HAS_VTIME_TASK_SWITCH
#endif /* _S390_VTIME_H */
diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h
index 9a14a611ed82..0189f326aac5 100644
--- a/arch/s390/include/uapi/asm/signal.h
+++ b/arch/s390/include/uapi/asm/signal.h
@@ -65,30 +65,6 @@ typedef unsigned long sigset_t;
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 2048
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ece58f2217cb..79724d861dc9 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -13,7 +13,6 @@
#include <linux/purgatory.h>
#include <linux/pgtable.h>
#include <asm/idle.h>
-#include <asm/vdso.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
@@ -48,26 +47,17 @@ int main(void)
OFFSET(__PT_INT_PARM, pt_regs, int_parm);
OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long);
OFFSET(__PT_FLAGS, pt_regs, flags);
+ OFFSET(__PT_CR1, pt_regs, cr1);
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK();
/* stack_frame offsets */
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
OFFSET(__SF_GPRS, stack_frame, gprs);
- OFFSET(__SF_EMPTY, stack_frame, empty1);
- OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
- OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
- OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
- OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
- BLANK();
- OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
- BLANK();
- /* constants used by the vdso */
- DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
- DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
- DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
- DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
- DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
- DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
+ OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
+ OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
+ OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
+ OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
+ OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
@@ -146,12 +136,11 @@ int main(void)
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
+ OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
- OFFSET(__LC_VDSO_ASCE, lowcore, vdso_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
- OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index b79e0fd571f8..d255c69c1779 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,32 +11,10 @@
#include <asm/asm-offsets.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
-#include <asm/sigp.h>
GEN_BR_THUNK %r9
GEN_BR_THUNK %r14
-ENTRY(s390_base_ext_handler)
- stmg %r0,%r15,__LC_SAVE_AREA_ASYNC
- basr %r13,0
-0: aghi %r15,-STACK_FRAME_OVERHEAD
- larl %r1,s390_base_ext_handler_fn
- lg %r9,0(%r1)
- ltgr %r9,%r9
- jz 1f
- BASR_EX %r14,%r9
-1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
- ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
- lpswe __LC_EXT_OLD_PSW
-ENDPROC(s390_base_ext_handler)
-
- .section .bss
- .align 8
- .globl s390_base_ext_handler_fn
-s390_base_ext_handler_fn:
- .quad 0
- .previous
-
ENTRY(s390_base_pgm_handler)
stmg %r0,%r15,__LC_SAVE_AREA_SYNC
basr %r13,0
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 705844f73934..cc89763a4d3c 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -169,12 +169,10 @@ static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
+ psw.addr = (unsigned long)s390_base_pgm_handler;
psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
if (IS_ENABLED(CONFIG_KASAN))
psw.mask |= PSW_MASK_DAT;
- psw.addr = (unsigned long) s390_base_ext_handler;
- S390_lowcore.external_new_psw = psw;
- psw.addr = (unsigned long) s390_base_pgm_handler;
S390_lowcore.program_new_psw = psw;
s390_base_pgm_handler_fn = early_pgm_check_handler;
S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 86235919c2d1..8bb9ebb71c4b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -55,7 +55,7 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
+_CIF_WORK = (_CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
_LPP_OFFSET = __LC_LPP
@@ -90,6 +90,12 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
+ .macro DEBUG_USER_ASCE
+#ifdef CONFIG_DEBUG_USER_ASCE
+ brasl %r14,debug_user_asce
+#endif
+ .endm
+
.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
lgr %r14,%r15
@@ -110,9 +116,9 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
- .macro SWITCH_ASYNC savearea,timer
+ .macro SWITCH_ASYNC savearea,timer,clock
tmhh %r8,0x0001 # interrupting from user ?
- jnz 2f
+ jnz 4f
#if IS_ENABLED(CONFIG_KVM)
lgr %r14,%r9
larl %r13,.Lsie_gmap
@@ -125,10 +131,26 @@ _LPP_OFFSET = __LC_LPP
#endif
0: larl %r13,.Lpsw_idle_exit
cgr %r13,%r9
- jne 1f
+ jne 3f
- mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER
+ larl %r1,smp_cpu_mtid
+ llgf %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 2f # no SMT, skip mt_cycles calculation
+ .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+ larl %r3,mt_cycles
+ ag %r3,__LC_PERCPU_OFFSET
+ la %r4,__SF_EMPTY+16(%r15)
+1: lg %r0,0(%r3)
+ slg %r0,0(%r4)
+ alg %r0,64(%r4)
+ stg %r0,0(%r3)
+ la %r3,8(%r3)
+ la %r4,8(%r4)
+ brct %r1,1b
+
+2: mvc __CLOCK_IDLE_EXIT(8,%r2), \clock
+ mvc __TIMER_IDLE_EXIT(8,%r2), \timer
# account system time going idle
ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
@@ -146,17 +168,17 @@ _LPP_OFFSET = __LC_LPP
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
nihh %r8,0xfcfd # clear wait state and irq bits
-1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
+3: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
- jnz 3f
+ jnz 5f
CHECK_STACK \savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 4f
-2: UPDATE_VTIME %r14,%r15,\timer
+ j 6f
+4: UPDATE_VTIME %r14,%r15,\timer
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-3: lg %r15,__LC_ASYNC_STACK # load async stack
-4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+5: lg %r15,__LC_ASYNC_STACK # load async stack
+6: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME w1,w2,enter_timer
@@ -327,7 +349,7 @@ ENTRY(sie64a)
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
@@ -380,6 +402,7 @@ ENTRY(system_call)
lg %r12,__LC_CURRENT
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
lghi %r13,__TASK_thread
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
@@ -422,15 +445,14 @@ ENTRY(system_call)
#endif
LOCKDEP_SYS_EXIT
.Lsysc_tif:
+ DISABLE_INTS
TSTMSK __PT_FLAGS(%r11),_PIF_WORK
jnz .Lsysc_work
TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lsysc_work # check for work
- TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
- jnz .Lsysc_work
+ DEBUG_USER_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
-.Lsysc_restore:
- DISABLE_INTS
TSTMSK __LC_CPU_FLAGS, _CIF_FPU
jz .Lsysc_skip_fpu
brasl %r14,load_fpu_regs
@@ -444,6 +466,7 @@ ENTRY(system_call)
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
+ ENABLE_INTS
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
@@ -467,8 +490,6 @@ ENTRY(system_call)
jo .Lsysc_sigpending
TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
- jnz .Lsysc_asce
j .Lsysc_return
#
@@ -479,26 +500,6 @@ ENTRY(system_call)
jg schedule
#
-# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
-#
-.Lsysc_asce:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
- lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
- jz .Lsysc_return
-#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
- tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
- jnz .Lsysc_set_fs_fixup
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lsysc_return
-.Lsysc_set_fs_fixup:
-#endif
- larl %r14,.Lsysc_return
- jg set_fs_fixup
-
-
-#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
@@ -634,8 +635,11 @@ ENTRY(pgm_check_handler)
0: lg %r12,__LC_CURRENT
lghi %r11,0
lmg %r8,%r9,__LC_PGM_OLD_PSW
- tmhh %r8,0x0001 # test problem state bit
- jnz 3f # -> fault in user space
+ tmhh %r8,0x0001 # coming from user space?
+ jno .Lpgm_skip_asce
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
+ j 3f
+.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
@@ -646,7 +650,7 @@ ENTRY(pgm_check_handler)
jhe 1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
lghi %r11,_PIF_GUEST_FAULT
#endif
@@ -707,10 +711,20 @@ ENTRY(pgm_check_handler)
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lsysc_restore
+ jno .Lpgm_restore
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jo .Lsysc_do_syscall
j .Lsysc_tif
+.Lpgm_restore:
+ DISABLE_INTS
+ TSTMSK __LC_CPU_FLAGS, _CIF_FPU
+ jz .Lpgm_skip_fpu
+ brasl %r14,load_fpu_regs
+.Lpgm_skip_fpu:
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ stpt __LC_EXIT_TIMER
+ lmg %r0,%r15,__PT_R0(%r11)
+ b __LC_RETURN_LPSWE
#
# PER event in supervisor state, must be kprobes
@@ -743,7 +757,7 @@ ENTRY(io_int_handler)
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
lmg %r8,%r9,__LC_IO_OLD_PSW
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
@@ -757,16 +771,15 @@ ENTRY(io_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # coming from user space?
+ jno .Lio_skip_asce
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
+.Lio_skip_asce:
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
- tmhh %r8,0x300
- jz 1f
TRACE_IRQS_OFF
-1:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
lgr %r2,%r11 # pass pointer to pt_regs
@@ -789,15 +802,12 @@ ENTRY(io_int_handler)
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lio_work
.Lio_restore:
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
- tm __PT_PSW(%r11),3
- jno 0f
TRACE_IRQS_ON
-0:
-#endif
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lio_exit_kernel
+ DEBUG_USER_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
.Lio_exit_kernel:
@@ -863,28 +873,7 @@ ENTRY(io_int_handler)
jo .Lio_guarded_storage
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lio_vxrs
- TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
- jnz .Lio_asce
- j .Lio_return
-
-#
-# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
-#
-.Lio_asce:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
- lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
- jz .Lio_return
-#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
- tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
- jnz .Lio_set_fs_fixup
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lio_return
-.Lio_set_fs_fixup:
-#endif
- larl %r14,.Lio_return
- jg set_fs_fixup
#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -953,7 +942,7 @@ ENTRY(ext_int_handler)
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
lmg %r8,%r9,__LC_EXT_OLD_PSW
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
@@ -967,6 +956,10 @@ ENTRY(ext_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # coming from user space?
+ jno .Lext_skip_asce
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
+.Lext_skip_asce:
lghi %r1,__LC_EXT_PARAMS2
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
@@ -974,12 +967,7 @@ ENTRY(ext_int_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
- tmhh %r8,0x300
- jz 1f
TRACE_IRQS_OFF
-1:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
lghi %r3,EXT_INTERRUPT
@@ -1066,6 +1054,7 @@ EXPORT_SYMBOL(save_fpu_regs)
* %r4
*/
load_fpu_regs:
+ stnsm __SF_EMPTY(%r15),0xfc
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
@@ -1097,6 +1086,7 @@ load_fpu_regs:
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
+ ssm __SF_EMPTY(%r15)
BR_EX %r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
@@ -1178,7 +1168,7 @@ ENTRY(mcck_int_handler)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
@@ -1194,6 +1184,9 @@ ENTRY(mcck_int_handler)
xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
+ la %r14,4095
+ mvc __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
@@ -1209,6 +1202,7 @@ ENTRY(mcck_int_handler)
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
.Lmcck_return:
+ lctlg %c1,%c1,__PT_CR1(%r11)
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
@@ -1285,7 +1279,7 @@ ENDPROC(stack_overflow)
1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ lctlg %c1,%c1,__LC_KERNEL_ASCE
larl %r9,sie_exit # skip forward to sie_exit
BR_EX %r14,%r11
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index d2ca3fe51f8e..a16c33b32ab0 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -83,7 +83,6 @@ long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user
DECLARE_PER_CPU(u64, mt_cycles[8]);
void gs_load_bc_cb(struct pt_regs *regs);
-void set_fs_fixup(void);
unsigned long stack_alloc(void);
void stack_free(unsigned long stack);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b388e87a08bf..ebc1284a618b 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -22,56 +22,26 @@
#include "entry.h"
/*
- * The mcount code looks like this:
- * stg %r14,8(%r15) # offset 0
- * larl %r1,<&counter> # offset 6
- * brasl %r14,_mcount # offset 12
- * lg %r14,8(%r15) # offset 18
- * Total length is 24 bytes. Only the first instruction will be patched
- * by ftrace_make_call / ftrace_make_nop.
- * The enabled ftrace code block looks like this:
+ * To generate the function prologue, either gcc's hotpatch feature (since
+ * gcc 4.8) or the combination of the -pg -mrecord-mcount -mnop-mcount
+ * -mfentry flags (since gcc 9 / clang 10) is used.
+ * In both cases the original, as well as the disabled, function prologue
+ * contains only a single six-byte instruction and looks like this:
+ * > brcl 0,0 # offset 0
+ * To enable ftrace the code gets patched like above and afterwards looks
+ * like this:
* > brasl %r0,ftrace_caller # offset 0
- * larl %r1,<&counter> # offset 6
- * brasl %r14,_mcount # offset 12
- * lg %r14,8(%r15) # offset 18
+ *
+ * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
* The ftrace function gets called with a non-standard C function call ABI
* where r0 contains the return address. It is also expected that the called
* function only clobbers r0 and r1, but restores r2-r15.
* For module code we can't directly jump to ftrace caller, but need a
* trampoline (ftrace_plt), which clobbers also r1.
- * The return point of the ftrace function has offset 24, so execution
- * continues behind the mcount block.
- * The disabled ftrace code block looks like this:
- * > jg .+24 # offset 0
- * larl %r1,<&counter> # offset 6
- * brasl %r14,_mcount # offset 12
- * lg %r14,8(%r15) # offset 18
- * The jg instruction branches to offset 24 to skip as many instructions
- * as possible.
- * In case we use gcc's hotpatch feature the original and also the disabled
- * function prologue contains only a single six byte instruction and looks
- * like this:
- * > brcl 0,0 # offset 0
- * To enable ftrace the code gets patched like above and afterwards looks
- * like this:
- * > brasl %r0,ftrace_caller # offset 0
*/
unsigned long ftrace_plt;
-static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
-{
-#if defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)
- /* brcl 0,0 */
- insn->opc = 0xc004;
- insn->disp = 0;
-#else
- /* stg r14,8(r15) */
- insn->opc = 0xe3e0;
- insn->disp = 0xf0080024;
-#endif
-}
-
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@@ -85,15 +55,10 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- if (addr == MCOUNT_ADDR) {
- /* Initial code replacement */
- ftrace_generate_orig_insn(&orig);
- ftrace_generate_nop_insn(&new);
- } else {
- /* Replace ftrace call with a nop. */
- ftrace_generate_call_insn(&orig, rec->ip);
- ftrace_generate_nop_insn(&new);
- }
+ /* Replace ftrace call with a nop. */
+ ftrace_generate_call_insn(&orig, rec->ip);
+ ftrace_generate_nop_insn(&new);
+
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
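To make the patching described in the ftrace.c comment concrete, both prologue forms are single six-byte RIL instructions that differ only in opcode and displacement. A standalone sketch of how the two encodings are built (mirroring ftrace_generate_nop_insn() above; the call encoding and displacement math are illustrative):

#include <stdio.h>
#include <stdint.h>

struct ftrace_insn {
	uint16_t opc;
	int32_t  disp;
} __attribute__((packed));

/* disabled prologue: brcl 0,0 (branch relative long, condition mask 0) */
static void generate_nop(struct ftrace_insn *insn)
{
	insn->opc = 0xc004;
	insn->disp = 0;
}

/* enabled prologue: brasl %r0,target; disp counts halfwords from ip */
static void generate_call(struct ftrace_insn *insn, uint64_t ip, uint64_t target)
{
	insn->opc = 0xc005;
	insn->disp = (int32_t)(((int64_t)target - (int64_t)ip) / 2);
}

int main(void)
{
	struct ftrace_insn nop, call;

	generate_nop(&nop);
	generate_call(&call, 0x1000, 0x2000);
	printf("nop:  %04x %08x\n", (unsigned)nop.opc, (uint32_t)nop.disp);
	printf("call: %04x %08x\n", (unsigned)call.opc, (uint32_t)call.disp);
	return 0;
}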
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 8b88dbbda7df..0c253886da78 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -18,12 +18,7 @@
__HEAD
ENTRY(startup_continue)
- tm __LC_STFLE_FAC_LIST+5,0x80 # LPP available ?
- jz 0f
- xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid
- mvi __LC_LPP,0x80 # and set LPP_MAGIC
- .insn s,0xb2800000,__LC_LPP # load program parameter
-0: larl %r1,tod_clock_base
+ larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
#
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index f7f1e64e0d98..2b85096964f8 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -33,10 +33,10 @@ void enabled_wait(void)
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
- local_irq_save(flags);
+ raw_local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
/* Account time spent with enabled wait psw loaded as idle time. */
raw_write_seqcount_begin(&idle->seqcount);
@@ -123,7 +123,7 @@ void arch_cpu_idle_enter(void)
void arch_cpu_idle(void)
{
enabled_wait();
- local_irq_enable();
+ raw_local_irq_enable();
}
void arch_cpu_idle_exit(void)
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 7458dcfd6464..faf64c2f90f5 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -33,11 +33,6 @@ ENDPROC(ftrace_stub)
#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
#endif
-ENTRY(_mcount)
- BR_EX %r14
-ENDPROC(_mcount)
-EXPORT_SYMBOL(_mcount)
-
ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
@@ -46,9 +41,6 @@ ENTRY(ftrace_caller)
ipm %r14 # don't put any instructions
sllg %r14,%r14,16 # clobbering CC before this point
lgr %r1,%r15
-#if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
- aghi %r0,MCOUNT_RETURN_FIXUP
-#endif
# allocate stack frame for ftrace_caller to contain traced function
aghi %r15,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 4f9e4626df55..19cd7b961c45 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -672,7 +672,7 @@ static void cpumsf_output_event_pid(struct perf_event *event,
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
- if (perf_output_begin(&handle, event, header.size))
+ if (perf_output_begin(&handle, data, event, header.size))
goto out;
/* Update the process ID (see also kernel/events/core.c) */
@@ -2228,4 +2228,4 @@ out:
}
arch_initcall(init_cpum_sampling_pmu);
-core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
+core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
index 4352a504f235..6e9e5d5e927e 100644
--- a/arch/s390/kernel/perf_regs.c
+++ b/arch/s390/kernel/perf_regs.c
@@ -53,8 +53,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
/*
* Use the regs from the first interruption and let
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ec801d3bbb37..bc3ca54edfb4 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -94,7 +94,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* Save access registers to new thread structure. */
save_access_regs(&p->thread.acrs[0]);
/* start new process with ar4 pointing to the correct address space */
- p->thread.mm_segment = get_fs();
/* Don't copy debug registers */
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
@@ -208,16 +207,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
-
-void set_fs_fixup(void)
-{
- struct pt_regs *regs = current_pt_regs();
- static bool warned;
-
- set_fs(USER_DS);
- if (warned)
- return;
- WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
- show_registers(regs);
- warned = true;
-}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4d843e64496f..1f16a03be995 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -49,6 +49,7 @@
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
+#include <linux/hugetlb.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
@@ -94,10 +95,8 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
int __bootdata(noexec_disabled);
-int __bootdata(memory_end_set);
-unsigned long __bootdata(memory_end);
+unsigned long __bootdata(ident_map_size);
unsigned long __bootdata(vmalloc_size);
-unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
@@ -336,6 +335,7 @@ int __init arch_early_irq_init(void)
if (!stack)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
+ udelay_enable();
return 0;
}
@@ -556,24 +556,25 @@ static void __init setup_resources(void)
#endif
}
-static void __init setup_memory_end(void)
+static void __init setup_ident_map_size(void)
{
unsigned long vmax, tmp;
/* Choose kernel address space layout: 3 or 4 levels. */
- tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
+ tmp = ident_map_size / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
vmax = _REGION1_SIZE; /* 4-level kernel page table */
+ /* module area is at the end of the kernel address space. */
+ MODULES_END = vmax;
if (is_prot_virt_host())
- adjust_to_uv_max(&vmax);
+ adjust_to_uv_max(&MODULES_END);
#ifdef CONFIG_KASAN
- vmax = kasan_vmax;
+ vmax = _REGION1_SIZE;
+ MODULES_END = kasan_vmax;
#endif
- /* module area is at the end of the kernel address space. */
- MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
VMALLOC_START = VMALLOC_END - vmalloc_size;
@@ -587,22 +588,22 @@ static void __init setup_memory_end(void)
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
vmemmap = (struct page *) tmp;
- /* Take care that memory_end is set and <= vmemmap */
- memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
+ /* Take care that ident_map_size <= vmemmap */
+ ident_map_size = min(ident_map_size, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
- memory_end = min(memory_end, KASAN_SHADOW_START);
+ ident_map_size = min(ident_map_size, KASAN_SHADOW_START);
#endif
- vmemmap_size = SECTION_ALIGN_UP(memory_end / PAGE_SIZE) * sizeof(struct page);
+ vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
#ifdef CONFIG_KASAN
/* move vmemmap above kasan shadow only if it stands in the way */
if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
(unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
- max_pfn = max_low_pfn = PFN_DOWN(memory_end);
- memblock_remove(memory_end, ULONG_MAX);
+ max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
+ memblock_remove(ident_map_size, ULONG_MAX);
- pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
+ pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
#ifdef CONFIG_CRASH_DUMP
@@ -632,12 +633,11 @@ static struct notifier_block kdump_mem_nb = {
#endif
/*
- * Make sure that the area behind memory_end is protected
+ * Make sure that the area above identity mapping is protected
*/
-static void __init reserve_memory_end(void)
+static void __init reserve_above_ident_map(void)
{
- if (memory_end_set)
- memblock_reserve(memory_end, ULONG_MAX);
+ memblock_reserve(ident_map_size, ULONG_MAX);
}
/*
@@ -674,7 +674,7 @@ static void __init reserve_crashkernel(void)
phys_addr_t low, high;
int rc;
- rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
+ rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
&crash_base);
crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
@@ -1128,7 +1128,7 @@ void __init setup_arch(char **cmdline_p)
setup_control_program_code();
/* Do some memory reservations *before* memory is added to memblock */
- reserve_memory_end();
+ reserve_above_ident_map();
reserve_oldmem();
reserve_kernel();
reserve_initrd();
@@ -1143,10 +1143,12 @@ void __init setup_arch(char **cmdline_p)
remove_oldmem();
setup_uv();
- setup_memory_end();
+ setup_ident_map_size();
setup_memory();
- dma_contiguous_reserve(memory_end);
+ dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
+ if (MACHINE_HAS_EDAT2)
+ hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
check_initrd();
reserve_crashkernel();
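
The renamed setup_ident_map_size() sizes the identity mapping instead of a user-tunable memory_end, then picks between a 3- and 4-level kernel page table depending on how much virtual space the identity map, its vmemmap, vmalloc and the module area need. A standalone sketch of that sizing rule; all constants below are stand-ins chosen for illustration, not the real s390 values:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define REGION2_SPAN  (1UL << 42)   /* assumed span of a 3-level layout */

    struct page { char pad[64]; };      /* stand-in for the real struct page */

    int main(void)
    {
        unsigned long ident_map_size = 1UL << 40;  /* example: 1 TiB */
        unsigned long vmalloc_size   = 1UL << 37;
        unsigned long modules_len    = 1UL << 31;
        unsigned long need;

        /* identity map plus one struct page per mapped page, as in the hunk */
        need = ident_map_size / PAGE_SIZE * (sizeof(struct page) + PAGE_SIZE);
        printf("%s kernel page table\n",
               need + vmalloc_size + modules_len <= REGION2_SPAN ?
               "3-level" : "4-level");
        return 0;
    }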
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index ebfe86d097f0..27c763014114 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -47,7 +47,6 @@
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
-#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
@@ -55,6 +54,7 @@
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
+#include <asm/vdso.h>
#include "entry.h"
enum {
@@ -217,14 +217,10 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
goto out_async;
- if (vdso_alloc_per_cpu(lc))
- goto out_mcesa;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
-out_mcesa:
- nmi_free_per_cpu(lc);
out_async:
stack_free(async_stack);
out:
@@ -245,7 +241,6 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
- vdso_free_per_cpu(pcpu->lowcore);
nmi_free_per_cpu(pcpu->lowcore);
stack_free(async_stack);
if (pcpu == &pcpu_devices[0])
@@ -265,13 +260,13 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
- lc->user_asce = S390_lowcore.kernel_asce;
+ lc->user_asce = s390_invalid_asce;
lc->machine_flags = S390_lowcore.machine_flags;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
lc->cregs_save_area[1] = lc->kernel_asce;
- lc->cregs_save_area[7] = lc->vdso_asce;
+ lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
sizeof(lc->stfle_fac_list));
@@ -855,16 +850,16 @@ void __init smp_detect_cpus(void)
static void smp_init_secondary(void)
{
- int cpu = smp_processor_id();
+ int cpu = raw_smp_processor_id();
S390_lowcore.last_update_clock = get_tod_clock();
restore_access_regs(S390_lowcore.access_regs_save_area);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- set_cpu_flag(CIF_ASCE_SECONDARY);
cpu_init();
+ rcu_cpu_starting(cpu);
preempt_disable();
init_cpu_timer();
vtime_init();
+ vdso_getcpu_init();
pfault_init();
notify_cpu_starting(cpu);
if (topology_cpu_dedicated(cpu))
@@ -895,24 +890,12 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- struct pcpu *pcpu;
- int base, i, rc;
+ struct pcpu *pcpu = pcpu_devices + cpu;
+ int rc;
- pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- base = smp_get_base_cpu(cpu);
- for (i = 0; i <= smp_cpu_mtid; i++) {
- if (base + i < nr_cpu_ids)
- if (cpu_online(base + i))
- break;
- }
- /*
- * If this is the first CPU of the core to get online
- * do an initial CPU reset.
- */
- if (i > smp_cpu_mtid &&
- pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
+ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0ac30ee2c633..c59cb44fbb7d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -927,41 +927,25 @@ static ssize_t online_store(struct device *dev,
*/
static DEVICE_ATTR_RW(online);
-static struct device_attribute *stp_attributes[] = {
- &dev_attr_ctn_id,
- &dev_attr_ctn_type,
- &dev_attr_dst_offset,
- &dev_attr_leap_seconds,
- &dev_attr_online,
- &dev_attr_leap_seconds_scheduled,
- &dev_attr_stratum,
- &dev_attr_time_offset,
- &dev_attr_time_zone_offset,
- &dev_attr_timing_mode,
- &dev_attr_timing_state,
+static struct attribute *stp_dev_attrs[] = {
+ &dev_attr_ctn_id.attr,
+ &dev_attr_ctn_type.attr,
+ &dev_attr_dst_offset.attr,
+ &dev_attr_leap_seconds.attr,
+ &dev_attr_online.attr,
+ &dev_attr_leap_seconds_scheduled.attr,
+ &dev_attr_stratum.attr,
+ &dev_attr_time_offset.attr,
+ &dev_attr_time_zone_offset.attr,
+ &dev_attr_timing_mode.attr,
+ &dev_attr_timing_state.attr,
NULL
};
+ATTRIBUTE_GROUPS(stp_dev);
static int __init stp_init_sysfs(void)
{
- struct device_attribute **attr;
- int rc;
-
- rc = subsys_system_register(&stp_subsys, NULL);
- if (rc)
- goto out;
- for (attr = stp_attributes; *attr; attr++) {
- rc = device_create_file(stp_subsys.dev_root, *attr);
- if (rc)
- goto out_unreg;
- }
- return 0;
-out_unreg:
- for (; attr >= stp_attributes; attr--)
- device_remove_file(stp_subsys.dev_root, *attr);
- bus_unregister(&stp_subsys);
-out:
- return rc;
+ return subsys_system_register(&stp_subsys, stp_dev_groups);
}
device_initcall(stp_init_sysfs);
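
The sysfs conversion above is the standard replacement for open-coded device_create_file() loops: declare a NULL-terminated attribute array, let ATTRIBUTE_GROUPS() generate the group wrappers, and hand the groups to subsys_system_register(), which also removes the manual error unwinding. The generic shape of the pattern, with hypothetical "foo" names:

    #include <linux/device.h>

    static struct attribute *foo_dev_attrs[] = {
        &dev_attr_bar.attr,
        &dev_attr_baz.attr,
        NULL                       /* the array must be NULL-terminated */
    };
    ATTRIBUTE_GROUPS(foo_dev);     /* generates foo_dev_group and foo_dev_groups */

    static int __init foo_init_sysfs(void)
    {
        /* all attributes are created (and torn down) by the driver core */
        return subsys_system_register(&foo_subsys, foo_dev_groups);
    }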
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 14bd9d58edc9..883bfed9f5c2 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -129,8 +129,15 @@ int uv_destroy_page(unsigned long paddr)
.paddr = paddr
};
- if (uv_call(0, (u64)&uvcb))
+ if (uv_call(0, (u64)&uvcb)) {
+ /*
+ * Older firmware uses 107/d as an indication of a non-secure
+ * page. Let us emulate the newer variant (no-op).
+ */
+ if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
+ return 0;
return -EINVAL;
+ }
return 0;
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index f9da5b149141..8bc269c55fd3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -29,6 +29,7 @@
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
+#include <asm/timex.h>
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
@@ -61,17 +62,8 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
{
- unsigned long vdso_pages;
-
- vdso_pages = vdso64_pages;
-
- if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (WARN_ON_ONCE(current->mm != vma->vm_mm))
- return -EFAULT;
-
current->mm->context.vdso_base = vma->vm_start;
+
return 0;
}
@@ -99,60 +91,10 @@ static union {
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;
-/*
- * Allocate/free per cpu vdso data.
- */
-#define SEGMENT_ORDER 2
-
-int vdso_alloc_per_cpu(struct lowcore *lowcore)
-{
- unsigned long segment_table, page_table, page_frame;
- struct vdso_per_cpu_data *vd;
- segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
- page_table = get_zeroed_page(GFP_KERNEL);
- page_frame = get_zeroed_page(GFP_KERNEL);
- if (!segment_table || !page_table || !page_frame)
- goto out;
- arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
- arch_set_page_dat(virt_to_page(page_table), 0);
-
- /* Initialize per-cpu vdso data page */
- vd = (struct vdso_per_cpu_data *) page_frame;
- vd->cpu_nr = lowcore->cpu_nr;
- vd->node_id = cpu_to_node(vd->cpu_nr);
-
- /* Set up page table for the vdso address space */
- memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
- memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);
-
- *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
- *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
-
- lowcore->vdso_asce = segment_table +
- _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
- lowcore->vdso_per_cpu_data = page_frame;
-
- return 0;
-
-out:
- free_page(page_frame);
- free_page(page_table);
- free_pages(segment_table, SEGMENT_ORDER);
- return -ENOMEM;
-}
-
-void vdso_free_per_cpu(struct lowcore *lowcore)
+void vdso_getcpu_init(void)
{
- unsigned long segment_table, page_table, page_frame;
-
- segment_table = lowcore->vdso_asce & PAGE_MASK;
- page_table = *(unsigned long *) segment_table;
- page_frame = *(unsigned long *) page_table;
-
- free_page(page_frame);
- free_page(page_table);
- free_pages(segment_table, SEGMENT_ORDER);
+ set_tod_programmable_field(smp_processor_id());
}
/*
@@ -225,6 +167,7 @@ static int __init vdso_init(void)
{
int i;
+ vdso_getcpu_init();
/* Calculate the size of the 64 bit vDSO */
vdso64_pages = ((&vdso64_end - &vdso64_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -240,8 +183,6 @@ static int __init vdso_init(void)
}
vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
vdso64_pagelist[vdso64_pages] = NULL;
- if (vdso_alloc_per_cpu(&S390_lowcore))
- BUG();
get_page(virt_to_page(vdso_data));
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 13cc5a3f9abf..a6e0fb6b91d6 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -6,8 +6,9 @@ ARCH_REL_TYPE_ABS := R_390_COPY|R_390_GLOB_DAT|R_390_JMP_SLOT|R_390_RELATIVE
ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT
include $(srctree)/lib/vdso/Makefile
-obj-vdso64 = vdso_user_wrapper.o note.o getcpu.o
-obj-cvdso64 = vdso64_generic.o
+obj-vdso64 = vdso_user_wrapper.o note.o
+obj-cvdso64 = vdso64_generic.o getcpu.o
+CFLAGS_REMOVE_getcpu.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
# Build rules
diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S
deleted file mode 100644
index 3c04f7328500..000000000000
--- a/arch/s390/kernel/vdso64/getcpu.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Userland implementation of getcpu() for 64 bits processes in a
- * s390 kernel for use in the vDSO
- *
- * Copyright IBM Corp. 2016
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/dwarf.h>
-
- .text
- .align 4
- .globl __kernel_getcpu
- .type __kernel_getcpu,@function
-__kernel_getcpu:
- CFI_STARTPROC
- sacf 256
- lm %r4,%r5,__VDSO_GETCPU_VAL(%r0)
- sacf 0
- ltgr %r2,%r2
- jz 2f
- st %r5,0(%r2)
-2: ltgr %r3,%r3
- jz 3f
- st %r4,0(%r3)
-3: lghi %r2,0
- br %r14
- CFI_ENDPROC
- .size __kernel_getcpu,.-__kernel_getcpu
diff --git a/arch/s390/kernel/vdso64/getcpu.c b/arch/s390/kernel/vdso64/getcpu.c
new file mode 100644
index 000000000000..5b2bc7494d5b
--- /dev/null
+++ b/arch/s390/kernel/vdso64/getcpu.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright IBM Corp. 2020 */
+
+#include <linux/compiler.h>
+#include <linux/getcpu.h>
+#include <asm/timex.h>
+#include "vdso.h"
+
+int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+{
+ __u16 todval[8];
+
+ /* CPU number is stored in the programmable field of the TOD clock */
+ get_tod_clock_ext((char *)todval);
+ if (cpu)
+ *cpu = todval[7];
+ /* NUMA node is always zero */
+ if (node)
+ *node = 0;
+ return 0;
+}
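
With getcpu now a C function in the vDSO, userspace keeps calling it through the usual entry points and simply gets the faster implementation. A minimal userspace check, ordinary application code rather than part of this patch (glibc 2.29 or later exposes the getcpu() wrapper):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int cpu, node;

        if (getcpu(&cpu, &node) == 0)
            printf("cpu=%u node=%u\n", cpu, node);  /* node is always 0 on s390 */
        return 0;
    }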
diff --git a/arch/s390/kernel/vdso64/vdso.h b/arch/s390/kernel/vdso64/vdso.h
new file mode 100644
index 000000000000..34c7a2312f9d
--- /dev/null
+++ b/arch/s390/kernel/vdso64/vdso.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARCH_S390_KERNEL_VDSO64_VDSO_H
+#define __ARCH_S390_KERNEL_VDSO64_VDSO_H
+
+#include <vdso/datapage.h>
+
+struct getcpu_cache;
+
+int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused);
+int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts);
+
+#endif /* __ARCH_S390_KERNEL_VDSO64_VDSO_H */
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index 7ddb116b5e2e..7bde3909290f 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -136,7 +136,6 @@ VERSION
__kernel_clock_gettime;
__kernel_clock_getres;
__kernel_getcpu;
-
local: *;
};
}
diff --git a/arch/s390/kernel/vdso64/vdso64_generic.c b/arch/s390/kernel/vdso64/vdso64_generic.c
index a8cef7e4d137..a9aa75643c08 100644
--- a/arch/s390/kernel/vdso64/vdso64_generic.c
+++ b/arch/s390/kernel/vdso64/vdso64_generic.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../../../lib/vdso/gettimeofday.c"
+#include "vdso.h"
int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz)
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index a775d7e52872..f773505c7e63 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -36,3 +36,4 @@ __kernel_\func:
vdso_func gettimeofday
vdso_func clock_getres
vdso_func clock_gettime
+vdso_func getcpu
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 177ccfbda40a..4c0e19145cc6 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -10,7 +10,8 @@
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
* make sure it has 16k alignment.
*/
-#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) \
+ *(.bss..invalid_pg_dir)
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 8df10d3c8f6c..5aaa2ca6a928 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -222,35 +222,50 @@ void vtime_flush(struct task_struct *tsk)
S390_lowcore.avg_steal_timer = avg_steal;
}
+static u64 vtime_delta(void)
+{
+ u64 timer = S390_lowcore.last_update_timer;
+
+ S390_lowcore.last_update_timer = get_vtimer();
+
+ return timer - S390_lowcore.last_update_timer;
+}
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
{
- u64 timer;
-
- timer = S390_lowcore.last_update_timer;
- S390_lowcore.last_update_timer = get_vtimer();
- timer -= S390_lowcore.last_update_timer;
+ u64 delta = vtime_delta();
- if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
- S390_lowcore.guest_timer += timer;
- else if (hardirq_count())
- S390_lowcore.hardirq_timer += timer;
- else if (in_serving_softirq())
- S390_lowcore.softirq_timer += timer;
+ if (tsk->flags & PF_VCPU)
+ S390_lowcore.guest_timer += delta;
else
- S390_lowcore.system_timer += timer;
+ S390_lowcore.system_timer += delta;
- virt_timer_forward(timer);
+ virt_timer_forward(delta);
}
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
-
-void vtime_account_kernel(struct task_struct *tsk)
-__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_kernel);
+void vtime_account_softirq(struct task_struct *tsk)
+{
+ u64 delta = vtime_delta();
+
+ S390_lowcore.softirq_timer += delta;
+
+ virt_timer_forward(delta);
+}
+
+void vtime_account_hardirq(struct task_struct *tsk)
+{
+ u64 delta = vtime_delta();
+
+ S390_lowcore.hardirq_timer += delta;
+
+ virt_timer_forward(delta);
+}
+
/*
* Sorted add to a list. List is linear searched until first bigger
* element is found.
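
The split above hinges on the factored-out vtime_delta(): the s390 CPU timer counts down, so the time consumed since the last update is the previous reading minus the current one, and each accounting variant (kernel, softirq, hardirq) charges that delta to its own bucket. A sketch of the idiom, with get_vtimer() assumed to read such a down-counting timer:

    #include <stdint.h>

    extern uint64_t get_vtimer(void);   /* assumed: reads a down-counting timer */

    static uint64_t last_update_timer;

    static uint64_t vtime_delta(void)
    {
        uint64_t prev = last_update_timer;

        last_update_timer = get_vtimer();
        return prev - last_update_timer;  /* elapsed since the previous call */
    }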
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6b74b92c1a58..425d3d75320b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
struct kvm_s390_pv_unp unp = {};
r = -EINVAL;
- if (!kvm_s390_pv_is_protected(kvm))
+ if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
break;
r = -EFAULT;
@@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->todpr = 0;
- vcpu->arch.sie_block->cpnc = 0;
}
}
@@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
regs->etoken = 0;
regs->etoken_extension = 0;
- regs->diag318 = 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index eb99e2f95ebe..f5847f9dec7c 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
return -EIO;
}
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
- atomic_set(&kvm->mm->context.is_protected, 1);
return 0;
}
@@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
*rc, *rrc);
+ if (!cc)
+ atomic_set(&kvm->mm->context.is_protected, 1);
return cc ? -EINVAL : 0;
}
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index daca7bad66de..68d61f2835df 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -13,11 +13,19 @@
#include <linux/export.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
+#include <linux/jump_label.h>
#include <linux/irq.h>
#include <asm/vtimer.h>
#include <asm/div64.h>
#include <asm/idle.h>
+static DEFINE_STATIC_KEY_FALSE(udelay_ready);
+
+void __init udelay_enable(void)
+{
+ static_branch_enable(&udelay_ready);
+}
+
void __delay(unsigned long loops)
{
/*
@@ -33,7 +41,7 @@ EXPORT_SYMBOL(__delay);
static void __udelay_disabled(unsigned long long usecs)
{
- unsigned long cr0, cr0_new, psw_mask, flags;
+ unsigned long cr0, cr0_new, psw_mask;
struct s390_idle_data idle;
u64 end;
@@ -45,9 +53,8 @@ static void __udelay_disabled(unsigned long long usecs)
psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
set_clock_comparator(end);
set_cpu_flag(CIF_IGNORE_IRQ);
- local_irq_save(flags);
psw_idle(&idle, psw_mask);
- local_irq_restore(flags);
+ trace_hardirqs_off();
clear_cpu_flag(CIF_IGNORE_IRQ);
set_clock_comparator(S390_lowcore.clock_comparator);
__ctl_load(cr0, 0, 0);
@@ -77,6 +84,11 @@ void __udelay(unsigned long long usecs)
{
unsigned long flags;
+ if (!static_branch_likely(&udelay_ready)) {
+ udelay_simple(usecs);
+ return;
+ }
+
preempt_disable();
local_irq_save(flags);
if (in_irq()) {
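
The udelay_ready key makes __udelay() fall back to udelay_simple() until arch_early_irq_init() calls udelay_enable(); after that the static branch is patched and the check costs almost nothing on the hot path. The general static-key gating pattern, with made-up "feature" names:

    #include <linux/jump_label.h>

    extern void simple_fallback(void);
    extern void full_implementation(void);

    static DEFINE_STATIC_KEY_FALSE(feature_ready);

    void feature_enable(void)
    {
        static_branch_enable(&feature_ready);  /* patches all branch sites */
    }

    void hot_path(void)
    {
        if (!static_branch_likely(&feature_ready)) {
            simple_fallback();                 /* safe during early boot */
            return;
        }
        full_implementation();
    }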
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0267405ab7c6..e8f642446fed 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -16,6 +16,22 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
+#ifdef CONFIG_DEBUG_USER_ASCE
+void debug_user_asce(void)
+{
+ unsigned long cr1, cr7;
+
+ __ctl_store(cr1, 1, 1);
+ __ctl_store(cr7, 7, 7);
+ if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
+ return;
+ panic("incorrect ASCE on kernel exit\n"
+ "cr1: %016lx cr7: %016lx\n"
+ "kernel: %016llx user: %016llx\n",
+ cr1, cr7, S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+}
+#endif /* CONFIG_DEBUG_USER_ASCE */
+
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);
@@ -40,71 +56,10 @@ static inline int copy_with_mvcos(void)
}
#endif
-void set_fs(mm_segment_t fs)
-{
- current->thread.mm_segment = fs;
- if (fs == USER_DS) {
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- clear_cpu_flag(CIF_ASCE_PRIMARY);
- } else {
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- }
- if (fs & 1) {
- if (fs == USER_DS_SACF)
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- else
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
- set_cpu_flag(CIF_ASCE_SECONDARY);
- }
-}
-EXPORT_SYMBOL(set_fs);
-
-mm_segment_t enable_sacf_uaccess(void)
-{
- mm_segment_t old_fs;
- unsigned long asce, cr;
- unsigned long flags;
-
- old_fs = current->thread.mm_segment;
- if (old_fs & 1)
- return old_fs;
- /* protect against a concurrent page table upgrade */
- local_irq_save(flags);
- current->thread.mm_segment |= 1;
- asce = S390_lowcore.kernel_asce;
- if (likely(old_fs == USER_DS)) {
- __ctl_store(cr, 1, 1);
- if (cr != S390_lowcore.kernel_asce) {
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- set_cpu_flag(CIF_ASCE_PRIMARY);
- }
- asce = S390_lowcore.user_asce;
- }
- __ctl_store(cr, 7, 7);
- if (cr != asce) {
- __ctl_load(asce, 7, 7);
- set_cpu_flag(CIF_ASCE_SECONDARY);
- }
- local_irq_restore(flags);
- return old_fs;
-}
-EXPORT_SYMBOL(enable_sacf_uaccess);
-
-void disable_sacf_uaccess(mm_segment_t old_fs)
-{
- current->thread.mm_segment = old_fs;
- if (old_fs == USER_DS && test_facility(27)) {
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- clear_cpu_flag(CIF_ASCE_PRIMARY);
- }
-}
-EXPORT_SYMBOL(disable_sacf_uaccess);
-
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x01UL;
+ register unsigned long reg0 asm("0") = 0x81UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -135,9 +90,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long size)
{
unsigned long tmp1, tmp2;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -164,7 +117,6 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -179,7 +131,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x010000UL;
+ register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -210,9 +162,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long size)
{
unsigned long tmp1, tmp2;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -239,7 +189,6 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -254,7 +203,7 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x010001UL;
+ register unsigned long reg0 asm("0") = 0x810081UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -277,10 +226,8 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
unsigned long size)
{
- mm_segment_t old_fs;
unsigned long tmp1;
- old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -304,7 +251,6 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -318,7 +264,7 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x010000UL;
+ register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -346,10 +292,8 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
- mm_segment_t old_fs;
unsigned long tmp1, tmp2;
- old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -378,7 +322,6 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
- disable_sacf_uaccess(old_fs);
return size;
}
@@ -414,15 +357,9 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
- mm_segment_t old_fs;
- unsigned long len;
-
if (unlikely(!size))
return 0;
- old_fs = enable_sacf_uaccess();
- len = strnlen_user_srst(src, size);
- disable_sacf_uaccess(old_fs);
- return len;
+ return strnlen_user_srst(src, size);
}
EXPORT_SYMBOL(__strnlen_user);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 8f9ff7e7187d..e40a30647d99 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -255,7 +255,7 @@ static int pt_dump_init(void)
*/
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
- address_markers[IDENTITY_AFTER_END_NR].start_address = memory_end;
+ address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
address_markers[MODULES_END_NR].start_address = MODULES_END;
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 996884dcc9fd..b8210103de14 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -53,7 +53,6 @@
enum fault_type {
KERNEL_FAULT,
USER_FAULT,
- VDSO_FAULT,
GMAP_FAULT,
};
@@ -77,22 +76,16 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
trans_exc_code = regs->int_parm_long & 3;
if (likely(trans_exc_code == 0)) {
/* primary space exception */
- if (IS_ENABLED(CONFIG_PGSTE) &&
- test_pt_regs_flag(regs, PIF_GUEST_FAULT))
- return GMAP_FAULT;
- if (current->thread.mm_segment == USER_DS)
+ if (user_mode(regs))
return USER_FAULT;
- return KERNEL_FAULT;
- }
- if (trans_exc_code == 2) {
- /* secondary space exception */
- if (current->thread.mm_segment & 1) {
- if (current->thread.mm_segment == USER_DS_SACF)
- return USER_FAULT;
+ if (!IS_ENABLED(CONFIG_PGSTE))
return KERNEL_FAULT;
- }
- return VDSO_FAULT;
+ if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
+ return GMAP_FAULT;
+ return KERNEL_FAULT;
}
+ if (trans_exc_code == 2)
+ return USER_FAULT;
if (trans_exc_code == 1) {
/* access register mode, not used in the kernel */
return USER_FAULT;
@@ -188,10 +181,6 @@ static void dump_fault_info(struct pt_regs *regs)
asce = S390_lowcore.user_asce;
pr_cont("user ");
break;
- case VDSO_FAULT:
- asce = S390_lowcore.vdso_asce;
- pr_cont("vdso ");
- break;
case GMAP_FAULT:
asce = ((struct gmap *) S390_lowcore.gmap)->asce;
pr_cont("gmap ");
@@ -414,9 +403,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
switch (type) {
case KERNEL_FAULT:
goto out;
- case VDSO_FAULT:
- fault = VM_FAULT_BADMAP;
- goto out;
case USER_FAULT:
case GMAP_FAULT:
if (faulthandler_disabled() || !mm)
@@ -834,7 +820,6 @@ void do_secure_storage_access(struct pt_regs *regs)
if (rc)
BUG();
break;
- case VDSO_FAULT:
case GMAP_FAULT:
default:
do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cfb0017f33a7..64795d034926 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
#include <linux/sched/mm.h>
void s390_reset_acc(struct mm_struct *mm)
{
+ if (!mm_is_protected(mm))
+ return;
/*
* we might be called during
* reset: we walk the pages and clear
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 77767850d0d0..73a163065b95 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -49,6 +49,9 @@
#include <linux/virtio_config.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
+
+unsigned long s390_invalid_asce;
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
@@ -92,6 +95,9 @@ void __init paging_init(void)
unsigned long pgd_type, asce_bits;
psw_t psw;
+ s390_invalid_asce = (unsigned long)invalid_pg_dir;
+ s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
init_mm.pgd = swapper_pg_dir;
if (VMALLOC_END > _REGION2_SIZE) {
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
@@ -102,14 +108,14 @@ void __init paging_init(void)
}
init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.kernel_asce = init_mm.context.asce;
- S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+ S390_lowcore.user_asce = s390_invalid_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
- kasan_copy_shadow(init_mm.pgd);
+ kasan_copy_shadow_mapping();
/* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
psw.mask = __extract_psw();
psw_bits(psw).dat = 1;
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 5646b39c728a..db4d303aaaa9 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -87,7 +87,7 @@ enum populate_mode {
POPULATE_ZERO_SHADOW,
POPULATE_SHALLOW
};
-static void __init kasan_early_vmemmap_populate(unsigned long address,
+static void __init kasan_early_pgtable_populate(unsigned long address,
unsigned long end,
enum populate_mode mode)
{
@@ -123,8 +123,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pgd_populate(&init_mm, pg_dir, p4_dir);
}
- if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
- mode == POPULATE_SHALLOW) {
+ if (mode == POPULATE_SHALLOW) {
address = (address + P4D_SIZE) & P4D_MASK;
continue;
}
@@ -143,12 +142,6 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
p4d_populate(&init_mm, p4_dir, pu_dir);
}
- if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
- mode == POPULATE_SHALLOW) {
- address = (address + PUD_SIZE) & PUD_MASK;
- continue;
- }
-
pu_dir = pud_offset(p4_dir, address);
if (pud_none(*pu_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
@@ -281,7 +274,6 @@ void __init kasan_early_init(void)
unsigned long shadow_alloc_size;
unsigned long vmax_unlimited;
unsigned long initrd_end;
- unsigned long asce_type;
unsigned long memsize;
unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
pte_t pte_z;
@@ -297,32 +289,26 @@ void __init kasan_early_init(void)
memsize = get_mem_detect_end();
if (!memsize)
kasan_early_panic("cannot detect physical memory size\n");
- /* respect mem= cmdline parameter */
- if (memory_end_set && memsize > memory_end)
- memsize = memory_end;
- if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
- memsize = min(memsize, OLDMEM_SIZE);
- memsize = min(memsize, KASAN_SHADOW_START);
-
- if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
- /* 4 level paging */
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
- crst_table_init((unsigned long *)early_pg_dir,
- _REGION2_ENTRY_EMPTY);
- untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
- if (has_uv_sec_stor_limit())
- kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
- asce_type = _ASCE_TYPE_REGION2;
- } else {
- /* 3 level paging */
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
- crst_table_init((unsigned long *)early_pg_dir,
- _REGION3_ENTRY_EMPTY);
- untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE;
- asce_type = _ASCE_TYPE_REGION3;
- }
+ /*
+ * Kasan currently supports standby memory but only if it follows
+ * online memory (default allocation), i.e. no memory holes.
+ * - memsize represents end of online memory
+ * - ident_map_size represents online + standby memory, with memory
+ *   limits taken into account.
+ * Kasan maps "memsize" right away.
+ * [0, memsize] - as identity mapping
+ * [__sha(0), __sha(memsize)] - shadow memory for identity mapping
+ * The rest [memsize, ident_map_size] if memsize < ident_map_size
+ * could be mapped/unmapped dynamically later during memory hotplug.
+ */
+ memsize = min(memsize, ident_map_size);
+
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
+ crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);
+ untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
+ if (has_uv_sec_stor_limit())
+ kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d,
@@ -388,27 +374,25 @@ void __init kasan_early_init(void)
* +-----------------+ +- shadow end ---+
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
- kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
+ kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
if (IS_ENABLED(CONFIG_MODULES))
untracked_mem_end = kasan_vmax - MODULES_LEN;
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
/* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_vmemmap_populate(__sha(untracked_mem_end),
- __sha(kasan_vmax), POPULATE_SHALLOW);
+ kasan_early_pgtable_populate(__sha(untracked_mem_end), __sha(kasan_vmax),
+ POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
- kasan_early_vmemmap_populate(__sha(max_physmem_end),
- __sha(untracked_mem_end),
+ kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_mem_end),
POPULATE_ZERO_SHADOW);
- kasan_early_vmemmap_populate(__sha(kasan_vmax),
- __sha(vmax_unlimited),
+ kasan_early_pgtable_populate(__sha(kasan_vmax), __sha(vmax_unlimited),
POPULATE_ZERO_SHADOW);
/* memory allocated for identity mapping structs will be freed later */
pgalloc_freeable = pgalloc_pos;
/* populate identity mapping */
- kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
- kasan_set_pgd(early_pg_dir, asce_type);
+ kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
+ kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
kasan_enable_dat();
/* enable kasan */
init_task.kasan_depth = 0;
@@ -416,7 +400,7 @@ void __init kasan_early_init(void)
sclp_early_printk("KernelAddressSanitizer initialized\n");
}
-void __init kasan_copy_shadow(pgd_t *pg_dir)
+void __init kasan_copy_shadow_mapping(void)
{
/*
* At this point we are still running on early pages setup early_pg_dir,
@@ -428,24 +412,13 @@ void __init kasan_copy_shadow(pgd_t *pg_dir)
pgd_t *pg_dir_dst;
p4d_t *p4_dir_src;
p4d_t *p4_dir_dst;
- pud_t *pu_dir_src;
- pud_t *pu_dir_dst;
pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
- pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
+ pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
- if (!p4d_folded(*p4_dir_src)) {
- /* 4 level paging */
- memcpy(p4_dir_dst, p4_dir_src,
- (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
- return;
- }
- /* 3 level paging */
- pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
- pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
- memcpy(pu_dir_dst, pu_dir_src,
- (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
+ memcpy(p4_dir_dst, p4_dir_src,
+ (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}
void __init kasan_free_early_identity(void)
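
With the 3-level variant gone, the shadow is always laid out for 4-level paging and the BUILD_BUG_ON alignment checks apply unconditionally. As a reminder of what __sha() computes, the generic KASAN shadow translation maps 8 bytes of memory to 1 shadow byte; KASAN_SHADOW_OFFSET is a per-configuration constant and is assumed here:

    /* generic KASAN address-to-shadow translation (scale shift of 3) */
    static inline void *kasan_mem_to_shadow(const void *addr)
    {
        return (void *)(((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET);
    }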
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 11d2c8395e2a..4e87c819ddea 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -70,19 +70,10 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- /* we must change all active ASCEs to avoid the creation of new TLBs */
+ /* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
- if (current->thread.mm_segment == USER_DS) {
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- /* Mark user-ASCE present in CR1 */
- clear_cpu_flag(CIF_ASCE_PRIMARY);
- }
- if (current->thread.mm_segment == USER_DS_SACF) {
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- /* enable_sacf_uaccess does all or nothing */
- WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
- }
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
}
__tlb_flush_local();
}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b239f2ba93b0..01f3a5f58e64 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -76,20 +76,20 @@ static void vmem_pte_free(unsigned long *table)
/*
* The unused vmemmap range, which was not yet memset(PAGE_UNUSED) ranges
- * from unused_pmd_start to next PMD_SIZE boundary.
+ * from unused_sub_pmd_start to next PMD_SIZE boundary.
*/
-static unsigned long unused_pmd_start;
+static unsigned long unused_sub_pmd_start;
-static void vmemmap_flush_unused_pmd(void)
+static void vmemmap_flush_unused_sub_pmd(void)
{
- if (!unused_pmd_start)
+ if (!unused_sub_pmd_start)
return;
- memset(__va(unused_pmd_start), PAGE_UNUSED,
- ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
- unused_pmd_start = 0;
+ memset(__va(unused_sub_pmd_start), PAGE_UNUSED,
+ ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
+ unused_sub_pmd_start = 0;
}
-static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
/*
* As we expect to add in the same granularity as we remove, it's
@@ -106,24 +106,24 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
* We only optimize if the new used range directly follows the
* previously unused range (esp., when populating consecutive sections).
*/
- if (unused_pmd_start == start) {
- unused_pmd_start = end;
- if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
- unused_pmd_start = 0;
+ if (unused_sub_pmd_start == start) {
+ unused_sub_pmd_start = end;
+ if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
+ unused_sub_pmd_start = 0;
return;
}
- vmemmap_flush_unused_pmd();
- __vmemmap_use_sub_pmd(start, end);
+ vmemmap_flush_unused_sub_pmd();
+ vmemmap_mark_sub_pmd_used(start, end);
}
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
- vmemmap_flush_unused_pmd();
+ vmemmap_flush_unused_sub_pmd();
/* Our memmap page might already be filled with PAGE_UNUSED ... */
- __vmemmap_use_sub_pmd(start, end);
+ vmemmap_mark_sub_pmd_used(start, end);
/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
if (!IS_ALIGNED(start, PMD_SIZE))
@@ -134,7 +134,7 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
* unused range in the populated PMD.
*/
if (!IS_ALIGNED(end, PMD_SIZE))
- unused_pmd_start = end;
+ unused_sub_pmd_start = end;
}
/* Returns true if the PMD is completely unused and can be freed. */
@@ -142,7 +142,7 @@ static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
- vmemmap_flush_unused_pmd();
+ vmemmap_flush_unused_sub_pmd();
memset(__va(start), PAGE_UNUSED, end - start);
return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
}
@@ -223,7 +223,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
if (!add) {
if (pmd_none(*pmd))
continue;
- if (pmd_large(*pmd) && !add) {
+ if (pmd_large(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
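
The renamed sub-PMD helpers all serve one idea: when the memmap is backed by 2 MiB pages, the unused parts of a partially used page are poisoned with PAGE_UNUSED, so a page whose every byte is PAGE_UNUSED can be freed. A small userspace simulation of that test; sizes and the poison value are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define PMD_SIZE    (2UL << 20)    /* one 2 MiB "PMD page" */
    #define PAGE_UNUSED 0xfd           /* poison byte, value assumed */

    static unsigned char pmd_page[PMD_SIZE];

    static bool pmd_fully_unused(void)
    {
        /* the kernel uses memchr_inv(); a plain loop does it in userspace */
        for (size_t i = 0; i < PMD_SIZE; i++)
            if (pmd_page[i] != PAGE_UNUSED)
                return false;
        return true;
    }

    int main(void)
    {
        memset(pmd_page, PAGE_UNUSED, PMD_SIZE);   /* nothing used yet */
        printf("fully unused: %d\n", pmd_fully_unused());
        memset(pmd_page, 0, 4096);                 /* one subsection in use */
        printf("fully unused: %d\n", pmd_fully_unused());
        return 0;
    }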
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 570016ae8bcd..41df8fcfddde 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -851,8 +851,10 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
- if (!test_facility(69) || !test_facility(71))
+ if (!test_facility(69) || !test_facility(71)) {
+ pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
return 0;
+ }
if (test_facility(153) && !s390_pci_no_mio) {
static_branch_enable(&have_mio);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index d33f21545dfd..9a6bae503fe6 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -101,6 +101,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
if (ret)
break;
+ /* the PCI function will be scanned once function 0 appears */
+ if (!zdev->zbus->bus)
+ break;
+
pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
if (!pdev)
break;
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 743f257cf2cb..9dd5ad1b553d 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -103,9 +103,10 @@ static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *de
{
struct msi_desc *entry = irq_get_msi_desc(data->irq);
struct msi_msg msg = entry->msg;
+ int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
msg.address_lo &= 0xff0000ff;
- msg.address_lo |= (cpumask_first(dest) << 8);
+ msg.address_lo |= (cpu_addr << 8);
pci_write_msi_msg(data->irq, &msg);
return IRQ_SET_MASK_OK;
@@ -178,9 +179,7 @@ static void zpci_handle_fallback_irq(void)
if (atomic_inc_return(&cpu_data->scheduled) > 1)
continue;
- cpu_data->csd.func = zpci_handle_remote_irq;
- cpu_data->csd.info = &cpu_data->scheduled;
- cpu_data->csd.flags = 0;
+ INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
smp_call_function_single_async(cpu, &cpu_data->csd);
}
}
@@ -238,6 +237,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
unsigned long bit;
struct msi_desc *msi;
struct msi_msg msg;
+ int cpu_addr;
int rc, irq;
zdev->aisb = -1UL;
@@ -287,9 +287,15 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
handle_percpu_irq);
msg.data = hwirq - bit;
if (irq_delivery == DIRECTED) {
+ if (msi->affinity)
+ cpu = cpumask_first(&msi->affinity->mask);
+ else
+ cpu = 0;
+ cpu_addr = smp_cpu_get_cpu_address(cpu);
+
msg.address_lo = zdev->msi_addr & 0xff0000ff;
- msg.address_lo |= msi->affinity ?
- (cpumask_first(&msi->affinity->mask) << 8) : 0;
+ msg.address_lo |= (cpu_addr << 8);
+
for_each_possible_cpu(cpu) {
airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 401cf670a243..18f2d10c3176 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -93,12 +93,10 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
{
int size, rc = 0;
u8 status = 0;
- mm_segment_t old_fs;
if (!src)
return -EINVAL;
- old_fs = enable_sacf_uaccess();
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst,
(u64 __force) src, n,
@@ -113,39 +111,20 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
dst += size;
n -= size;
}
- disable_sacf_uaccess(old_fs);
if (rc)
zpci_err_mmio(rc, status, (__force u64) dst);
return rc;
}
-static long get_pfn(unsigned long user_addr, unsigned long access,
- unsigned long *pfn)
-{
- struct vm_area_struct *vma;
- long ret;
-
- mmap_read_lock(current->mm);
- ret = -EINVAL;
- vma = find_vma(current->mm, user_addr);
- if (!vma)
- goto out;
- ret = -EACCES;
- if (!(vma->vm_flags & access))
- goto out;
- ret = follow_pfn(vma, user_addr, pfn);
-out:
- mmap_read_unlock(current->mm);
- return ret;
-}
-
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
const void __user *, user_buffer, size_t, length)
{
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
- unsigned long pfn;
+ struct vm_area_struct *vma;
+ pte_t *ptep;
+ spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
@@ -158,7 +137,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
* We only support write access to MIO capable devices if we are on
* a MIO enabled system. Otherwise we would have to check for every
* address if it is a special ZPCI_ADDR and would have to do
- * a get_pfn() which we don't need for MIO capable devices. Currently
+ * a pfn lookup which we don't need for MIO capable devices. Currently
* ISM devices are the only devices without MIO support and there is no
* known need for accessing these from userspace.
*/
@@ -176,21 +155,37 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
} else
buf = local_buf;
- ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
+ ret = -EFAULT;
+ if (copy_from_user(buf, user_buffer, length))
+ goto out_free;
+
+ mmap_read_lock(current->mm);
+ ret = -EINVAL;
+ vma = find_vma(current->mm, mmio_addr);
+ if (!vma)
+ goto out_unlock_mmap;
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ goto out_unlock_mmap;
+ ret = -EACCES;
+ if (!(vma->vm_flags & VM_WRITE))
+ goto out_unlock_mmap;
+
+ ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
if (ret)
- goto out;
- io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
+ goto out_unlock_mmap;
+
+ io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
(mmio_addr & ~PAGE_MASK));
- ret = -EFAULT;
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
- goto out;
-
- if (copy_from_user(buf, user_buffer, length))
- goto out;
+ goto out_unlock_pt;
ret = zpci_memcpy_toio(io_addr, buf, length);
-out:
+out_unlock_pt:
+ pte_unmap_unlock(ptep, ptl);
+out_unlock_mmap:
+ mmap_read_unlock(current->mm);
+out_free:
if (buf != local_buf)
kfree(buf);
return ret;
@@ -248,9 +243,7 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
{
int size, rc = 0;
u8 status;
- mm_segment_t old_fs;
- old_fs = enable_sacf_uaccess();
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src,
(u64 __force) dst, n,
@@ -262,7 +255,6 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
dst += size;
n -= size;
}
- disable_sacf_uaccess(old_fs);
if (rc)
zpci_err_mmio(rc, status, (__force u64) dst);
return rc;
@@ -274,7 +266,9 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
u8 local_buf[64];
void __iomem *io_addr;
void *buf;
- unsigned long pfn;
+ struct vm_area_struct *vma;
+ pte_t *ptep;
+ spinlock_t *ptl;
long ret;
if (!zpci_is_enabled())
@@ -287,7 +281,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
* We only support read access to MIO capable devices if we are on
* a MIO enabled system. Otherwise we would have to check for every
* address if it is a special ZPCI_ADDR and would have to do
- * a get_pfn() which we don't need for MIO capable devices. Currently
+ * a pfn lookup which we don't need for MIO capable devices. Currently
* ISM devices are the only devices without MIO support and there is no
* known need for accessing these from userspace.
*/
@@ -306,22 +300,38 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
buf = local_buf;
}
- ret = get_pfn(mmio_addr, VM_READ, &pfn);
+ mmap_read_lock(current->mm);
+ ret = -EINVAL;
+ vma = find_vma(current->mm, mmio_addr);
+ if (!vma)
+ goto out_unlock_mmap;
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ goto out_unlock_mmap;
+ ret = -EACCES;
+ if (!(vma->vm_flags & VM_WRITE))
+ goto out_unlock_mmap;
+
+ ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
if (ret)
- goto out;
- io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ goto out_unlock_mmap;
+
+ io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
+ (mmio_addr & ~PAGE_MASK));
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
ret = -EFAULT;
- goto out;
+ goto out_unlock_pt;
}
ret = zpci_memcpy_fromio(buf, io_addr, length);
- if (ret)
- goto out;
- if (copy_to_user(user_buffer, buf, length))
+
+out_unlock_pt:
+ pte_unmap_unlock(ptep, ptl);
+out_unlock_mmap:
+ mmap_read_unlock(current->mm);
+
+ if (!ret && copy_to_user(user_buffer, buf, length))
ret = -EFAULT;
-out:
if (buf != local_buf)
kfree(buf);
return ret;
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 5a10ce34b95d..3d1c31e0cf3d 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -62,14 +62,15 @@
jh 10b
.endm
-.macro START_NEXT_KERNEL base
+.macro START_NEXT_KERNEL base subcode
lg %r4,kernel_entry-\base(%r13)
lg %r5,load_psw_mask-\base(%r13)
ogr %r4,%r5
stg %r4,0(%r0)
xgr %r0,%r0
- diag %r0,%r0,0x308
+ lghi %r1,\subcode
+ diag %r0,%r1,0x308
.endm
.text
@@ -123,7 +124,7 @@ ENTRY(purgatory_start)
je .start_crash_kernel
/* start normal kernel */
- START_NEXT_KERNEL .base_crash
+ START_NEXT_KERNEL .base_crash 0
.return_old_kernel:
lmg %r6,%r15,gprregs-.base_crash(%r13)
@@ -227,7 +228,7 @@ ENTRY(purgatory_start)
MEMCPY %r9,%r10,%r11
/* start crash kernel */
- START_NEXT_KERNEL .base_dst
+ START_NEXT_KERNEL .base_dst 1
load_psw_mask:
diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c
index 0a423bcf6746..030efda05dbe 100644
--- a/arch/s390/purgatory/purgatory.c
+++ b/arch/s390/purgatory/purgatory.c
@@ -9,7 +9,7 @@
#include <linux/kexec.h>
#include <linux/string.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/purgatory.h>
int verify_sha256_digest(void)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 159da4ed578f..5fa580219a86 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,7 +16,6 @@ config SUPERH
select CPU_NO_EFFICIENT_FFS
select DMA_DECLARE_COHERENT
select GENERIC_ATOMIC64
- select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_SHOW
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index f38adc189b83..b07fbc7f7bc6 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -13,9 +13,6 @@
#include <linux/kernel.h>
#include <linux/threads.h>
#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <asm/kmap_types.h>
-#endif
/*
* Here we define all the compile-time 'special' virtual
@@ -53,11 +50,6 @@ enum fixed_addresses {
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
-#endif
-
#ifdef CONFIG_IOREMAP_FIXED
/*
* FIX_IOREMAP entries are useful for mapping physical address
diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h
index edaea3559a23..9fe4495a8e90 100644
--- a/arch/sh/include/asm/hardirq.h
+++ b/arch/sh/include/asm/hardirq.h
@@ -2,16 +2,10 @@
#ifndef __ASM_SH_HARDIRQ_H
#define __ASM_SH_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
- unsigned int __nmi_count; /* arch dependent */
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
extern void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq ack_bad_irq
+#define ARCH_WANTS_NMI_IRQSTAT
+
+#include <asm-generic/hardirq.h>
#endif /* __ASM_SH_HARDIRQ_H */
diff --git a/arch/sh/include/asm/kmap_types.h b/arch/sh/include/asm/kmap_types.h
deleted file mode 100644
index b78107f923dd..000000000000
--- a/arch/sh/include/asm/kmap_types.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SH_KMAP_TYPES_H
-#define __SH_KMAP_TYPES_H
-
-/* Dummy header just to define km_type. */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index f664e51e8a15..78eef4e7d5df 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -84,6 +84,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
* Initialize the context related info for a new mm_struct
* instance.
*/
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -120,9 +121,7 @@ static inline void switch_mm(struct mm_struct *prev,
activate_context(next, cpu);
}
-#define activate_mm(prev, next) switch_mm((prev),(next),NULL)
-#define deactivate_mm(tsk,mm) do { } while (0)
-#define enter_lazy_tlb(mm,tsk) do { } while (0)
+#include <asm-generic/mmu_context.h>
#else
@@ -133,7 +132,7 @@ static inline void switch_mm(struct mm_struct *prev,
#define set_TTB(pgd) do { } while (0)
#define get_TTB() (0)
-#include <asm-generic/mmu_context.h>
+#include <asm-generic/nommu_context.h>
#endif /* CONFIG_MMU */
diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h
index 71bf12ef1f65..bc5034fa6249 100644
--- a/arch/sh/include/asm/mmu_context_32.h
+++ b/arch/sh/include/asm/mmu_context_32.h
@@ -2,15 +2,6 @@
#ifndef __ASM_SH_MMU_CONTEXT_32_H
#define __ASM_SH_MMU_CONTEXT_32_H
-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
- /* Do nothing */
-}
-
#ifdef CONFIG_CPU_HAS_PTEAEX
static inline void set_asid(unsigned long asid)
{
diff --git a/arch/sh/include/asm/seccomp.h b/arch/sh/include/asm/seccomp.h
index 54111e4d32b8..d4578395fd66 100644
--- a/arch/sh/include/asm/seccomp.h
+++ b/arch/sh/include/asm/seccomp.h
@@ -8,4 +8,14 @@
#define __NR_seccomp_exit __NR_exit
#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define __SECCOMP_ARCH_LE __AUDIT_ARCH_LE
+#else
+#define __SECCOMP_ARCH_LE 0
+#endif
+
+#define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_SH | __SECCOMP_ARCH_LE)
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "sh"
+
#endif /* __ASM_SECCOMP_H */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0dc0f52f9bb8..f59814983bd5 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@ static void (*sh_idle)(void);
void default_idle(void)
{
set_bl_bit();
- local_irq_enable();
+ raw_local_irq_enable();
/* Isn't this racy ? */
cpu_sleep();
clear_bl_bit();
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 5717c7cbdd97..ab5f790b0cd2 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -44,7 +44,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%*s: ", prec, "NMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", nmi_count(j));
+ seq_printf(p, "%10u ", per_cpu(irq_stat.__nmi_count, j));
seq_printf(p, " Non-maskable interrupts\n");
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 9c3d32b80038..f5beecdac693 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -186,7 +186,7 @@ BUILD_TRAP_HANDLER(nmi)
arch_ftrace_nmi_enter();
nmi_enter();
- nmi_count(cpu)++;
+ this_cpu_inc(irq_stat.__nmi_count);
switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
case NOTIFY_OK:
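
With ARCH_WANTS_NMI_IRQSTAT, sh drops its private irq_cpustat_t and the NMI counter moves into the generic per-cpu irq_stat, accessed with the standard per-cpu primitives seen in the two hunks above. The general shape of that pattern, with a made-up counter name:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU(unsigned int, demo_count);

    void on_event(void)
    {
        this_cpu_inc(demo_count);            /* lock-free: this CPU's copy */
    }

    unsigned int total_events(void)
    {
        unsigned int sum = 0;
        int cpu;

        for_each_online_cpu(cpu)
            sum += per_cpu(demo_count, cpu); /* sum every CPU's copy */
        return sum;
    }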
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 3348e0c4d769..0db6919af8d3 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -362,9 +362,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#endif
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
" lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
@@ -376,11 +373,6 @@ void __init mem_init(void)
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
-#ifdef CONFIG_HIGHMEM
- PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
- (LAST_PKMAP*PAGE_SIZE) >> 10,
-#endif
-
(unsigned long)VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a6ca135442f9..c9c34dc52b7d 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -39,7 +39,6 @@ config SPARC
select HAVE_EBPF_JIT if SPARC64
select HAVE_DEBUG_BUGVERBOSE
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select MODULES_USE_ELF_RELA
@@ -88,6 +87,7 @@ config SPARC64
select HAVE_C_RECORDMCOUNT
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select HAVE_NMI
select HAVE_REGS_AND_STACK_ACCESS_API
select ARCH_USE_QUEUED_RWLOCKS
@@ -139,6 +139,7 @@ config MMU
config HIGHMEM
bool
default y if SPARC32
+ select KMAP_LOCAL
config ZONE_DMA
bool
@@ -148,9 +149,6 @@ config GENERIC_ISA_DMA
bool
default y if SPARC32
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y if SPARC64
-
config PGTABLE_LEVELS
default 4 if 64BIT
default 3
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index 4e9323229e71..82efb7f81c28 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -35,7 +35,7 @@ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
if (keylen != sizeof(u32))
return -EINVAL;
- *(__le32 *)mctx = le32_to_cpup((__le32 *)key);
+ *mctx = le32_to_cpup((__le32 *)key);
return 0;
}
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 111283fe837e..511db98d590a 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -33,10 +33,11 @@ static int md5_sparc64_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = cpu_to_le32(MD5_H0);
- mctx->hash[1] = cpu_to_le32(MD5_H1);
- mctx->hash[2] = cpu_to_le32(MD5_H2);
- mctx->hash[3] = cpu_to_le32(MD5_H3);
+ mctx->hash[0] = MD5_H0;
+ mctx->hash[1] = MD5_H1;
+ mctx->hash[2] = MD5_H2;
+ mctx->hash[3] = MD5_H3;
+ le32_to_cpu_array(mctx->hash, 4);
mctx->byte_count = 0;
return 0;
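
Storing the native-endian constants and then byte-swapping the array in place avoids assigning a __le32 value to a u32 slot, which is what sparse flagged in the old code. Since cpu_to_le32() and le32_to_cpu() are both swab32() on big-endian (and no-ops on little-endian), the bytes in memory are unchanged. A one-element sketch:

/* Sketch: the refactored form leaves the identical memory image. */
static void init_h0(u32 *h)
{
	*h = MD5_H0;
	le32_to_cpu_array(h, 1);	/* same swap cpu_to_le32() performed */
}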
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index dc017782be52..86a654cce5ab 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <asm/pstate.h>
#include <asm/elf.h>
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index ca2547df9652..60ec524cf9ca 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/pstate.h>
#include <asm/elf.h>
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 3b2ca732ff7a..273ce21918c1 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/pstate.h>
#include <asm/elf.h>
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 6c35f0d27ee1..875116209ec1 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/pgtable.h>
#include <asm/vaddrs.h>
-#include <asm/kmap_types.h>
#include <asm/pgtsrmmu.h>
/* declarations for highmem.c */
@@ -33,8 +32,6 @@ extern unsigned long highstart_pfn, highend_pfn;
#define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
extern pte_t *pkmap_page_table;
-void kmap_init(void) __init;
-
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
@@ -53,6 +50,11 @@ void kmap_init(void) __init;
#define flush_cache_kmaps() flush_cache_all()
+/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
+#define arch_kmap_local_post_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_post_unmap(vaddr) flush_cache_all()
+
+
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/sparc/include/asm/kmap_types.h b/arch/sparc/include/asm/kmap_types.h
deleted file mode 100644
index 55a99b6bd91e..000000000000
--- a/arch/sparc/include/asm/kmap_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-/* Dummy header just to define km_type. None of this
- * is actually used on sparc. -DaveM
- */
-
-#include <asm-generic/kmap_types.h>
-
-#endif
diff --git a/arch/sparc/include/asm/mmu_context_32.h b/arch/sparc/include/asm/mmu_context_32.h
index 7ddcb8badf70..509043f81560 100644
--- a/arch/sparc/include/asm/mmu_context_32.h
+++ b/arch/sparc/include/asm/mmu_context_32.h
@@ -6,13 +6,10 @@
#include <asm-generic/mm_hooks.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
/* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
*/
+#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
/* Destroy a dead context. This occurs when mmput drops the
@@ -20,17 +17,18 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
* all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state.
*/
+#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);
/* Switch the current MM context. */
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk);
-#define deactivate_mm(tsk,mm) do { } while (0)
-
/* Activate a new MM instance for the current task. */
#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
+#include <asm-generic/mmu_context.h>
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 312fcee8df2b..7a8380c63aab 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -16,17 +16,16 @@
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
+
+#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);
void __tsb_context_switch(unsigned long pgd_pa,
@@ -136,7 +135,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
spin_unlock_irqrestore(&mm->context.lock, flags);
}
-#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#define __HAVE_ARCH_START_CONTEXT_SWITCH
@@ -187,6 +185,8 @@ static inline void finish_arch_post_lock_switch(void)
}
}
+#include <asm-generic/mmu_context.h>
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
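
The repeated `#define init_new_context init_new_context` lines are override guards for the new asm-generic header: any hook an architecture does not mark as overridden before the include picks up a no-op fallback. A sketch of the guard shape, assuming a simplified form of asm-generic/mmu_context.h:

/* Sketch: the fallback pattern the guards above key off. */
#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	return 0;	/* arch did not override: nothing to set up */
}
#endif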
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7ef6affa105e..550d3904de65 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1121,6 +1121,19 @@ extern unsigned long cmdline_memory_size;
asmlinkage void do_sparc64_fault(struct pt_regs *regs);
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define pud_leaf_size pud_leaf_size
+extern unsigned long pud_leaf_size(pud_t pud);
+
+#define pmd_leaf_size pmd_leaf_size
+extern unsigned long pmd_leaf_size(pmd_t pmd);
+
+#define pte_leaf_size pte_leaf_size
+extern unsigned long pte_leaf_size(pte_t pte);
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(_SPARC64_PGTABLE_H) */
diff --git a/arch/sparc/include/asm/vaddrs.h b/arch/sparc/include/asm/vaddrs.h
index 84d054b07a6f..4fec0341e2a8 100644
--- a/arch/sparc/include/asm/vaddrs.h
+++ b/arch/sparc/include/asm/vaddrs.h
@@ -32,13 +32,13 @@
#define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */
#ifndef __ASSEMBLY__
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN,
- FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS),
+ FIX_KMAP_END = (KM_MAX_IDX * NR_CPUS),
#endif
__end_of_fixed_addresses
};
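
KM_MAX_IDX, from the new asm/kmap_size.h, bounds the per-CPU stack of kmap_local slots that replaces the old km_type enum. The slot-to-address math stays the same; a hedged sketch:

/* Sketch: fixmap virtual address of kmap slot `idx` on `cpu`. */
static unsigned long kmap_slot_vaddr(int idx, int cpu)
{
	return __fix_to_virt(FIX_KMAP_BEGIN + idx + KM_MAX_IDX * cpu);
}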
diff --git a/arch/sparc/include/uapi/asm/signal.h b/arch/sparc/include/uapi/asm/signal.h
index ff9505923b9a..53758d53ac0e 100644
--- a/arch/sparc/include/uapi/asm/signal.h
+++ b/arch/sparc/include/uapi/asm/signal.h
@@ -137,13 +137,11 @@ struct sigstack {
#define SA_STACK _SV_SSTACK
#define SA_ONSTACK _SV_SSTACK
#define SA_RESTART _SV_INTR
-#define SA_ONESHOT _SV_RESET
+#define SA_RESETHAND _SV_RESET
#define SA_NODEFER 0x20u
#define SA_NOCLDWAIT 0x100u
#define SA_SIGINFO 0x200u
-#define SA_NOMASK SA_NODEFER
-
#define SIG_BLOCK 0x01 /* for blocking signals */
#define SIG_UNBLOCK 0x02 /* for unblocking signals */
#define SIG_SETMASK 0x04 /* for setting the signal mask */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 8029b681fc7c..848a22fbac20 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -117,6 +117,9 @@
#define SO_DETACH_REUSEPORT_BPF 0x0047
+#define SO_PREFER_BUSY_POLL 0x0048
+#define SO_BUSY_POLL_BUDGET 0x0049
+
#if !defined(__KERNEL__)
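
These mirror the generic socket options added for NAPI busy polling; sparc carries its own numbers because its SOL_SOCKET value space differs from the asm-generic one. A hedged userspace sketch (semantics assumed from the generic implementation: a boolean preference plus an int packet budget):

#include <sys/socket.h>

/* Sketch: opting a socket into preferred busy polling. */
static int enable_busy_poll(int fd)
{
	int on = 1, budget = 8;		/* example budget, not a default */

	if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &on, sizeof(on)))
		return -1;
	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
			  &budget, sizeof(budget));
}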
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 065e2d4b7290..396f46bca52e 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -50,7 +50,7 @@ static void pmc_leon_idle_fixup(void)
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
/* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
+ raw_local_irq_enable();
__asm__ __volatile__ (
"wr %%g0, %%asr19\n"
@@ -66,7 +66,7 @@ static void pmc_leon_idle_fixup(void)
static void pmc_leon_idle(void)
{
/* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
+ raw_local_irq_enable();
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index adfcaeab3ddc..a02363735915 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -74,7 +74,7 @@ void arch_cpu_idle(void)
{
if (sparc_idle)
(*sparc_idle)();
- local_irq_enable();
+ raw_local_irq_enable();
}
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index a75093b993f9..6f8c7822fc06 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -62,11 +62,11 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
- local_irq_enable();
+ raw_local_irq_enable();
} else {
unsigned long pstate;
- local_irq_enable();
+ raw_local_irq_enable();
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 0c0268e77155..d839956407a7 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -71,7 +71,7 @@
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
- mov 1, %o3
+ mov -1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f
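
Seeding the accumulator with -1 rather than 1 follows the reworked csum_and_copy calling convention: a return of 0 now signals a fault, and a one's-complement sum that starts from all-ones can never fold down to 0. A C model of the folding, as a sketch:

/* Sketch: 32-bit one's-complement sum seeded with ~0U. A successful
 * run never yields 0, which the new API reserves for faults. */
static u32 csum32(const u32 *p, size_t words)
{
	u64 sum = ~0U;			/* the "mov -1, %o3" above */

	while (words--)
		sum += *p++;
	while (sum >> 32)		/* end-around carry fold */
		sum = (sum & 0xffffffffULL) + (sum >> 32);
	return (u32)sum;
}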
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index b078205b70e0..68db1f859b02 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -15,6 +15,3 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-
-# Only used by sparc32
-obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
deleted file mode 100644
index 8f2a2afb048a..000000000000
--- a/arch/sparc/mm/highmem.c
+++ /dev/null
@@ -1,115 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * Provides kernel-static versions of atomic kmap functions originally
- * found as inlines in include/asm-sparc/highmem.h. These became
- * needed as kmap_atomic() and kunmap_atomic() started getting
- * called from within modules.
- * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
- *
- * But kmap_atomic() and kunmap_atomic() cannot be inlined in
- * modules because they are loaded with btfixup-ped functions.
- */
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need it.
- *
- * XXX This is an old text. Actually, it's good to use atomic kmaps,
- * provided you remember that they are atomic and not try to sleep
- * with a kmap taken, much like a spinlock. Non-atomic kmaps are
- * shared by CPUs, and so precious, and establishing them requires IPI.
- * Atomic kmaps are lightweight and we may have NCPUS more of them.
- */
-#include <linux/highmem.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/vaddrs.h>
-
-static pte_t *kmap_pte;
-
-void __init kmap_init(void)
-{
- unsigned long address = __fix_to_virt(FIX_KMAP_BEGIN);
-
- /* cache the first kmap pte */
- kmap_pte = virt_to_kpte(address);
-}
-
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- long idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-
-/* XXX Fix - Anton */
-#if 0
- __flush_cache_one(vaddr);
-#else
- flush_cache_all();
-#endif
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, prot));
-/* XXX Fix - Anton */
-#if 0
- __flush_tlb_one(vaddr);
-#else
- flush_tlb_all();
-#endif
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
-
- if (vaddr < FIXADDR_START)
- return;
-
- type = kmap_atomic_idx();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- unsigned long idx;
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
-
- /* XXX Fix - Anton */
-#if 0
- __flush_cache_one(vaddr);
-#else
- flush_cache_all();
-#endif
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- /* XXX Fix - Anton */
-#if 0
- __flush_tlb_one(vaddr);
-#else
- flush_tlb_all();
-#endif
- }
-#endif
-
- kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ec423b5f17dd..ad4b42f04988 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -247,14 +247,17 @@ static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
return shift;
}
-static unsigned int huge_tte_to_shift(pte_t entry)
+static unsigned long tte_to_shift(pte_t entry)
{
- unsigned long shift;
-
if (tlb_type == hypervisor)
- shift = sun4v_huge_tte_to_shift(entry);
- else
- shift = sun4u_huge_tte_to_shift(entry);
+ return sun4v_huge_tte_to_shift(entry);
+
+ return sun4u_huge_tte_to_shift(entry);
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+ unsigned long shift = tte_to_shift(entry);
if (shift == PAGE_SHIFT)
WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
@@ -272,6 +275,10 @@ static unsigned long huge_tte_to_size(pte_t pte)
return size;
}
+unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
+unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
+unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
+
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
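
The three one-line wrappers expose the mapping size encoded in a hugepage TTE to generic code (perf's page-size reporting is the consumer); on sparc64 the size cannot be inferred from the page-table level alone. A hypothetical caller, as a sketch:

/* Sketch (hypothetical caller; the leaf test stands in for whatever
 * check the generic walker actually uses): */
static u64 report_page_size(pmd_t pmd)
{
	if (pmd_large(pmd))			/* mapping is a leaf here */
		return pmd_leaf_size(pmd);	/* 1UL << tte_to_shift(...) */
	return PAGE_SIZE;
}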
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 96edf64d4fb3..182bb7bdaa0a 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2894,7 +2894,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
if (!page)
return NULL;
if (!pgtable_pte_page_ctor(page)) {
- free_unref_page(page);
+ __free_page(page);
return NULL;
}
return (pte_t *) page_address(page);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 0070f8b9a753..a03caa5f6628 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -971,8 +971,6 @@ void __init srmmu_paging_init(void)
sparc_context_init(num_contexts);
- kmap_init();
-
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 4b799fad8b48..43333e36e0ba 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -17,7 +17,6 @@ config UML
select NO_DMA
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
- select GENERIC_CLOCKEVENTS
select HAVE_GCC_PLUGINS
select SET_FS
select TTY # Needed for line.c
diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h
index 2c697a145ac1..2efac5827188 100644
--- a/arch/um/include/asm/fixmap.h
+++ b/arch/um/include/asm/fixmap.h
@@ -3,7 +3,6 @@
#define __UM_FIXMAP_H
#include <asm/processor.h>
-#include <asm/kmap_types.h>
#include <asm/archparam.h>
#include <asm/page.h>
#include <linux/threads.h>
diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h
index b426796d26fd..52e2c36267a9 100644
--- a/arch/um/include/asm/hardirq.h
+++ b/arch/um/include/asm/hardirq.h
@@ -2,22 +2,7 @@
#ifndef __ASM_UM_HARDIRQ_H
#define __ASM_UM_HARDIRQ_H
-#include <linux/cache.h>
-#include <linux/threads.h>
-
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#include <linux/irq.h>
-
-#ifndef ack_bad_irq
-static inline void ack_bad_irq(unsigned int irq)
-{
- printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
-}
-#endif
+#include <asm-generic/hardirq.h>
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
deleted file mode 100644
index b0bd12de1d23..000000000000
--- a/arch/um/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- */
-
-#ifndef __UM_KMAP_TYPES_H
-#define __UM_KMAP_TYPES_H
-
-/* No more #include "asm/arch/kmap_types.h" ! */
-
-#define KM_TYPE_NR 14
-
-#endif
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 17ddd4edf875..f8a100770691 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -37,10 +37,9 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
* end asm-generic/mm_hooks.h functions
*/
-#define deactivate_mm(tsk,mm) do { } while (0)
-
extern void force_flush_all(void);
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
/*
@@ -66,13 +65,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
}
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-}
-
+#define init_new_context init_new_context
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
+#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
+#include <asm-generic/mmu_context.h>
+
#endif
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 5393e13e07e0..2bbf28cf3aa9 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -33,7 +33,13 @@ do { \
} while (0)
#ifdef CONFIG_3_LEVEL_PGTABLES
-#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
+
+#define __pmd_free_tlb(tlb, pmd, address) \
+do { \
+ pgtable_pmd_page_dtor(virt_to_page(pmd)); \
+ tlb_remove_page((tlb),virt_to_page(pmd)); \
+} while (0) \
+
#endif
#endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 3bed09538dd9..9505a7e87396 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -217,7 +217,7 @@ void arch_cpu_idle(void)
{
cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
um_idle_sleep();
- local_irq_enable();
+ raw_local_irq_enable();
}
int __cant_sleep(void) {
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index 95c355181dcd..bfb70c456b30 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -21,7 +21,7 @@
* on some systems.
*/
-void __section(".__syscall_stub")
+void __attribute__ ((__section__ (".__syscall_stub")))
stub_clone_handler(void)
{
struct stub_data *data = (struct stub_data *) STUB_DATA;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f6946b81f74a..a8bd298e45b1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -14,10 +14,11 @@ config X86_32
select ARCH_WANT_IPC_PARSE_VERSION
select CLKSRC_I8253
select CLONE_BACKWARDS
+ select GENERIC_VDSO_32
select HAVE_DEBUG_STACKOVERFLOW
+ select KMAP_LOCAL
select MODULES_USE_ELF_REL
select OLD_SIGACTION
- select GENERIC_VDSO_32
config X86_64
def_bool y
@@ -91,7 +92,9 @@ config X86
select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
+ select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
@@ -100,6 +103,7 @@ config X86
select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_HUGE_PMD_SHARE
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
select BUILDTIME_TABLE_SORT
select CLKEVT_I8253
@@ -108,7 +112,6 @@ config X86
select DCACHE_WORD_ACCESS
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
- select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
select GENERIC_CLOCKEVENTS_MIN_ADJUST
select GENERIC_CMOS_UPDATE
@@ -162,6 +165,7 @@ config X86
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING if X86_64
+ select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
@@ -198,6 +202,7 @@ config X86
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_MOVE_PMD
+ select HAVE_MOVE_PUD
select HAVE_NMI
select HAVE_OPROFILE
select HAVE_OPTPROBES
@@ -329,9 +334,6 @@ config ZONE_DMA32
config AUDIT_ARCH
def_bool y if X86_64
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- def_bool y
-
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
@@ -1930,6 +1932,23 @@ config X86_INTEL_TSX_MODE_AUTO
side channel attacks - equals the tsx=auto command line parameter.
endchoice
+config X86_SGX
+ bool "Software Guard eXtensions (SGX)"
+ depends on X86_64 && CPU_SUP_INTEL
+ depends on CRYPTO=y
+ depends on CRYPTO_SHA256=y
+ select SRCU
+ select MMU_NOTIFIER
+ help
+ Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions
+ that can be used by applications to set aside private regions of code
+ and data, referred to as enclaves. An enclave's private memory can
+ only be accessed by code running within the enclave. Accesses from
+ outside the enclave, including other enclaves, are disallowed by
+ hardware.
+
+ If unsure, say N.
+
config EFI
bool "EFI runtime service support"
depends on ACPI
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 27b5e2bc6a01..80b57e7f4947 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -62,9 +62,6 @@ config EARLY_PRINTK_USB_XDBC
You should normally say N here, unless you want to debug early
crashes or need a very simple printk logging facility.
-config COPY_MC_TEST
- def_bool n
-
config EFI_PGT_DUMP
bool "Dump the EFI pagetable"
depends on EFI
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 154259f18b8b..7116da3980be 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -24,14 +24,7 @@ endif
# How to compile the 16-bit code. Note we always compile for -march=i386;
# that way we can complain to the user if the CPU is insufficient.
-#
-# The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
-# older versions of GCC, include an *assembly* header to make sure that
-# gcc doesn't play any games behind our back.
-CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
-M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
-
-REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
+REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-mno-mmx -mno-sse
@@ -209,9 +202,6 @@ ifdef CONFIG_X86_64
LDFLAGS_vmlinux += -z max-page-size=0x200000
endif
-# We never want expected sections to be placed heuristically by the
-# linker. All sections should be explicitly named in the linker script.
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
diff --git a/arch/x86/boot/code16gcc.h b/arch/x86/boot/code16gcc.h
deleted file mode 100644
index e19fd7536307..000000000000
--- a/arch/x86/boot/code16gcc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#
-# code16gcc.h
-#
-# This file is added to the assembler via -Wa when compiling 16-bit C code.
-# This is done this way instead via asm() to make sure gcc does not reorder
-# things around us.
-#
-# gcc 4.9+ has a real -m16 option so we can drop this hack long term.
-#
-
- .code16gcc
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index ee249088cbfe..40b8fd375d52 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -61,7 +61,9 @@ KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info)
# Compressed kernel should be built as PIE since it may be loaded at any
# address by the bootloader.
LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+ifdef CONFIG_LD_ORPHAN_WARN
+LDFLAGS_vmlinux += --orphan-handling=warn
+endif
LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 017de6cc87dc..e94874f4bbc1 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -241,12 +241,12 @@ SYM_FUNC_START(startup_32)
leal rva(startup_64)(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
movl rva(efi32_boot_args)(%ebp), %edi
- cmp $0, %edi
+ testl %edi, %edi
jz 1f
leal rva(efi64_stub_entry)(%ebp), %eax
movl rva(efi32_boot_args+4)(%ebp), %esi
movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer
- cmpl $0, %edx
+ testl %edx, %edx
jnz 1f
/*
* efi_pe_entry uses MS calling convention, which requires 32 bytes of
@@ -592,7 +592,7 @@ SYM_CODE_START(trampoline_32bit_src)
movl %eax, %cr0
/* Check what paging mode we want to be in after the trampoline */
- cmpl $0, %edx
+ testl %edx, %edx
jz 1f
/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
@@ -622,7 +622,7 @@ SYM_CODE_START(trampoline_32bit_src)
/* Enable PAE and LA57 (if required) paging modes */
movl $X86_CR4_PAE, %eax
- cmpl $0, %edx
+ testl %edx, %edx
jz 1f
orl $X86_CR4_LA57, %eax
1:
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index a5e5db6ada3c..f7213d0943b8 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -164,16 +164,7 @@ void initialize_identity_maps(void *rmode)
add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
/* Load the new page-table. */
- write_cr3(top_level_pgt);
-}
-
-/*
- * This switches the page tables to the new level4 that has been built
- * via calls to add_identity_map() above. If booted via startup_32(),
- * this is effectively a no-op.
- */
-void finalize_identity_maps(void)
-{
+ sev_verify_cbit(top_level_pgt);
write_cr3(top_level_pgt);
}
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index dd07e7b41b11..aa561795efd1 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -68,6 +68,9 @@ SYM_FUNC_START(get_sev_encryption_bit)
SYM_FUNC_END(get_sev_encryption_bit)
.code64
+
+#include "../../kernel/sev_verify_cbit.S"
+
SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
push %rbp
@@ -81,6 +84,19 @@ SYM_FUNC_START(set_sev_encryption_mask)
bts %rax, sme_me_mask(%rip) /* Create the encryption mask */
+ /*
+ * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
+ * get_sev_encryption_bit() because this function is 32-bit code and
+ * shared between the 64-bit and 32-bit boot paths.
+ */
+ movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */
+ rdmsr
+
+ /* Store MSR value in sev_status */
+ shlq $32, %rdx
+ orq %rdx, %rax
+ movq %rax, sev_status(%rip)
+
.Lno_sev_mask:
movq %rbp, %rsp /* Restore original stack pointer */
@@ -96,5 +112,7 @@ SYM_FUNC_END(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
-SYM_DATA(sme_me_mask, .quad 0)
+SYM_DATA(sme_me_mask, .quad 0)
+SYM_DATA(sev_status, .quad 0)
+SYM_DATA(sev_check_data, .quad 0)
#endif
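
rdmsr returns the 64-bit MSR value split across EDX:EAX; the shlq/orq pair above reassembles it before storing to sev_status. The same composition in C, for reference:

/* Sketch: rebuilding a 64-bit MSR value from the rdmsr register pair. */
static inline u64 msr_from_pair(u32 eax, u32 edx)
{
	return ((u64)edx << 32) | eax;
}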
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 6d31f1b4c4d1..d9a631c5973c 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -159,4 +159,6 @@ void boot_page_fault(void);
void boot_stage1_vc(void);
void boot_stage2_vc(void);
+unsigned long sev_verify_cbit(unsigned long cr3);
+
#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
index 954cb2702e23..27826c265aab 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev-es.c
@@ -32,13 +32,12 @@ struct ghcb *boot_ghcb;
*/
static bool insn_has_rep_prefix(struct insn *insn)
{
+ insn_byte_t p;
int i;
insn_get_prefixes(insn);
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- insn_byte_t p = insn->prefixes.bytes[i];
-
+ for_each_insn_prefix(insn, i, p) {
if (p == 0xf2 || p == 0xf3)
return true;
}
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
deleted file mode 100644
index 7b7dc05fa1a4..000000000000
--- a/arch/x86/crypto/aes_glue.c
+++ /dev/null
@@ -1 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 1852b19a73a0..d1436c37008b 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -318,7 +318,7 @@ _initial_blocks_\@:
# Main loop - Encrypt/Decrypt remaining blocks
- cmp $0, %r13
+ test %r13, %r13
je _zero_cipher_left_\@
sub $64, %r13
je _four_cipher_left_\@
@@ -437,7 +437,7 @@ _multiple_of_16_bytes_\@:
mov PBlockLen(%arg2), %r12
- cmp $0, %r12
+ test %r12, %r12
je _partial_done\@
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
@@ -474,7 +474,7 @@ _T_8_\@:
add $8, %r10
sub $8, %r11
psrldq $8, %xmm0
- cmp $0, %r11
+ test %r11, %r11
je _return_T_done_\@
_T_4_\@:
movd %xmm0, %eax
@@ -482,7 +482,7 @@ _T_4_\@:
add $4, %r10
sub $4, %r11
psrldq $4, %xmm0
- cmp $0, %r11
+ test %r11, %r11
je _return_T_done_\@
_T_123_\@:
movd %xmm0, %eax
@@ -619,7 +619,7 @@ _get_AAD_blocks\@:
/* read the last <16B of AAD */
_get_AAD_rest\@:
- cmp $0, %r11
+ test %r11, %r11
je _get_AAD_done\@
READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
@@ -640,7 +640,7 @@ _get_AAD_done\@:
.macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
AAD_HASH operation
mov PBlockLen(%arg2), %r13
- cmp $0, %r13
+ test %r13, %r13
je _partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over reading
cmp $16, \PLAIN_CYPH_LEN
@@ -692,7 +692,7 @@ _no_extra_mask_1_\@:
pshufb %xmm2, %xmm3
pxor %xmm3, \AAD_HASH
- cmp $0, %r10
+ test %r10, %r10
jl _partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
@@ -727,7 +727,7 @@ _no_extra_mask_2_\@:
pshufb %xmm2, %xmm9
pxor %xmm9, \AAD_HASH
- cmp $0, %r10
+ test %r10, %r10
jl _partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
@@ -747,7 +747,7 @@ _encode_done_\@:
pshufb %xmm2, %xmm9
.endif
# output encrypted Bytes
- cmp $0, %r10
+ test %r10, %r10
jl _partial_fill_\@
mov %r13, %r12
mov $16, %r13
@@ -2720,7 +2720,7 @@ SYM_FUNC_END(aesni_ctr_enc)
*/
SYM_FUNC_START(aesni_xts_crypt8)
FRAME_BEGIN
- cmpb $0, %cl
+ testb %cl, %cl
movl $0, %ecx
movl $240, %r10d
leaq _aesni_enc4, %r11
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 5fee47956f3b..2cf8e94d986a 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -369,7 +369,7 @@ _initial_num_blocks_is_0\@:
_initial_blocks_encrypted\@:
- cmp $0, %r13
+ test %r13, %r13
je _zero_cipher_left\@
sub $128, %r13
@@ -528,7 +528,7 @@ _multiple_of_16_bytes\@:
vmovdqu HashKey(arg2), %xmm13
mov PBlockLen(arg2), %r12
- cmp $0, %r12
+ test %r12, %r12
je _partial_done\@
#GHASH computation for the last <16 Byte block
@@ -573,7 +573,7 @@ _T_8\@:
add $8, %r10
sub $8, %r11
vpsrldq $8, %xmm9, %xmm9
- cmp $0, %r11
+ test %r11, %r11
je _return_T_done\@
_T_4\@:
vmovd %xmm9, %eax
@@ -581,7 +581,7 @@ _T_4\@:
add $4, %r10
sub $4, %r11
vpsrldq $4, %xmm9, %xmm9
- cmp $0, %r11
+ test %r11, %r11
je _return_T_done\@
_T_123\@:
vmovd %xmm9, %eax
@@ -625,7 +625,7 @@ _get_AAD_blocks\@:
cmp $16, %r11
jge _get_AAD_blocks\@
vmovdqu \T8, \T7
- cmp $0, %r11
+ test %r11, %r11
je _get_AAD_done\@
vpxor \T7, \T7, \T7
@@ -644,7 +644,7 @@ _get_AAD_rest8\@:
vpxor \T1, \T7, \T7
jmp _get_AAD_rest8\@
_get_AAD_rest4\@:
- cmp $0, %r11
+ test %r11, %r11
jle _get_AAD_rest0\@
mov (%r10), %eax
movq %rax, \T1
@@ -749,7 +749,7 @@ _done_read_partial_block_\@:
.macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
AAD_HASH ENC_DEC
mov PBlockLen(arg2), %r13
- cmp $0, %r13
+ test %r13, %r13
je _partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over reading
cmp $16, \PLAIN_CYPH_LEN
@@ -801,7 +801,7 @@ _no_extra_mask_1_\@:
vpshufb %xmm2, %xmm3, %xmm3
vpxor %xmm3, \AAD_HASH, \AAD_HASH
- cmp $0, %r10
+ test %r10, %r10
jl _partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
@@ -836,7 +836,7 @@ _no_extra_mask_2_\@:
vpshufb %xmm2, %xmm9, %xmm9
vpxor %xmm9, \AAD_HASH, \AAD_HASH
- cmp $0, %r10
+ test %r10, %r10
jl _partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
@@ -856,7 +856,7 @@ _encode_done_\@:
vpshufb %xmm2, %xmm9, %xmm9
.endif
# output encrypted Bytes
- cmp $0, %r10
+ test %r10, %r10
jl _partial_fill_\@
mov %r13, %r12
mov $16, %r13
diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
index 7d568012cc15..71fae5a09e56 100644
--- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
@@ -251,7 +251,7 @@ $code.=<<___;
mov %rax,8($ctx)
mov %rax,16($ctx)
- cmp \$0,$inp
+ test $inp,$inp
je .Lno_key
___
$code.=<<___ if (!$kernel);
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index e508dbd91813..646da46e8d10 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -158,6 +158,7 @@ static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
dctx->s[1] = get_unaligned_le32(&inp[4]);
dctx->s[2] = get_unaligned_le32(&inp[8]);
dctx->s[3] = get_unaligned_le32(&inp[12]);
+ acc += POLY1305_BLOCK_SIZE;
dctx->sset = true;
}
}
@@ -209,7 +210,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
}
poly1305_simd_emit(&dctx->h, dst, dctx->s);
- *dctx = (struct poly1305_desc_ctx){};
+ memzero_explicit(dctx, sizeof(*dctx));
}
EXPORT_SYMBOL(poly1305_final_arch);
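
The struct assignment this replaces is a dead store once the descriptor is never read again, so the compiler may delete it and leave key material behind; memzero_explicit() includes a barrier that defeats dead-store elimination. A short sketch:

#include <linux/string.h>

/* Sketch: wiping secrets whose storage is about to be abandoned. */
static void wipe_ctx(struct poly1305_desc_ctx *dctx)
{
	/* memset() or a struct assignment here may be optimized out;
	 * memzero_explicit() is guaranteed to execute. */
	memzero_explicit(dctx, sizeof(*dctx));
}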
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 18200135603f..44340a1139e0 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/simd.h>
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index dd06249229e1..3a5f6be7dbba 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -35,7 +35,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/string.h>
#include <asm/simd.h>
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 63470fd6ae32..684d58c8bc4f 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -278,7 +278,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
# "blocks" is the message length in SHA512 blocks
########################################################################
SYM_FUNC_START(sha512_transform_avx)
- cmp $0, msglen
+ test msglen, msglen
je nowork
# Allocate Stack Space
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 7946a1bee85b..50812af0b083 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -280,7 +280,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
########################################################################
SYM_FUNC_START(sha512_transform_ssse3)
- cmp $0, msglen
+ test msglen, msglen
je nowork
# Allocate Stack Space
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index b0b05c93409e..30e70f4fe2f7 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -34,7 +34,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <asm/simd.h>
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 870efeec8bda..18d8f17f755c 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -209,40 +209,6 @@ SYSCALL_DEFINE0(ni_syscall)
return -ENOSYS;
}
-noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
-{
- bool irq_state = lockdep_hardirqs_enabled();
-
- __nmi_enter();
- lockdep_hardirqs_off(CALLER_ADDR0);
- lockdep_hardirq_enter();
- rcu_nmi_enter();
-
- instrumentation_begin();
- trace_hardirqs_off_finish();
- ftrace_nmi_enter();
- instrumentation_end();
-
- return irq_state;
-}
-
-noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
-{
- instrumentation_begin();
- ftrace_nmi_exit();
- if (restore) {
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- }
- instrumentation_end();
-
- rcu_nmi_exit();
- lockdep_hardirq_exit();
- if (restore)
- lockdep_hardirqs_on(CALLER_ADDR0);
- __nmi_exit();
-}
-
#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 1f47e24fb65c..379819244b91 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,10 +364,10 @@
440 common process_madvise sys_process_madvise
#
-# x32-specific system call numbers start at 512 to avoid cache impact
-# for native 64-bit operation. The __x32_compat_sys stubs are created
-# on-the-fly for compat_sys_*() compatibility system calls if X86_X32
-# is defined.
+# Due to a historical design error, certain syscalls are numbered differently
+# in x32 as compared to native x86_64. These syscalls have numbers 512-547.
+# Do not add new syscalls to this range. Numbers 548 and above are available
+# for non-x32 use.
#
512 x32 rt_sigaction compat_sys_rt_sigaction
513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn
@@ -405,3 +405,5 @@
545 x32 execveat compat_sys_execveat
546 x32 preadv2 compat_sys_preadv64v2
547 x32 pwritev2 compat_sys_pwritev64v2
+# This is the end of the legacy x32 range. Numbers 548 and above are
+# not special and are not to be used for x32-specific syscalls.
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 21243747965d..02e3e42f380b 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -27,9 +27,10 @@ VDSO32-$(CONFIG_IA32_EMULATION) := y
vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
vobjs32-y += vdso32/vclock_gettime.o
+vobjs-$(CONFIG_X86_SGX) += vsgx.o
# files to link into kernel
-obj-y += vma.o
+obj-y += vma.o extable.o
KASAN_SANITIZE_vma.o := y
UBSAN_SANITIZE_vma.o := y
KCSAN_SANITIZE_vma.o := y
@@ -98,6 +99,7 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS
CFLAGS_REMOVE_vclock_gettime.o = -pg
CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
CFLAGS_REMOVE_vgetcpu.o = -pg
+CFLAGS_REMOVE_vsgx.o = -pg
#
# X32 processes use x32 vDSO to access 64bit kernel data.
@@ -128,8 +130,8 @@ $(obj)/%-x32.o: $(obj)/%.o FORCE
targets += vdsox32.lds $(vobjx32s-y)
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
+$(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table
+$(obj)/%.so: $(obj)/%.so.dbg
$(call if_changed,objcopy)
$(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
diff --git a/arch/x86/entry/vdso/extable.c b/arch/x86/entry/vdso/extable.c
new file mode 100644
index 000000000000..afcf5b65beef
--- /dev/null
+++ b/arch/x86/entry/vdso/extable.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <asm/current.h>
+#include <asm/traps.h>
+#include <asm/vdso.h>
+
+struct vdso_exception_table_entry {
+ int insn, fixup;
+};
+
+bool fixup_vdso_exception(struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long fault_addr)
+{
+ const struct vdso_image *image = current->mm->context.vdso_image;
+ const struct vdso_exception_table_entry *extable;
+ unsigned int nr_entries, i;
+ unsigned long base;
+
+ /*
+ * Do not attempt to fix up #DB or #BP. It's impossible to identify
+ * whether a #DB/#BP originated from within an SGX enclave, and SGX
+ * enclaves are currently the only use case for vDSO fixup.
+ */
+ if (trapnr == X86_TRAP_DB || trapnr == X86_TRAP_BP)
+ return false;
+
+ if (!current->mm->context.vdso)
+ return false;
+
+ base = (unsigned long)current->mm->context.vdso + image->extable_base;
+ nr_entries = image->extable_len / (sizeof(*extable));
+ extable = image->extable;
+
+ for (i = 0; i < nr_entries; i++) {
+ if (regs->ip == base + extable[i].insn) {
+ regs->ip = base + extable[i].fixup;
+ regs->di = trapnr;
+ regs->si = error_code;
+ regs->dx = fault_addr;
+ return true;
+ }
+ }
+
+ return false;
+}
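
A hypothetical call-site sketch; the actual wiring into the x86 fault handlers is not part of this hunk:

/* Sketch: a trap handler offering the vDSO a chance to fix the fault.
 * On success, %rip points at the fixup entry and %rdi/%rsi/%rdx carry
 * trapnr/error_code/fault_addr, per the loop above. */
static bool try_vdso_fixup(struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long addr)
{
	return user_mode(regs) &&
	       fixup_vdso_exception(regs, trapnr, error_code, addr);
}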
diff --git a/arch/x86/entry/vdso/extable.h b/arch/x86/entry/vdso/extable.h
new file mode 100644
index 000000000000..b56f6b012941
--- /dev/null
+++ b/arch/x86/entry/vdso/extable.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_EXTABLE_H
+#define __VDSO_EXTABLE_H
+
+/*
+ * Inject exception fixup for vDSO code. Unlike normal exception fixup,
+ * vDSO uses a dedicated handler, and the addresses are relative to the
+ * overall exception table rather than to each individual entry.
+ */
+#ifdef __ASSEMBLY__
+#define _ASM_VDSO_EXTABLE_HANDLE(from, to) \
+ ASM_VDSO_EXTABLE_HANDLE from to
+
+.macro ASM_VDSO_EXTABLE_HANDLE from:req to:req
+ .pushsection __ex_table, "a"
+ .long (\from) - __ex_table
+ .long (\to) - __ex_table
+ .popsection
+.endm
+#else
+#define _ASM_VDSO_EXTABLE_HANDLE(from, to) \
+ ".pushsection __ex_table, \"a\"\n" \
+ ".long (" #from ") - __ex_table\n" \
+ ".long (" #to ") - __ex_table\n" \
+ ".popsection\n"
+#endif
+
+#endif /* __VDSO_EXTABLE_H */
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index 4d152933547d..dc8da7695859 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -75,11 +75,18 @@ SECTIONS
* stuff that isn't used at runtime in between.
*/
- .text : { *(.text*) } :text =0x90909090,
+ .text : {
+ *(.text*)
+ *(.fixup)
+ } :text =0x90909090,
+
+
.altinstructions : { *(.altinstructions) } :text
.altinstr_replacement : { *(.altinstr_replacement) } :text
+ __ex_table : { *(__ex_table) } :text
+
/DISCARD/ : {
*(.discard)
*(.discard.*)
diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S
index 36b644e16272..4bf48462fca7 100644
--- a/arch/x86/entry/vdso/vdso.lds.S
+++ b/arch/x86/entry/vdso/vdso.lds.S
@@ -27,6 +27,7 @@ VERSION {
__vdso_time;
clock_getres;
__vdso_clock_getres;
+ __vdso_sgx_enter_enclave;
local: *;
};
}
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 7380908045c7..2d0f3d8bcc25 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -101,6 +101,8 @@ struct vdso_sym required_syms[] = {
{"__kernel_sigreturn", true},
{"__kernel_rt_sigreturn", true},
{"int80_landing_pad", true},
+ {"vdso32_rt_sigreturn_landing_pad", true},
+ {"vdso32_sigreturn_landing_pad", true},
};
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 6f46e11ce539..1c7cfac7e64a 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -5,6 +5,41 @@
* are built for 32-bit userspace.
*/
+static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (i % 10 == 0)
+ fprintf(outfile, "\n\t");
+ fprintf(outfile, "0x%02X, ", (int)(data)[i]);
+ }
+}
+
+
+/*
+ * Extract a section from the input data into a standalone blob. Used to
+ * capture kernel-only data that needs to persist indefinitely, e.g. the
+ * exception fixup tables, but only in the kernel, i.e. the section can
+ * be stripped from the final vDSO image.
+ */
+static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
+ FILE *outfile, ELF(Shdr) *sec, const char *name)
+{
+ unsigned long offset;
+ size_t len;
+
+ offset = (unsigned long)GET_LE(&sec->sh_offset);
+ len = (size_t)GET_LE(&sec->sh_size);
+
+ if (offset + len > data_len)
+ fail("section to extract overruns input data");
+
+ fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
+ BITSFUNC(copy)(outfile, data + offset, len);
+ fprintf(outfile, "\n};\n\n");
+}
+
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
void *stripped_addr, size_t stripped_len,
FILE *outfile, const char *image_name)
@@ -15,7 +50,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
unsigned long i, syms_nr;
ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
- *alt_sec = NULL;
+ *alt_sec = NULL, *extable_sec = NULL;
ELF(Dyn) *dyn = 0, *dyn_end = 0;
const char *secstrings;
INT_BITS syms[NSYMS] = {};
@@ -77,6 +112,8 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
if (!strcmp(secstrings + GET_LE(&sh->sh_name),
".altinstructions"))
alt_sec = sh;
+ if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table"))
+ extable_sec = sh;
}
if (!symtab_hdr)
@@ -155,6 +192,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
(int)((unsigned char *)stripped_addr)[i]);
}
fprintf(outfile, "\n};\n\n");
+ if (extable_sec)
+ BITSFUNC(extract)(raw_addr, raw_len, outfile,
+ extable_sec, "extable");
fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
fprintf(outfile, "\t.data = raw_data,\n");
@@ -165,6 +205,14 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
fprintf(outfile, "\t.alt_len = %lu,\n",
(unsigned long)GET_LE(&alt_sec->sh_size));
}
+ if (extable_sec) {
+ fprintf(outfile, "\t.extable_base = %lu,\n",
+ (unsigned long)GET_LE(&extable_sec->sh_offset));
+ fprintf(outfile, "\t.extable_len = %lu,\n",
+ (unsigned long)GET_LE(&extable_sec->sh_size));
+ fprintf(outfile, "\t.extable = extable,\n");
+ }
+
for (i = 0; i < NSYMS; i++) {
if (required_syms[i].export && syms[i])
fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
diff --git a/arch/x86/entry/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S
index c3233ee98a6b..1bd068f72d4c 100644
--- a/arch/x86/entry/vdso/vdso32/sigreturn.S
+++ b/arch/x86/entry/vdso/vdso32/sigreturn.S
@@ -18,6 +18,7 @@ __kernel_sigreturn:
movl $__NR_sigreturn, %eax
SYSCALL_ENTER_KERNEL
.LEND_sigreturn:
+SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL)
nop
.size __kernel_sigreturn,.-.LSTART_sigreturn
@@ -29,6 +30,7 @@ __kernel_rt_sigreturn:
movl $__NR_rt_sigreturn, %eax
SYSCALL_ENTER_KERNEL
.LEND_rt_sigreturn:
+SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL)
nop
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
.previous
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 9185cb1d13b9..825e829ffff1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -89,30 +89,14 @@ static void vdso_fix_landing(const struct vdso_image *image,
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
- unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
const struct vdso_image *image = current->mm->context.vdso_image;
- if (image->size != new_size)
- return -EINVAL;
-
vdso_fix_landing(image, new_vma);
current->mm->context.vdso = (void __user *)new_vma->vm_start;
return 0;
}
-static int vvar_mremap(const struct vm_special_mapping *sm,
- struct vm_area_struct *new_vma)
-{
- const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
- unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-
- if (new_size != -image->sym_vvar_start)
- return -EINVAL;
-
- return 0;
-}
-
#ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
@@ -252,7 +236,6 @@ static const struct vm_special_mapping vdso_mapping = {
static const struct vm_special_mapping vvar_mapping = {
.name = "[vvar]",
.fault = vvar_fault,
- .mremap = vvar_mremap,
};
/*
@@ -413,10 +396,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
+ int uses_interp, bool x32)
{
#ifdef CONFIG_X86_X32_ABI
- if (test_thread_flag(TIF_X32)) {
+ if (x32) {
if (!vdso64_enabled)
return 0;
return map_vdso_randomized(&vdso_image_x32);
@@ -436,6 +419,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}
#endif
+bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+ const struct vdso_image *image = current->mm->context.vdso_image;
+ unsigned long vdso = (unsigned long) current->mm->context.vdso;
+
+ if (in_ia32_syscall() && image == &vdso_image_32) {
+ if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
+ regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
+ return true;
+ }
+#endif
+ return false;
+}
+
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S
new file mode 100644
index 000000000000..86a0e94f68df
--- /dev/null
+++ b/arch/x86/entry/vdso/vsgx.S
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+#include <asm/errno.h>
+#include <asm/enclu.h>
+
+#include "extable.h"
+
+/* Relative to %rbp. */
+#define SGX_ENCLAVE_OFFSET_OF_RUN 16
+
+/* The offsets relative to struct sgx_enclave_run. */
+#define SGX_ENCLAVE_RUN_TCS 0
+#define SGX_ENCLAVE_RUN_LEAF 8
+#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12
+#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14
+#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16
+#define SGX_ENCLAVE_RUN_USER_HANDLER 24
+#define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used */
+#define SGX_ENCLAVE_RUN_RESERVED_START 40
+#define SGX_ENCLAVE_RUN_RESERVED_END 256
+
+.code64
+.section .text, "ax"
+
+SYM_FUNC_START(__vdso_sgx_enter_enclave)
+ /* Prolog */
+ .cfi_startproc
+ push %rbp
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rbp, 0
+ mov %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ push %rbx
+ .cfi_rel_offset %rbx, -8
+
+ mov %ecx, %eax
+.Lenter_enclave:
+ /* EENTER <= function <= ERESUME */
+ cmp $EENTER, %eax
+ jb .Linvalid_input
+ cmp $ERESUME, %eax
+ ja .Linvalid_input
+
+ mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx
+
+ /* Validate that the reserved area contains only zeros. */
+ mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx
+1:
+ cmpq $0, (%rcx, %rbx)
+ jne .Linvalid_input
+ add $8, %rbx
+ cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx
+ jne 1b
+
+ /* Load TCS and AEP */
+ mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
+ lea .Lasync_exit_pointer(%rip), %rcx
+
+ /* Single ENCLU serving as both EENTER and AEP (ERESUME) */
+.Lasync_exit_pointer:
+.Lenclu_eenter_eresume:
+ enclu
+
+ /* EEXIT jumps here unless the enclave is doing something fancy. */
+ mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx
+
+ /* Record EEXIT as the exit leaf. */
+ movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)
+
+ /* Invoke userspace's exit handler if one was provided. */
+.Lhandle_exit:
+ cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
+ jne .Linvoke_userspace_handler
+
+ /* Success, in the sense that ENCLU was attempted. */
+ xor %eax, %eax
+
+.Lout:
+ pop %rbx
+ leave
+ .cfi_def_cfa %rsp, 8
+ ret
+
+ /* The out-of-line code runs with the pre-leave stack frame. */
+ .cfi_def_cfa %rbp, 16
+
+.Linvalid_input:
+ mov $(-EINVAL), %eax
+ jmp .Lout
+
+.Lhandle_exception:
+ mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx
+
+ /* Set the exception info. */
+ mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
+ mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
+ mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
+ mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
+ jmp .Lhandle_exit
+
+.Linvoke_userspace_handler:
+ /* Pass the untrusted RSP (at exit) to the callback via %rcx. */
+ mov %rsp, %rcx
+
+ /* Save struct sgx_enclave_exception; %rbx is about to be clobbered. */
+ mov %rbx, %rax
+
+ /* Save the untrusted RSP offset in %rbx (non-volatile register). */
+ mov %rsp, %rbx
+ and $0xf, %rbx
+
+ /*
+ * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
+ * _after_ pushing the parameters on the stack, hence the bonus push.
+ */
+ and $-0x10, %rsp
+ push %rax
+
+ /* Push struct sgx_enclave_exception as a param to the callback. */
+ push %rax
+
+ /* Clear RFLAGS.DF per x86_64 ABI */
+ cld
+
+ /*
+ * Load the callback pointer to %rax and lfence for LVI (load value
+ * injection) protection before making the call.
+ */
+ mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
+ lfence
+ call *%rax
+
+ /* Undo the post-exit %rsp adjustment. */
+ lea 0x10(%rsp, %rbx), %rsp
+
+ /*
+ * If the return from callback is zero or negative, return immediately,
+ * else re-execute ENCLU with the positive return value interpreted as
+ * the requested ENCLU function.
+ */
+ cmp $0, %eax
+ jle .Lout
+ jmp .Lenter_enclave
+
+ .cfi_endproc
+
+_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)
+
+SYM_FUNC_END(__vdso_sgx_enter_enclave)
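
Userspace reaches this entry point by resolving __vdso_sgx_enter_enclave from the vDSO rather than via a syscall. A hedged sketch of a caller, assuming the prototype implied by the register usage above (the leaf in the fourth argument register, struct sgx_enclave_run as the seventh, stack-passed argument) and the SGX uapi header for the types:

#include <asm/sgx.h>	/* struct sgx_enclave_run; header name assumed per this series */
#include <asm/enclu.h>	/* EENTER */

/* Sketch (hypothetical wrapper around the resolved vDSO symbol). */
typedef int (*sgx_enter_t)(unsigned long rdi, unsigned long rsi,
			   unsigned long rdx, unsigned int leaf,
			   unsigned long r8, unsigned long r9,
			   struct sgx_enclave_run *run);

static int enter_enclave(sgx_enter_t vdso_fn, struct sgx_enclave_run *run)
{
	/* run->tcs must already point at the enclave's TCS page and the
	 * reserved area must be zeroed, or -EINVAL comes back. */
	return vdso_fn(0, 0, 0, EENTER, 0, 0, run);
}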
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 44c33103a955..1b40b9297083 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -316,7 +316,7 @@ static struct vm_area_struct gate_vma __ro_after_init = {
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
- if (!mm || mm->context.ia32_compat)
+ if (!mm || !(mm->context.flags & MM_CONTEXT_HAS_VSYSCALL))
return NULL;
#endif
if (vsyscall_mode == NONE)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 39eb276d0277..2c1791c4a518 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -538,7 +538,7 @@ static void amd_pmu_cpu_starting(int cpu)
if (!x86_pmu.amd_nb_constraints)
return;
- nb_id = amd_get_nb_id(cpu);
+ nb_id = topology_die_id(cpu);
WARN_ON_ONCE(nb_id == BAD_APICID);
for_each_online_cpu(i) {
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index a88c94d65693..e37de298a495 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1174,7 +1174,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
/* All the metric events are mapped onto the fixed counter 3. */
idx = INTEL_PMC_IDX_FIXED_SLOTS;
- /* fall through */
+ fallthrough;
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
@@ -2602,7 +2602,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
struct stack_frame_ia32 frame;
const struct stack_frame_ia32 __user *fp;
- if (!test_thread_flag(TIF_IA32))
+ if (user_64bit_mode(regs))
return 0;
cs_base = get_segment_base(regs->cs);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index f1926e9f2143..d4569bfa83e3 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -257,7 +257,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
- INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
+ INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
+ INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
@@ -1900,6 +1901,19 @@ static __initconst const u64 tnt_hw_cache_extra_regs
},
};
+EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
+EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
+EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
+EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
+
+static struct attribute *tnt_events_attrs[] = {
+ EVENT_PTR(td_fe_bound_tnt),
+ EVENT_PTR(td_retiring_tnt),
+ EVENT_PTR(td_bad_spec_tnt),
+ EVENT_PTR(td_be_bound_tnt),
+ NULL,
+};
+
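These EVENT_ATTR_STR() aliases should surface under /sys/bus/event_source/devices/cpu/events/, making the Tremont topdown metrics addressable by name, e.g. perf stat -e '{topdown-fe-bound,topdown-bad-spec,topdown-retiring,topdown-be-bound}' (a usage sketch, not part of the patch).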
static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
@@ -2630,7 +2644,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
u64 pebs_enabled = cpuc->pebs_enabled;
handled++;
- x86_pmu.drain_pebs(regs);
+ x86_pmu.drain_pebs(regs, &data);
status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*
@@ -4987,6 +5001,12 @@ __init int intel_pmu_init(void)
x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
+ if (version >= 5) {
+ x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
+ if (x86_pmu.intel_cap.anythread_deprecated)
+ pr_cont(" AnyThread deprecated, ");
+ }
+
/*
* Install the hw-cache-events table:
*/
@@ -5167,6 +5187,7 @@ __init int intel_pmu_init(void)
x86_pmu.lbr_pt_coexist = true;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.get_event_constraints = tnt_get_event_constraints;
+ td_attr = tnt_events_attrs;
extra_attr = slm_format_attr;
pr_cont("Tremont events, ");
name = "Tremont";
@@ -5436,6 +5457,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ICELAKE:
case INTEL_FAM6_TIGERLAKE_L:
case INTEL_FAM6_TIGERLAKE:
+ case INTEL_FAM6_ROCKETLAKE:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -5458,7 +5480,7 @@ __init int intel_pmu_init(void)
mem_attr = icl_events_attrs;
td_attr = icl_td_events_attrs;
tsx_attr = icl_tsx_events_attrs;
- x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
+ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
x86_pmu.lbr_pt_coexist = true;
intel_pmu_pebs_data_source_skl(pmem);
x86_pmu.update_topdown_event = icl_update_topdown_event;
@@ -5512,6 +5534,10 @@ __init int intel_pmu_init(void)
x86_pmu.intel_ctrl |=
((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+ /* AnyThread may be deprecated on arch perfmon v5 or later */
+ if (x86_pmu.intel_cap.anythread_deprecated)
+ x86_pmu.format_attrs = intel_arch_formats_attr;
+
if (x86_pmu.event_constraints) {
/*
* event on fixed counter2 (REF_CYCLES) only works on this
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 442e1ed4acd4..407eee5f6f95 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -51,46 +51,46 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
- * TNT
+ * TNT,RKL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
* Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- * ICL,TGL
+ * ICL,TGL,RKL
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- * KBL,CML,ICL,TGL,TNT
+ * KBL,CML,ICL,TGL,TNT,RKL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- * GLM,CNL,KBL,CML,ICL,TGL,TNT
+ * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
- * TNT
+ * TNT,RKL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- * KBL,CML,ICL,TGL
+ * KBL,CML,ICL,TGL,RKL
* Scope: Package (physical package)
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
* perf code: 0x04
- * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
* Scope: Package (physical package)
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
* perf code: 0x05
- * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- * TNT
+ * TNT,RKL
* Scope: Package (physical package)
*
*/
@@ -107,14 +107,14 @@
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
+static ssize_t __cstate_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
-static struct kobj_attribute format_attr_##_var = \
+static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
static ssize_t cstate_get_attr_cpumask(struct device *dev,
@@ -649,6 +649,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 404315df1e16..67dbc91bccfe 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -642,8 +642,8 @@ int intel_pmu_drain_bts_buffer(void)
rcu_read_lock();
perf_prepare_sample(&header, &data, event, &regs);
- if (perf_output_begin(&handle, event, header.size *
- (top - base - skip)))
+ if (perf_output_begin(&handle, &data, event,
+ header.size * (top - base - skip)))
goto unlock;
for (at = base; at < top; at++) {
@@ -670,7 +670,9 @@ unlock:
static inline void intel_pmu_drain_pebs_buffer(void)
{
- x86_pmu.drain_pebs(NULL);
+ struct perf_sample_data data;
+
+ x86_pmu.drain_pebs(NULL, &data);
}
/*
@@ -959,7 +961,8 @@ static void adaptive_pebs_record_size_update(void)
#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
- PERF_SAMPLE_TRANSACTION)
+ PERF_SAMPLE_TRANSACTION | \
+ PERF_SAMPLE_DATA_PAGE_SIZE)
static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
@@ -1259,7 +1262,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
old_to = to;
#ifdef CONFIG_X86_64
- is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+ is_64bit = kernel_ip(to) || any_64bit_mode(regs);
#endif
insn_init(&insn, kaddr, size, is_64bit);
insn_get_length(&insn);
@@ -1335,6 +1338,10 @@ static u64 get_data_src(struct perf_event *event, u64 aux)
return val;
}
+#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
+ PERF_SAMPLE_PHYS_ADDR | \
+ PERF_SAMPLE_DATA_PAGE_SIZE)
+
static void setup_pebs_fixed_sample_data(struct perf_event *event,
struct pt_regs *iregs, void *__pebs,
struct perf_sample_data *data,
@@ -1449,7 +1456,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
}
- if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
+ if ((sample_type & PERF_SAMPLE_ADDR_TYPE) &&
x86_pmu.intel_cap.pebs_format >= 1)
data->addr = pebs->dla;
@@ -1577,7 +1584,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
if (sample_type & PERF_SAMPLE_DATA_SRC)
data->data_src.val = get_data_src(event, meminfo->aux);
- if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
+ if (sample_type & PERF_SAMPLE_ADDR_TYPE)
data->addr = meminfo->address;
if (sample_type & PERF_SAMPLE_TRANSACTION)
@@ -1719,23 +1726,24 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
return 0;
}
-static void __intel_pmu_pebs_event(struct perf_event *event,
- struct pt_regs *iregs,
- void *base, void *top,
- int bit, int count,
- void (*setup_sample)(struct perf_event *,
- struct pt_regs *,
- void *,
- struct perf_sample_data *,
- struct pt_regs *))
+static __always_inline void
+__intel_pmu_pebs_event(struct perf_event *event,
+ struct pt_regs *iregs,
+ struct perf_sample_data *data,
+ void *base, void *top,
+ int bit, int count,
+ void (*setup_sample)(struct perf_event *,
+ struct pt_regs *,
+ void *,
+ struct perf_sample_data *,
+ struct pt_regs *))
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- struct perf_sample_data data;
struct x86_perf_regs perf_regs;
struct pt_regs *regs = &perf_regs.regs;
void *at = get_next_pebs_record_by_bit(base, top, bit);
- struct pt_regs dummy_iregs;
+ static struct pt_regs dummy_iregs;
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
/*
@@ -1752,14 +1760,14 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
iregs = &dummy_iregs;
while (count > 1) {
- setup_sample(event, iregs, at, &data, regs);
- perf_event_output(event, &data, regs);
+ setup_sample(event, iregs, at, data, regs);
+ perf_event_output(event, data, regs);
at += cpuc->pebs_record_size;
at = get_next_pebs_record_by_bit(at, top, bit);
count--;
}
- setup_sample(event, iregs, at, &data, regs);
+ setup_sample(event, iregs, at, data, regs);
if (iregs == &dummy_iregs) {
/*
* The PEBS records may be drained in the non-overflow context,
@@ -1767,18 +1775,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* last record the same as other PEBS records, and doesn't
* invoke the generic overflow handler.
*/
- perf_event_output(event, &data, regs);
+ perf_event_output(event, data, regs);
} else {
/*
* All but the last records are processed.
* The last one is left to be able to call the overflow handler.
*/
- if (perf_event_overflow(event, &data, regs))
+ if (perf_event_overflow(event, data, regs))
x86_pmu_stop(event, 0);
}
}
-static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
@@ -1812,7 +1820,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
return;
}
- __intel_pmu_pebs_event(event, iregs, at, top, 0, n,
+ __intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
setup_pebs_fixed_sample_data);
}
@@ -1835,7 +1843,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
}
}
-static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
@@ -1913,7 +1921,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* that caused the PEBS record. It's called collision.
* If collision happened, the record will be dropped.
*/
- if (p->status != (1ULL << bit)) {
+ if (pebs_status != (1ULL << bit)) {
for_each_set_bit(i, (unsigned long *)&pebs_status, size)
error[i]++;
continue;
@@ -1937,19 +1945,19 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
if (error[bit]) {
perf_log_lost_samples(event, error[bit]);
- if (perf_event_account_interrupt(event))
+ if (iregs && perf_event_account_interrupt(event))
x86_pmu_stop(event, 0);
}
if (counts[bit]) {
- __intel_pmu_pebs_event(event, iregs, base,
+ __intel_pmu_pebs_event(event, iregs, data, base,
top, bit, counts[bit],
setup_pebs_fixed_sample_data);
}
}
}
-static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
{
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1997,7 +2005,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
if (WARN_ON_ONCE(!event->attr.precise_ip))
continue;
- __intel_pmu_pebs_event(event, iregs, base,
+ __intel_pmu_pebs_event(event, iregs, data, base,
top, bit, counts[bit],
setup_pebs_adaptive_sample_data);
}
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 8961653c5dd2..21890dacfcfe 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -919,7 +919,7 @@ static __always_inline bool get_lbr_predicted(u64 info)
return !(info & LBR_INFO_MISPRED);
}
-static __always_inline bool get_lbr_cycles(u64 info)
+static __always_inline u16 get_lbr_cycles(u64 info)
{
if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
!(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
@@ -1221,7 +1221,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
* on 64-bit systems running 32-bit apps
*/
#ifdef CONFIG_X86_64
- is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
+ is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
#endif
insn_init(&insn, addr, bytes_read, is64);
insn_get_opcode(&insn);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 86d012b3e0b4..357258f82dc8 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -94,8 +94,8 @@ end:
return map;
}
-ssize_t uncore_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ssize_t uncore_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct uncore_event_desc *event =
container_of(attr, struct uncore_event_desc, attr);
@@ -1636,6 +1636,11 @@ static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
.mmio_init = tgl_l_uncore_mmio_init,
};
+static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
+ .cpu_init = tgl_uncore_cpu_init,
+ .pci_init = skl_uncore_pci_init,
+};
+
static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
.cpu_init = icx_uncore_cpu_init,
.pci_init = icx_uncore_pci_init,
@@ -1683,6 +1688,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
{},
};
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 83d2a7d490e0..9efea154349d 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -157,7 +157,7 @@ struct intel_uncore_box {
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2
struct uncore_event_desc {
- struct kobj_attribute attr;
+ struct device_attribute attr;
const char *config;
};
@@ -179,8 +179,8 @@ struct pci2phy_map {
struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_physid(struct pci_bus *bus);
-ssize_t uncore_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf);
+ssize_t uncore_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
@@ -201,14 +201,14 @@ extern int __uncore_max_dies;
}
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
+static ssize_t __uncore_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
-static struct kobj_attribute format_attr_##_var = \
+static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
static inline bool uncore_pmc_fixed(int idx)
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 39e632ed6ca9..098f893e2e22 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -60,7 +60,8 @@
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC 0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC 0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC 0x9a36
-
+#define PCI_DEVICE_ID_INTEL_RKL_1_IMC 0x4c43
+#define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -405,6 +406,12 @@ static struct intel_uncore_type *tgl_msr_uncores[] = {
NULL,
};
+static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+}
+
void tgl_uncore_cpu_init(void)
{
uncore_msr_uncores = tgl_msr_uncores;
@@ -412,6 +419,7 @@ void tgl_uncore_cpu_init(void)
icl_uncore_cbox.ops = &skl_uncore_msr_ops;
icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
snb_uncore_arb.ops = &skl_uncore_msr_ops;
+ skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}
enum {
@@ -475,7 +483,7 @@ enum perf_snb_uncore_imc_freerunning_types {
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
[SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
0x0, 0x0, 1, 32 },
- [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+ [SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
0x0, 0x0, 1, 32 },
[SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
0x0, 0x0, 1, 32 },
@@ -926,6 +934,14 @@ static const struct pci_device_id icl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -1019,6 +1035,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
+ IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
+ IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
{ /* end marker */ }
};
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4be8f9cabd07..680404c58cb1 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -99,6 +99,7 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_ICELAKE_D:
case INTEL_FAM6_TIGERLAKE_L:
case INTEL_FAM6_TIGERLAKE:
+ case INTEL_FAM6_ROCKETLAKE:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ee2b9b9fc2a5..7895cf4c59a7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -132,7 +132,7 @@ struct amd_nb {
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
- PERF_SAMPLE_PERIOD)
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE)
#define PEBS_GP_REGS \
((1ULL << PERF_REG_X86_AX) | \
@@ -585,6 +585,7 @@ union perf_capabilities {
u64 pebs_baseline:1;
u64 perf_metrics:1;
u64 pebs_output_pt_available:1;
+ u64 anythread_deprecated:1;
};
u64 capabilities;
};
@@ -727,7 +728,7 @@ struct x86_pmu {
int pebs_record_size;
int pebs_buffer_size;
int max_pebs_events;
- void (*drain_pebs)(struct pt_regs *regs);
+ void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
unsigned long large_pebs_flags;
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 7c0120e2e957..7dbbeaacd995 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -93,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
* any other bit is reserved
*/
#define RAPL_EVENT_MASK 0xFFULL
-
-#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
- return sprintf(page, _format "\n"); \
-} \
-static struct kobj_attribute format_attr_##_var = \
- __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
-
#define RAPL_CNTR_WIDTH 32
#define RAPL_EVENT_ATTR_STR(_name, v, str) \
@@ -441,7 +429,7 @@ static struct attribute_group rapl_pmu_events_group = {
.attrs = attrs_empty,
};
-DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
+PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
&format_attr_event.attr,
NULL,
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 40e0e322161d..284e73661a18 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -273,11 +273,15 @@ void __init hv_apic_init(void)
pr_info("Hyper-V: Using enlightened APIC (%s mode)",
x2apic_enabled() ? "x2apic" : "xapic");
/*
- * With x2apic, architectural x2apic MSRs are equivalent to the
- * respective synthetic MSRs, so there's no need to override
- * the apic accessors. The only exception is
- * hv_apic_eoi_write, because it benefits from lazy EOI when
- * available, but it works for both xapic and x2apic modes.
+ * When in x2apic mode, don't use the Hyper-V specific APIC
+ * accessors since the field layout in the ICR register is
+ * different in x2apic mode. Furthermore, the architectural
+ * x2apic MSRs function just as well as the Hyper-V
+ * synthetic APIC MSRs, so there's no benefit in having
+ * separate Hyper-V accessors for x2apic mode. The only
+ * exception is hv_apic_eoi_write, because it benefits from
+ * lazy EOI when available, but the same accessor works for
+ * both xapic and x2apic because the field layout is the same.
*/
apic_set_eoi_write(hv_apic_eoi_write);
if (!x2apic_enabled()) {
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 81cf22398cd1..5e3d9b7fd5fb 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -347,7 +347,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
*/
unsafe_put_user(*((u64 *)&code), (u64 __user *)frame->retcode, Efault);
unsafe_put_sigcontext32(&frame->uc.uc_mcontext, fp, regs, set, Efault);
- unsafe_put_user(*(__u64 *)set, (__u64 *)&frame->uc.uc_sigmask, Efault);
+ unsafe_put_user(*(__u64 *)set, (__u64 __user *)&frame->uc.uc_sigmask, Efault);
user_access_end();
if (__copy_siginfo_to_user32(&frame->info, &ksig->info))
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 6d2df1ee427b..65064d9f7fa6 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -159,6 +159,8 @@ static inline u64 x86_default_get_root_pointer(void)
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
+struct cper_ia_proc_ctx;
+
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
@@ -177,6 +179,15 @@ static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
*/
return PAGE_KERNEL_NOENC;
}
+
+int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
+ u64 lapic_id);
+#else
+static inline int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
+ u64 lapic_id)
+{
+ return -EINVAL;
+}
#endif
#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 4e3099d9ae62..34cb3c159481 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -259,6 +259,7 @@ static inline u64 native_x2apic_icr_read(void)
extern int x2apic_mode;
extern int x2apic_phys;
+extern void __init x2apic_set_max_apicid(u32 apicid);
extern void __init check_x2apic(void);
extern void x2apic_setup(void);
static inline int x2apic_enabled(void)
@@ -305,11 +306,10 @@ struct apic {
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
- /* dest_logical is used by the IPI functions */
- u32 dest_logical;
u32 disable_esr;
- u32 irq_delivery_mode;
- u32 irq_dest_mode;
+
+ enum apic_delivery_modes delivery_mode;
+ bool dest_mode_logical;
u32 (*calc_dest_apicid)(unsigned int cpu);
@@ -520,12 +520,10 @@ static inline void apic_smt_update(void) { }
#endif
struct msi_msg;
+struct irq_cfg;
-#ifdef CONFIG_PCI_MSI
-void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg);
-#else
-# define x86_vector_msi_compose_msg NULL
-#endif
+extern void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg,
+ bool dmar);
extern void ioapic_zap_locks(void);
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 05e694ed8386..5716f22f81ac 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -432,15 +432,13 @@ struct local_apic {
#define BAD_APICID 0xFFFFu
#endif
-enum ioapic_irq_destination_types {
- dest_Fixed = 0,
- dest_LowestPrio = 1,
- dest_SMI = 2,
- dest__reserved_1 = 3,
- dest_NMI = 4,
- dest_INIT = 5,
- dest__reserved_2 = 6,
- dest_ExtINT = 7
+enum apic_delivery_modes {
+ APIC_DELIVERY_MODE_FIXED = 0,
+ APIC_DELIVERY_MODE_LOWESTPRIO = 1,
+ APIC_DELIVERY_MODE_SMI = 2,
+ APIC_DELIVERY_MODE_NMI = 4,
+ APIC_DELIVERY_MODE_INIT = 5,
+ APIC_DELIVERY_MODE_EXTINT = 7,
};
#endif /* _ASM_X86_APICDEF_H */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b6cac6e9bb70..f732741ad7c7 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -199,7 +199,7 @@ static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- return try_cmpxchg(&v->counter, old, new);
+ return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 809bd010a751..7886d0578fc9 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -187,7 +187,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- return try_cmpxchg(&v->counter, old, new);
+ return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
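The arch_ prefix keeps the raw primitive distinct from the instrumented try_cmpxchg() that generic code layers on top under the plain name (an assumption about the series' intent). The calling convention is unchanged, with *old refreshed on failure, so the usual retry loop still reads:

static __always_inline void atomic_inc_sketch(atomic_t *v)
{
	int old = arch_atomic_read(v);

	/* On failure, old is updated to the current value. */
	while (!arch_atomic_try_cmpxchg(v, &old, old + 1))
		;
}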
diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 86b63c7feab7..86b2e0dcc4bf 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_CACHEINFO_H
#define _ASM_X86_CACHEINFO_H
-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
+void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index a8bfac131256..4d4ec5cbdc51 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -221,7 +221,7 @@ extern void __add_wrong_size(void)
#define __try_cmpxchg(ptr, pold, new, size) \
__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
-#define try_cmpxchg(ptr, pold, new) \
+#define arch_try_cmpxchg(ptr, pold, new) \
__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
/*
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 0e327a01f50f..f145e3326c6d 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -177,14 +177,13 @@ typedef struct user_regs_struct compat_elf_gregset_t;
static inline void __user *arch_compat_alloc_user_space(long len)
{
- compat_uptr_t sp;
-
- if (test_thread_flag(TIF_IA32)) {
- sp = task_pt_regs(current)->sp;
- } else {
- /* -128 for the x32 ABI redzone */
- sp = task_pt_regs(current)->sp - 128;
- }
+ compat_uptr_t sp = task_pt_regs(current)->sp;
+
+ /*
+ * -128 for the x32 ABI redzone. For IA32 it is not strictly
+ * necessary, but it is harmless.
+ */
+ sp -= 128;
return (void __user *)round_down(sp - len, 16);
}
diff --git a/arch/x86/include/asm/copy_mc_test.h b/arch/x86/include/asm/copy_mc_test.h
deleted file mode 100644
index e4991ba96726..000000000000
--- a/arch/x86/include/asm/copy_mc_test.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _COPY_MC_TEST_H_
-#define _COPY_MC_TEST_H_
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_COPY_MC_TEST
-extern unsigned long copy_mc_test_src;
-extern unsigned long copy_mc_test_dst;
-
-static inline void copy_mc_inject_src(void *addr)
-{
- if (addr)
- copy_mc_test_src = (unsigned long) addr;
- else
- copy_mc_test_src = ~0UL;
-}
-
-static inline void copy_mc_inject_dst(void *addr)
-{
- if (addr)
- copy_mc_test_dst = (unsigned long) addr;
- else
- copy_mc_test_dst = ~0UL;
-}
-#else /* CONFIG_COPY_MC_TEST */
-static inline void copy_mc_inject_src(void *addr)
-{
-}
-
-static inline void copy_mc_inject_dst(void *addr)
-{
-}
-#endif /* CONFIG_COPY_MC_TEST */
-
-#else /* __ASSEMBLY__ */
-#include <asm/export.h>
-
-#ifdef CONFIG_COPY_MC_TEST
-.macro COPY_MC_TEST_CTL
- .pushsection .data
- .align 8
- .globl copy_mc_test_src
- copy_mc_test_src:
- .quad 0
- EXPORT_SYMBOL_GPL(copy_mc_test_src)
- .globl copy_mc_test_dst
- copy_mc_test_dst:
- .quad 0
- EXPORT_SYMBOL_GPL(copy_mc_test_dst)
- .popsection
-.endm
-
-.macro COPY_MC_TEST_SRC reg count target
- leaq \count(\reg), %r9
- cmp copy_mc_test_src, %r9
- ja \target
-.endm
-
-.macro COPY_MC_TEST_DST reg count target
- leaq \count(\reg), %r9
- cmp copy_mc_test_dst, %r9
- ja \target
-.endm
-#else
-.macro COPY_MC_TEST_CTL
-.endm
-
-.macro COPY_MC_TEST_SRC reg count target
-.endm
-
-.macro COPY_MC_TEST_DST reg count target
-.endm
-#endif /* CONFIG_COPY_MC_TEST */
-#endif /* __ASSEMBLY__ */
-#endif /* _COPY_MC_TEST_H_ */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dad350d42ecf..f5ef2d5b9231 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -241,6 +241,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
@@ -356,6 +357,7 @@
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
+#define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 5861d34f9771..7947cb1782da 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -62,6 +62,12 @@
# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#endif
+#ifdef CONFIG_X86_SGX
+# define DISABLE_SGX 0
+#else
+# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -74,7 +80,7 @@
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_SMAP)
+#define DISABLED_MASK9 (DISABLE_SMAP|DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index b9a5d488f1a5..66bdfe838d61 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -186,8 +186,9 @@ static inline void elf_common_init(struct thread_struct *t,
#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)
-void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp);
-#define compat_start_thread compat_start_thread
+void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32);
+#define COMPAT_START_THREAD(ex, regs, new_ip, new_sp) \
+ compat_start_thread(regs, new_ip, new_sp, ex->e_machine == EM_X86_64)
void set_personality_ia32(bool);
#define COMPAT_SET_PERSONALITY(ex) \
@@ -361,7 +362,7 @@ do { \
#define AT_SYSINFO 32
#define COMPAT_ARCH_DLINFO \
-if (test_thread_flag(TIF_X32)) \
+if (exec->e_machine == EM_X86_64) \
ARCH_DLINFO_X32; \
else \
ARCH_DLINFO_IA32
@@ -382,8 +383,12 @@ struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp);
-#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
+ int uses_interp, bool x32);
+#define COMPAT_ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+ compat_arch_setup_additional_pages(bprm, interpreter, \
+ (ex->e_machine == EM_X86_64))
+
+extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs);
/* Do not change the values. See get_align_mask() */
enum align_flags {
diff --git a/arch/x86/include/asm/enclu.h b/arch/x86/include/asm/enclu.h
new file mode 100644
index 000000000000..b1314e41a744
--- /dev/null
+++ b/arch/x86/include/asm/enclu.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_ENCLU_H
+#define _ASM_X86_ENCLU_H
+
+#define EENTER 0x02
+#define ERESUME 0x03
+#define EEXIT 0x04
+
+#endif /* _ASM_X86_ENCLU_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 77217bd292bd..9f1a0a987e5e 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,13 +14,20 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
+#include <asm/kmap_size.h>
+
/*
* Exposed to assembly code for setting up initial page tables. Cannot be
* calculated in assembly code (fixmap entries are an enum), but is sanity
* checked in the actual fixmap C code to make sure that the fixmap is
* covered fully.
*/
-#define FIXMAP_PMD_NUM 2
+#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+# define FIXMAP_PMD_NUM 2
+#else
+# define KM_PMDS (KM_MAX_IDX * ((CONFIG_NR_CPUS + 511) / 512))
+# define FIXMAP_PMD_NUM (KM_PMDS + 2)
+#endif
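Sanity-checking the new sizing: each PMD entry in level2_fixmap_pgt maps 512 PTEs, so assuming the asm-generic KM_MAX_IDX of 16 (it is larger when kmap guard pages are enabled) and CONFIG_NR_CPUS=512, KM_PMDS = 16 * ((512 + 511) / 512) = 16, giving FIXMAP_PMD_NUM = 18 instead of the usual 2.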
/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
#define FIXMAP_PMD_TOP 507
@@ -31,7 +38,6 @@
#include <asm/pgtable_types.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
-#include <asm/kmap_types.h>
#else
#include <uapi/asm/vsyscall.h>
#endif
@@ -92,9 +98,9 @@ enum fixed_addresses {
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_KMAP_LOCAL
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
@@ -151,7 +157,6 @@ extern void reserve_top_address(unsigned long reserve);
extern int fixmaps_set;
-extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index dcd9503b1098..a5aba4ab0224 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -29,17 +29,32 @@ extern void fpregs_mark_activate(void);
* A context switch will (and softirq might) save CPU's FPU registers to
* fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
* a random state.
+ *
+ * local_bh_disable() protects against both preemption and soft interrupts
+ * on !RT kernels.
+ *
+ * On RT kernels local_bh_disable() is not sufficient because it only
+ * serializes soft interrupt related sections via a local lock, but stays
+ * preemptible. Disabling preemption is the right choice here: on RT
+ * kernels bottom-half processing always runs in thread context, so
+ * disabling preemption implicitly serializes against it as well.
+ *
+ * Disabling preemption also serializes against kernel_fpu_begin().
*/
static inline void fpregs_lock(void)
{
- preempt_disable();
- local_bh_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
}
static inline void fpregs_unlock(void)
{
- local_bh_enable();
- preempt_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
}
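Usage sketch, not from the patch: updates to current's FPU register state are bracketed by this pair so that neither a context switch nor, on !RT kernels, a softirq-level kernel_fpu_begin() can save or reload the registers mid-update (__fpregs_load_activate() is an assumed helper from fpu/internal.h):

	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		__fpregs_load_activate();	/* assumed helper */
	/* ... read or modify the FPU register state ... */
	fpregs_unlock();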
#ifdef CONFIG_X86_DEBUG_FPU
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 0f420b24e0fc..032e020853aa 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/threads.h>
-#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/fixmap.h>
@@ -58,11 +57,17 @@ extern unsigned long highstart_pfn, highend_pfn;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-void *kmap_atomic_pfn(unsigned long pfn);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
-
#define flush_cache_kmaps() do { } while (0)
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ arch_flush_lazy_mmu_mode()
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ do { \
+ flush_tlb_one_kernel((vaddr)); \
+ arch_flush_lazy_mmu_mode(); \
+ } while (0)
+
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
unsigned long end_pfn);
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 6352dee37cda..ab9f3dd87c80 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -74,17 +74,6 @@ extern void hpet_disable(void);
extern unsigned int hpet_readl(unsigned int a);
extern void force_hpet_resume(void);
-struct irq_data;
-struct hpet_channel;
-struct irq_domain;
-
-extern void hpet_msi_unmask(struct irq_data *data);
-extern void hpet_msi_mask(struct irq_data *data);
-extern void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg);
-extern struct irq_domain *hpet_create_irq_domain(int hpet_id);
-extern int hpet_assign_irq(struct irq_domain *domain,
- struct hpet_channel *hc, int dev_num);
-
#ifdef CONFIG_HPET_EMULATE_RTC
#include <linux/interrupt.h>
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index a4aeeaace040..d465ece58151 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -39,18 +39,16 @@ enum irq_alloc_type {
X86_IRQ_ALLOC_TYPE_PCI_MSI,
X86_IRQ_ALLOC_TYPE_PCI_MSIX,
X86_IRQ_ALLOC_TYPE_DMAR,
+ X86_IRQ_ALLOC_TYPE_AMDVI,
X86_IRQ_ALLOC_TYPE_UV,
- X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT,
- X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT,
};
struct ioapic_alloc_info {
- int pin;
- int node;
- u32 trigger : 1;
- u32 polarity : 1;
- u32 valid : 1;
- struct IO_APIC_route_entry *entry;
+ int pin;
+ int node;
+ u32 is_level : 1;
+ u32 active_low : 1;
+ u32 valid : 1;
};
struct uv_alloc_info {
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 0ed20e8bba9e..6bf42aed387e 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -23,6 +23,13 @@
#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
+#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081
+#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */
+
+#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082
+/* Support for the extended IOAPIC RTE format */
+#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2)
+
#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
#define HYPERV_CPUID_MIN 0x40000005
#define HYPERV_CPUID_MAX 0x4000ffff
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index b2442eb0ac2f..247a60a47331 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -11,9 +11,6 @@
#include <asm/irq_stack.h>
-bool idtentry_enter_nmi(struct pt_regs *regs);
-void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
-
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
* No error code pushed by hardware
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 5c1ae3eff9d4..a8c3d284fa46 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate over the prefix bytes of @insn. Each prefix byte is stored in
+ * @prefix and its index in @idx (@idx is only a cursor; do not modify it).
+ * Note that prefixes.nbytes can exceed 4 when prefixes are repeated, so it
+ * cannot serve as the loop bound over the prefix bytes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
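A usage sketch of the new iterator (the function name is illustrative, not from the patch):

static bool insn_has_rep_prefix(struct insn *insn)
{
	insn_byte_t p;
	int i;

	for_each_insn_prefix(insn, i, p) {
		if (p == 0xf2 || p == 0xf3)	/* REPNE / REP */
			return true;
	}

	return false;
}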
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index bd7f02480ca1..438ccd4f3cc4 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -143,21 +143,6 @@
.macro MODRM mod opd1 opd2
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
.endm
-
-.macro RDPID opd
- REG_TYPE rdpid_opd_type \opd
- .if rdpid_opd_type == REG_TYPE_R64
- R64_NUM rdpid_opd \opd
- .else
- R32_NUM rdpid_opd \opd
- .endif
- .byte 0xf3
- .if rdpid_opd > 7
- PFX_REX rdpid_opd 0
- .endif
- .byte 0x0f, 0xc7
- MODRM 0xc0 rdpid_opd 0x7
-.endm
#endif
#endif
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index a1a26f6d3aa4..437aa8d00e53 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -13,15 +13,6 @@
* Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
*/
-/* I/O Unit Redirection Table */
-#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
-#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
-#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
-#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
-#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
-#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
-#define IO_APIC_REDIR_MASKED (1 << 16)
-
/*
* The structure of the IO-APIC:
*/
@@ -65,52 +56,40 @@ union IO_APIC_reg_03 {
};
struct IO_APIC_route_entry {
- __u32 vector : 8,
- delivery_mode : 3, /* 000: FIXED
- * 001: lowest prio
- * 111: ExtINT
- */
- dest_mode : 1, /* 0: physical, 1: logical */
- delivery_status : 1,
- polarity : 1,
- irr : 1,
- trigger : 1, /* 0: edge, 1: level */
- mask : 1, /* 0: enabled, 1: disabled */
- __reserved_2 : 15;
-
- __u32 __reserved_3 : 24,
- dest : 8;
-} __attribute__ ((packed));
-
-struct IR_IO_APIC_route_entry {
- __u64 vector : 8,
- zero : 3,
- index2 : 1,
- delivery_status : 1,
- polarity : 1,
- irr : 1,
- trigger : 1,
- mask : 1,
- reserved : 31,
- format : 1,
- index : 15;
+ union {
+ struct {
+ u64 vector : 8,
+ delivery_mode : 3,
+ dest_mode_logical : 1,
+ delivery_status : 1,
+ active_low : 1,
+ irr : 1,
+ is_level : 1,
+ masked : 1,
+ reserved_0 : 15,
+ reserved_1 : 17,
+ virt_destid_8_14 : 7,
+ destid_0_7 : 8;
+ };
+ struct {
+ u64 ir_shared_0 : 8,
+ ir_zero : 3,
+ ir_index_15 : 1,
+ ir_shared_1 : 5,
+ ir_reserved_0 : 31,
+ ir_format : 1,
+ ir_index_0_14 : 15;
+ };
+ struct {
+ u64 w1 : 32,
+ w2 : 32;
+ };
+ };
} __attribute__ ((packed));
struct irq_alloc_info;
struct ioapic_domain_cfg;
-#define IOAPIC_EDGE 0
-#define IOAPIC_LEVEL 1
-
-#define IOAPIC_MASKED 1
-#define IOAPIC_UNMASKED 0
-
-#define IOAPIC_POL_HIGH 0
-#define IOAPIC_POL_LOW 1
-
-#define IOAPIC_DEST_MODE_PHYSICAL 0
-#define IOAPIC_DEST_MODE_LOGICAL 1
-
#define IOAPIC_MAP_ALLOC 0x1
#define IOAPIC_MAP_CHECK 0x2
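What the union buys, as a sketch (not part of the patch; io_apic_write() and the 0x10 + 2 * pin register layout are assumptions about the IO-APIC accessors): an entry is composed through the named bitfields and handed to the hardware as two raw 32-bit words.

struct IO_APIC_route_entry entry = { };

entry.vector = vector;
entry.delivery_mode = APIC_DELIVERY_MODE_FIXED;
entry.is_level = true;
entry.masked = true;

io_apic_write(ioapic_idx, 0x11 + 2 * pin, entry.w2);
io_apic_write(ioapic_idx, 0x10 + 2 * pin, entry.w1);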
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index bacf68c4d70e..e2de092fc38c 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -9,19 +9,14 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void
-iounmap_atomic(void __iomem *kvaddr);
+int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
-int
-iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
-
-void
-iomap_free(resource_size_t base, unsigned long size);
+void iomap_free(resource_size_t base, unsigned long size);
#endif /* _ASM_X86_IOMAP_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index af4a151d70b3..7cc49432187f 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -44,9 +44,6 @@ extern int irq_remapping_reenable(int);
extern int irq_remap_enable_fault_handling(void);
extern void panic_if_irq_remap(const char *msg);
-extern struct irq_domain *
-irq_remapping_get_irq_domain(struct irq_alloc_info *info);
-
/* Create PCI MSI/MSIx irqdomain, use @parent as the parent irqdomain. */
extern struct irq_domain *
arch_create_remap_msi_irq_domain(struct irq_domain *par, const char *n, int id);
@@ -71,11 +68,5 @@ static inline void panic_if_irq_remap(const char *msg)
{
}
-static inline struct irq_domain *
-irq_remapping_get_irq_domain(struct irq_alloc_info *info)
-{
- return NULL;
-}
-
#endif /* CONFIG_IRQ_REMAP */
#endif /* __X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index cd684d45cb5f..125c23b7bad3 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -12,6 +12,9 @@ enum {
X86_IRQ_ALLOC_LEGACY = 0x2,
};
+extern int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec);
+extern int x86_fwspec_is_hpet(struct irq_fwspec *fwspec);
+
extern struct irq_domain *x86_vector_domain;
extern void init_irq_alloc_info(struct irq_alloc_info *info,
diff --git a/arch/x86/include/asm/kmap_types.h b/arch/x86/include/asm/kmap_types.h
deleted file mode 100644
index 04ab8266e347..000000000000
--- a/arch/x86/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_KMAP_TYPES_H
-#define _ASM_X86_KMAP_TYPES_H
-
-#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif /* _ASM_X86_KMAP_TYPES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d44858b69353..7e5f33a0d0e2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -639,6 +639,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
+ unsigned long cr3_lm_rsvd_bits;
int maxphyaddr;
int max_tdp_level;
@@ -1655,6 +1656,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index a0f147893a04..56cdeaac76a0 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -177,7 +177,8 @@ enum mce_notifier_prios {
MCE_PRIO_EXTLOG,
MCE_PRIO_UC,
MCE_PRIO_EARLY,
- MCE_PRIO_CEC
+ MCE_PRIO_CEC,
+ MCE_PRIO_HIGHEST = MCE_PRIO_CEC
};
struct notifier_block;
@@ -198,16 +199,22 @@ static inline void enable_copy_mc_fragile(void)
}
#endif
+struct cper_ia_proc_ctx;
+
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
void mcheck_vendor_init_severity(void);
+int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
+ u64 lapic_id);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline void mcheck_vendor_init_severity(void) {}
+static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
+ u64 lapic_id) { return -EINVAL; }
#endif
#ifdef CONFIG_X86_ANCIENT_MCE
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 9257667d13c5..5d7494631ea9 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -6,6 +6,12 @@
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
+#include <linux/bits.h>
+
+/* Uprobes on this MM assume 32-bit code */
+#define MM_CONTEXT_UPROBE_IA32 BIT(0)
+/* vsyscall page is accessible on this MM */
+#define MM_CONTEXT_HAS_VSYSCALL BIT(1)
/*
* x86 has arch-specific MMU state beyond what lives in mm_struct.
@@ -33,8 +39,7 @@ typedef struct {
#endif
#ifdef CONFIG_X86_64
- /* True if mm supports a task running in 32 bit compatibility mode. */
- unsigned short ia32_compat;
+ unsigned short flags;
#endif
struct mutex lock;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index d98016b83755..27516046117a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -91,12 +91,14 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
}
#endif
+#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
*/
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -116,6 +118,8 @@ static inline int init_new_context(struct task_struct *tsk,
init_new_context_ldt(mm);
return 0;
}
+
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
@@ -177,7 +181,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
static inline bool is_64bit_mm(struct mm_struct *mm)
{
return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
- !(mm->context.ia32_compat == TIF_IA32);
+ !(mm->context.flags & MM_CONTEXT_UPROBE_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
@@ -214,4 +218,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
unsigned long __get_current_cr3_fast(void);
+#include <asm-generic/mmu_context.h>
+
#endif /* _ASM_X86_MMU_CONTEXT_H */
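The repeated '#define foo foo' markers tell the asm-generic header included at the bottom which hooks x86 provides itself; asm-generic/mmu_context.h presumably supplies no-op defaults in this style (a sketch, not the actual header):

#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	return 0;
}
#endif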
diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
index cd30013d15d3..b85147d75626 100644
--- a/arch/x86/include/asm/msi.h
+++ b/arch/x86/include/asm/msi.h
@@ -9,4 +9,54 @@ typedef struct irq_alloc_info msi_alloc_info_t;
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
msi_alloc_info_t *arg);
+/* Structs and defines for the X86 specific MSI message format */
+
+typedef struct x86_msi_data {
+ u32 vector : 8,
+ delivery_mode : 3,
+ dest_mode_logical : 1,
+ reserved : 2,
+ active_low : 1,
+ is_level : 1;
+
+ u32 dmar_subhandle;
+} __attribute__ ((packed)) arch_msi_msg_data_t;
+#define arch_msi_msg_data x86_msi_data
+
+typedef struct x86_msi_addr_lo {
+ union {
+ struct {
+ u32 reserved_0 : 2,
+ dest_mode_logical : 1,
+ redirect_hint : 1,
+ reserved_1 : 1,
+ virt_destid_8_14 : 7,
+ destid_0_7 : 8,
+ base_address : 12;
+ };
+ struct {
+ u32 dmar_reserved_0 : 2,
+ dmar_index_15 : 1,
+ dmar_subhandle_valid : 1,
+ dmar_format : 1,
+ dmar_index_0_14 : 15,
+ dmar_base_address : 12;
+ };
+ };
+} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
+#define arch_msi_msg_addr_lo x86_msi_addr_lo
+
+#define X86_MSI_BASE_ADDRESS_LOW (0xfee00000 >> 20)
+
+typedef struct x86_msi_addr_hi {
+ u32 reserved : 8,
+ destid_8_31 : 24;
+} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
+#define arch_msi_msg_addr_hi x86_msi_addr_hi
+
+#define X86_MSI_BASE_ADDRESS_HIGH (0)
+
+struct msi_msg;
+u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid);
+
#endif /* _ASM_X86_MSI_H */
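Given the bitfields above, the declared helper plausibly recombines the destination id as follows (a sketch; the real body lands elsewhere in the series):

u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid)
{
	u32 dest = msg->arch_addr_lo.destid_0_7;

	if (extid)
		dest |= msg->arch_addr_hi.destid_8_31 << 8;

	return dest;
}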
diff --git a/arch/x86/include/asm/msidef.h b/arch/x86/include/asm/msidef.h
deleted file mode 100644
index ee2f8ccc32d0..000000000000
--- a/arch/x86/include/asm/msidef.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_MSIDEF_H
-#define _ASM_X86_MSIDEF_H
-
-/*
- * Constants for Intel APIC based MSI messages.
- */
-
-/*
- * Shifts for MSI data
- */
-
-#define MSI_DATA_VECTOR_SHIFT 0
-#define MSI_DATA_VECTOR_MASK 0x000000ff
-#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
- MSI_DATA_VECTOR_MASK)
-
-#define MSI_DATA_DELIVERY_MODE_SHIFT 8
-#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
-#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
-
-#define MSI_DATA_LEVEL_SHIFT 14
-#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
-#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
-
-#define MSI_DATA_TRIGGER_SHIFT 15
-#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
-#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
-
-/*
- * Shift/mask fields for msi address
- */
-
-#define MSI_ADDR_BASE_HI 0
-#define MSI_ADDR_BASE_LO 0xfee00000
-
-#define MSI_ADDR_DEST_MODE_SHIFT 2
-#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
-#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
-
-#define MSI_ADDR_REDIRECTION_SHIFT 3
-#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
- /* dedicated cpu */
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
- /* lowest priority */
-
-#define MSI_ADDR_DEST_ID_SHIFT 12
-#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
-#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
- MSI_ADDR_DEST_ID_MASK)
-#define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00)
-
-#define MSI_ADDR_IR_EXT_INT (1 << 4)
-#define MSI_ADDR_IR_SHV (1 << 3)
-#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13)
-#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5)
-#endif /* _ASM_X86_MSIDEF_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 972a34d93505..2b5fc9accec4 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -139,6 +139,7 @@
#define MSR_IA32_MCG_CAP 0x00000179
#define MSR_IA32_MCG_STATUS 0x0000017a
#define MSR_IA32_MCG_CTL 0x0000017b
+#define MSR_ERROR_CONTROL 0x0000017f
#define MSR_IA32_MCG_EXT_CTL 0x000004d0
#define MSR_OFFCORE_RSP_0 0x000001a6
@@ -326,8 +327,9 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
-#define MSR_AMD_PKG_ENERGY_STATUS 0xc001029b
#define MSR_AMD_RAPL_POWER_UNIT 0xc0010299
+#define MSR_AMD_CORE_ENERGY_STATUS 0xc001029a
+#define MSR_AMD_PKG_ENERGY_STATUS 0xc001029b
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
@@ -609,6 +611,8 @@
#define FEAT_CTL_LOCKED BIT(0)
#define FEAT_CTL_VMX_ENABLED_INSIDE_SMX BIT(1)
#define FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX BIT(2)
+#define FEAT_CTL_SGX_LC_ENABLED BIT(17)
+#define FEAT_CTL_SGX_ENABLED BIT(18)
#define FEAT_CTL_LMCE_ENABLED BIT(20)
#define MSR_IA32_TSC_ADJUST 0x0000003b
@@ -628,6 +632,12 @@
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
+/* Intel SGX Launch Enclave Public Key Hash MSRs */
+#define MSR_IA32_SGXLEPUBKEYHASH0 0x0000008C
+#define MSR_IA32_SGXLEPUBKEYHASH1 0x0000008D
+#define MSR_IA32_SGXLEPUBKEYHASH2 0x0000008E
+#define MSR_IA32_SGXLEPUBKEYHASH3 0x0000008F
+
#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b
#define MSR_IA32_SMBASE 0x0000009e
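As a hedged usage sketch, the new AMD RAPL MSRs can be read from userspace through the msr character device (requires the msr driver and root; the unit decoding assumes the usual RAPL layout with the energy-status unit in bits 12:8):

#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD_RAPL_POWER_UNIT    0xc0010299
#define MSR_AMD_PKG_ENERGY_STATUS  0xc001029b

static int rdmsr(int fd, uint32_t msr, uint64_t *val)
{
	/* The msr device interprets the file offset as the MSR number. */
	return pread(fd, val, sizeof(*val), msr) == sizeof(*val) ? 0 : -1;
}

int main(void)
{
	uint64_t unit, energy;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || rdmsr(fd, MSR_AMD_RAPL_POWER_UNIT, &unit) ||
	    rdmsr(fd, MSR_AMD_PKG_ENERGY_STATUS, &energy)) {
		perror("msr");
		return 1;
	}
	/* The energy status unit lives in bits 12:8 of the power unit MSR. */
	printf("raw energy %" PRIu64 ", 1/2^%" PRIu64 " J per count\n",
	       energy & 0xffffffff, (unit >> 8) & 0x1f);
	close(fd);
	return 0;
}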
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index e039a933aca3..29dd27b5a339 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -88,8 +88,6 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
- trace_hardirqs_on();
-
mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index f462895a33e4..faf9cc1c14bb 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -53,7 +53,13 @@
#define STACK_TOP_MAX STACK_TOP
/*
- * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
+ * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
+ * address for the kernel image, rather than the limit on the size itself. On
+ * 32-bit, this is not a strict limit, but this value is used to limit the
+ * link-time virtual address range of the kernel, and by KASLR to limit the
+ * randomized address from which the kernel is executed. A relocatable kernel
+ * can be loaded somewhat higher than KERNEL_IMAGE_SIZE as long as enough space
+ * remains for the vmalloc area.
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 3f49dac03617..645bd1d0ee07 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -98,8 +98,10 @@
#define STACK_TOP_MAX TASK_SIZE_MAX
/*
- * Maximum kernel image size is limited to 1 GiB, due to the fixmap living
- * in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S).
+ * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
+ * address for the kernel image, rather than the limit on the size itself.
+ * This can be at most 1 GiB, due to the fixmap living in the next 1 GiB (see
+ * level2_kernel_pgt in arch/x86/kernel/head_64.S).
*
* On KASLR use 1 GiB by default, leaving 1 GiB for modules once the
* page tables are fully set up.
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d25cc6830e89..f8dce11d2bc1 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -812,17 +812,6 @@ extern void default_banner(void);
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */
-#ifdef CONFIG_PARAVIRT_XXL
-
-#define GET_CR2_INTO_AX \
- PARA_SITE(PARA_PATCH(PV_MMU_read_cr2), \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2); \
- )
-
-#endif /* CONFIG_PARAVIRT_XXL */
-
-
#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0fad9f61c76a..b6b02b7c19cc 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -41,7 +41,6 @@
#ifndef __ASSEMBLY__
#include <asm/desc_defs.h>
-#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6960cd6d1f23..b9a7fd0a27e2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -137,7 +137,9 @@ union cpuid10_edx {
struct {
unsigned int num_counters_fixed:5;
unsigned int bit_width_fixed:8;
- unsigned int reserved:19;
+ unsigned int reserved1:2;
+ unsigned int anythread_deprecated:1;
+ unsigned int reserved2:16;
} split;
unsigned int full;
};
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index d7acae4120d5..7c9c968a42ef 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -57,19 +57,13 @@ do { \
#endif
/*
- * This is how much memory in addition to the memory covered up to
- * and including _end we need mapped initially.
- * We need:
- * (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
- * (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
+ * This is used to calculate the .brk reservation for initial pagetables.
+ * Enough space is reserved to allocate pagetables sufficient to cover all
+ * of LOWMEM_PAGES, which is an upper bound on the size of the direct map of
+ * lowmem.
*
- * Modulo rounding, each megabyte assigned here requires a kilobyte of
- * memory, which is currently unreclaimed.
- *
- * This should be a multiple of a page.
- *
- * KERNEL_IMAGE_SIZE should be greater than pa(_end)
- * and small than max_low_pfn, otherwise will waste some page table entries
+ * With PAE paging (PTRS_PER_PMD > 1), we allocate PTRS_PER_PGD == 4 pages for
+ * the PMDs in addition to the pages required for the last level pagetables.
*/
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
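As a rough worked example of the reservation this computes (assuming the common 896 MiB lowmem split, i.e. LOWMEM_PAGES == 229376 4 KiB pages, and PAE's PTRS_PER_PMD == 512):

	PAGE_TABLE_SIZE(229376) = 229376 / 512 + 4
	                        = 448 + 4 = 452 pages, i.e. ~1.8 MiB of .brk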
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 52e5f5f2240d..91ac10654570 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -143,7 +143,11 @@ extern unsigned int ptrs_per_p4d;
#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module sections ends with the start of the fixmap */
-#define MODULES_END _AC(0xffffffffff000000, UL)
+#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+# define MODULES_END _AC(0xffffffffff000000, UL)
+#else
+# define MODULES_END _AC(0xfffffffffe000000, UL)
+#endif
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#define ESPFIX_PGD_ENTRY _AC(-2, UL)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 816b31c68550..394757ee030a 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -155,6 +155,7 @@ enum page_cache_mode {
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
+#define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)
#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 82a08b585818..c20a52b5534b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -813,10 +813,8 @@ extern int set_tsc_mode(unsigned int val);
DECLARE_PER_CPU(u64, msr_misc_features_shadow);
#ifdef CONFIG_CPU_SUP_AMD
-extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
-static inline u16 amd_get_nb_id(int cpu) { return 0; }
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
#endif
diff --git a/arch/x86/include/asm/seccomp.h b/arch/x86/include/asm/seccomp.h
index 2bd1338de236..fef16e398161 100644
--- a/arch/x86/include/asm/seccomp.h
+++ b/arch/x86/include/asm/seccomp.h
@@ -16,6 +16,26 @@
#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
#endif
+#ifdef CONFIG_X86_64
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_X86_64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "x86_64"
+# ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_I386
+# define SECCOMP_ARCH_COMPAT_NR IA32_NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "ia32"
+# endif
+/*
+ * x32 will have __X32_SYSCALL_BIT set in the syscall number. We don't support
+ * caching such syscalls; they are treated as out-of-range syscalls, which will
+ * always pass through the BPF filter.
+ */
+#else /* !CONFIG_X86_64 */
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_I386
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ia32"
+#endif
+
#include <asm-generic/seccomp.h>
#endif /* _ASM_X86_SECCOMP_H */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 5948218f35c5..4352f08bfbb5 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -82,6 +82,7 @@ int set_pages_rw(struct page *page, int numpages);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
extern int kernel_set_to_readonly;
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 6bfc878f6771..6a9ccc1b2be5 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -28,4 +28,14 @@
#endif
#endif /* CONFIG_SPARSEMEM */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+extern int phys_to_target_node(phys_addr_t start);
+#define phys_to_target_node phys_to_target_node
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_X86_SPARSEMEM_H */
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 49600643faba..f248eb2ac2d4 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -88,9 +88,6 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
return (unsigned long *)task->thread.sp;
}
-void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, const char *log_lvl);
-
/* The form of the top of the frame on the stack */
struct stack_frame {
struct stack_frame *next_frame;
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index 0fd4a9dfb29c..ab7382f92aff 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -98,12 +98,13 @@ static inline void sync_core_before_usermode(void)
/* With PTI, we unconditionally serialize before running user code. */
if (static_cpu_has(X86_FEATURE_PTI))
return;
+
/*
- * Return from interrupt and NMI is done through iret, which is core
- * serializing.
+ * Even if we're in an interrupt, we might reschedule before returning,
+ * in which case we could switch to a different thread in the same mm
+ * and return using SYSRET or SYSEXIT. Instead of trying to keep
+ * track of our need to sync the core, just sync right away.
*/
- if (in_irq() || in_nmi())
- return;
sync_core();
}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 44733a4bfc42..0d751d5da702 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -55,6 +55,7 @@ struct task_struct;
struct thread_info {
unsigned long flags; /* low level flags */
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
u32 status; /* thread synchronous flags */
};
@@ -74,15 +75,11 @@ struct thread_info {
* - these are process state flags that various assembly files
* may need to access
*/
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
#define TIF_SSBD 5 /* Speculative store bypass disable */
-#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
-#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-#define TIF_SECCOMP 8 /* secure computing */
#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
@@ -91,7 +88,7 @@ struct thread_info {
#define TIF_NEED_FPU_LOAD 14 /* load FPU on return to userspace */
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
-#define TIF_IA32 17 /* IA32 compatibility process */
+#define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */
#define TIF_SLD 18 /* Restore split lock detection on context switch */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
@@ -99,19 +96,13 @@ struct thread_info {
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
-#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
-#define TIF_X32 30 /* 32-bit native x86-64 binary */
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_SSBD (1 << TIF_SSBD)
-#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
@@ -120,16 +111,14 @@ struct thread_info {
#define _TIF_NEED_FPU_LOAD (1 << TIF_NEED_FPU_LOAD)
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
#define _TIF_NOTSC (1 << TIF_NOTSC)
-#define _TIF_IA32 (1 << TIF_IA32)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_SLD (1 << TIF_SLD)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
-#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32 (1 << TIF_ADDR32)
-#define _TIF_X32 (1 << TIF_X32)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE \
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index f4234575f3fd..488a8e848754 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -218,4 +218,9 @@ static inline void arch_set_max_freq_ratio(bool turbo_disabled)
}
#endif
+#ifdef CONFIG_ACPI_CPPC_LIB
+void init_freq_invariance_cppc(void);
+#define init_freq_invariance_cppc init_freq_invariance_cppc
+#endif
+
#endif /* _ASM_X86_TOPOLOGY_H */
diff --git a/arch/x86/include/asm/trap_pf.h b/arch/x86/include/asm/trap_pf.h
index 305bc1214aef..10b1de500ab1 100644
--- a/arch/x86/include/asm/trap_pf.h
+++ b/arch/x86/include/asm/trap_pf.h
@@ -11,6 +11,7 @@
* bit 3 == 1: use of reserved bit detected
* bit 4 == 1: fault was an instruction fetch
* bit 5 == 1: protection keys block access
+ * bit 15 == 1: SGX MMU page-fault
*/
enum x86_pf_error_code {
X86_PF_PROT = 1 << 0,
@@ -19,6 +20,7 @@ enum x86_pf_error_code {
X86_PF_RSVD = 1 << 3,
X86_PF_INSTR = 1 << 4,
X86_PF_PK = 1 << 5,
+ X86_PF_SGX = 1 << 15,
};
#endif /* _ASM_X86_TRAP_PF_H */
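For illustration, a decoder along these lines could report the new bit alongside the existing ones (the helper name is made up for this sketch; only the enum above is assumed):

static const char *pf_reason(unsigned long error_code)
{
	if (error_code & X86_PF_SGX)
		return "SGX access control violation";
	if (error_code & X86_PF_RSVD)
		return "reserved bit set in a page table entry";
	if (error_code & X86_PF_PK)
		return "protection keys block access";
	return (error_code & X86_PF_PROT) ? "protection violation"
					  : "page not present";
}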
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 08b3d810dfba..1b6455f881f9 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -28,6 +28,20 @@ enum uv_bios_cmd {
UV_BIOS_SET_LEGACY_VGA_TARGET
};
+#define UV_BIOS_EXTRA 0x10000
+#define UV_BIOS_GET_PCI_TOPOLOGY 0x10001
+#define UV_BIOS_GET_GEOINFO 0x10003
+
+#define UV_BIOS_EXTRA_OP_MEM_COPYIN 0x1000
+#define UV_BIOS_EXTRA_OP_MEM_COPYOUT 0x2000
+#define UV_BIOS_EXTRA_OP_MASK 0x0fff
+#define UV_BIOS_EXTRA_GET_HEAPSIZE 1
+#define UV_BIOS_EXTRA_INSTALL_HEAP 2
+#define UV_BIOS_EXTRA_MASTER_NASID 3
+#define UV_BIOS_EXTRA_OBJECT_COUNT (10|UV_BIOS_EXTRA_OP_MEM_COPYOUT)
+#define UV_BIOS_EXTRA_ENUM_OBJECTS (12|UV_BIOS_EXTRA_OP_MEM_COPYOUT)
+#define UV_BIOS_EXTRA_ENUM_PORTS (13|UV_BIOS_EXTRA_OP_MEM_COPYOUT)
+
/*
* Status values returned from a BIOS call.
*/
@@ -109,6 +123,32 @@ struct uv_systab {
} entry[1]; /* additional entries follow */
};
extern struct uv_systab *uv_systab;
+
+#define UV_BIOS_MAXSTRING 128
+struct uv_bios_hub_info {
+ unsigned int id;
+ union {
+ struct {
+ unsigned long long this_part:1;
+ unsigned long long is_shared:1;
+ unsigned long long is_disabled:1;
+ } fields;
+ struct {
+ unsigned long long flags;
+ unsigned long long reserved;
+ } b;
+ } f;
+ char name[UV_BIOS_MAXSTRING];
+ char location[UV_BIOS_MAXSTRING];
+ unsigned int ports;
+};
+
+struct uv_bios_port_info {
+ unsigned int port;
+ unsigned int conn_id;
+ unsigned int conn_port;
+};
+
/* (... end of definitions from UV BIOS ...) */
enum {
@@ -142,6 +182,15 @@ extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);
+extern s64 uv_bios_get_master_nasid(u64 sz, u64 *nasid);
+extern s64 uv_bios_get_heapsize(u64 nasid, u64 sz, u64 *heap_sz);
+extern s64 uv_bios_install_heap(u64 nasid, u64 sz, u64 *heap);
+extern s64 uv_bios_obj_count(u64 nasid, u64 sz, u64 *objcnt);
+extern s64 uv_bios_enum_objs(u64 nasid, u64 sz, u64 *objbuf);
+extern s64 uv_bios_enum_ports(u64 nasid, u64 obj_id, u64 sz, u64 *portbuf);
+extern s64 uv_bios_get_geoinfo(u64 nasid, u64 sz, u64 *geo);
+extern s64 uv_bios_get_pci_topology(u64 sz, u64 *buf);
+
extern int uv_bios_init(void);
extern unsigned long get_uv_systab_phys(bool msg);
@@ -151,6 +200,8 @@ extern long sn_partition_id;
extern long sn_coherency_id;
extern long sn_region_size;
extern long system_serial_number;
+extern ssize_t uv_get_archtype(char *buf, int len);
+extern int uv_get_hubless_system(void);
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 172d3e4a9e4b..648eb23fe7f0 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -2,14 +2,8 @@
#ifndef _ASM_X86_UV_UV_H
#define _ASM_X86_UV_UV_H
-#include <asm/tlbflush.h>
-
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC};
-struct cpumask;
-struct mm_struct;
-struct flush_tlb_info;
-
#ifdef CONFIG_X86_UV
#include <linux/efi.h>
@@ -44,10 +38,6 @@ static inline int is_uv_system(void) { return 0; }
static inline int is_uv_hubbed(int uv) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
-static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
-{ return cpumask; }
#endif /* X86_UV */
diff --git a/arch/x86/include/asm/uv/uv_geo.h b/arch/x86/include/asm/uv/uv_geo.h
new file mode 100644
index 000000000000..f241451035fb
--- /dev/null
+++ b/arch/x86/include/asm/uv/uv_geo.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2020 Hewlett Packard Enterprise Development LP. All rights reserved.
+ */
+
+#ifndef _ASM_UV_GEO_H
+#define _ASM_UV_GEO_H
+
+/* Type declarations */
+
+/* Size of a geoid_s structure (must be before decl. of geoid_u) */
+#define GEOID_SIZE 8
+
+/* Fields common to all substructures */
+struct geo_common_s {
+ unsigned char type; /* What type of h/w is named by this geoid_s */
+ unsigned char blade;
+ unsigned char slot; /* slot is IRU */
+ unsigned char upos;
+ unsigned char rack;
+};
+
+/* Additional fields for particular types of hardware */
+struct geo_node_s {
+ struct geo_common_s common; /* No additional fields needed */
+};
+
+struct geo_rtr_s {
+ struct geo_common_s common; /* No additional fields needed */
+};
+
+struct geo_iocntl_s {
+ struct geo_common_s common; /* No additional fields needed */
+};
+
+struct geo_pcicard_s {
+ struct geo_iocntl_s common;
+ char bus; /* Bus/widget number */
+ char slot; /* PCI slot number */
+};
+
+/* Subcomponents of a node */
+struct geo_cpu_s {
+ struct geo_node_s node;
+ unsigned char socket:4, /* Which CPU on the node */
+ thread:4;
+ unsigned char core;
+};
+
+struct geo_mem_s {
+ struct geo_node_s node;
+ char membus; /* The memory bus on the node */
+ char memslot; /* The memory slot on the bus */
+};
+
+union geoid_u {
+ struct geo_common_s common;
+ struct geo_node_s node;
+ struct geo_iocntl_s iocntl;
+ struct geo_pcicard_s pcicard;
+ struct geo_rtr_s rtr;
+ struct geo_cpu_s cpu;
+ struct geo_mem_s mem;
+ char padsize[GEOID_SIZE];
+};
+
+/* Defined constants */
+
+#define GEO_MAX_LEN 48
+
+#define GEO_TYPE_INVALID 0
+#define GEO_TYPE_MODULE 1
+#define GEO_TYPE_NODE 2
+#define GEO_TYPE_RTR 3
+#define GEO_TYPE_IOCNTL 4
+#define GEO_TYPE_IOCARD 5
+#define GEO_TYPE_CPU 6
+#define GEO_TYPE_MEM 7
+#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
+
+static inline int geo_rack(union geoid_u g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ -1 : g.common.rack;
+}
+
+static inline int geo_slot(union geoid_u g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ -1 : g.common.upos;
+}
+
+static inline int geo_blade(union geoid_u g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ -1 : g.common.blade * 2 + g.common.slot;
+}
+
+#endif /* _ASM_UV_GEO_H */
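A minimal sketch of consuming one of these geoids, e.g. as returned via UV_BIOS_GET_GEOINFO (the printing helper itself is hypothetical):

static void uv_print_location(union geoid_u geoid)
{
	if (geoid.common.type == GEO_TYPE_INVALID) {
		pr_info("UV: invalid geoid\n");
		return;
	}
	/* geo_blade() folds the two node slots per blade into one index. */
	pr_info("UV: rack %d, slot %d, blade %d\n",
		geo_rack(geoid), geo_slot(geoid), geo_blade(geoid));
}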
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index bbcdc7b8f963..98aa103eb4ab 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -15,6 +15,8 @@ struct vdso_image {
unsigned long size; /* Always a multiple of PAGE_SIZE */
unsigned long alt, alt_len;
+ unsigned long extable_base, extable_len;
+ const void *extable;
long sym_vvar_start; /* Negative offset to the vvar area */
@@ -27,6 +29,8 @@ struct vdso_image {
long sym___kernel_rt_sigreturn;
long sym___kernel_vsyscall;
long sym_int80_landing_pad;
+ long sym_vdso32_sigreturn_landing_pad;
+ long sym_vdso32_rt_sigreturn_landing_pad;
};
#ifdef CONFIG_X86_64
@@ -45,6 +49,9 @@ extern void __init init_vdso_image(const struct vdso_image *image);
extern int map_vdso_once(const struct vdso_image *image, unsigned long addr);
+extern bool fixup_vdso_exception(struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr);
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_VDSO_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index dde5b3f1e7cd..5c69f7eb5d47 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -116,6 +116,7 @@ struct x86_init_pci {
* @init_platform: platform setup
* @guest_late_init: guest late init
* @x2apic_available: X2APIC detection
+ * @msi_ext_dest_id: MSI supports 15-bit APIC IDs
* @init_mem_mapping: setup early mappings during init_mem_mapping()
* @init_after_bootmem: guest init after boot allocator is finished
*/
@@ -123,6 +124,7 @@ struct x86_hyper_init {
void (*init_platform)(void);
void (*guest_late_init)(void);
bool (*x2apic_available)(void);
+ bool (*msi_ext_dest_id)(void);
void (*init_mem_mapping)(void);
void (*init_after_bootmem)(void);
};
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 812e9b4c1114..950afebfba88 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -32,6 +32,7 @@
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
+#define KVM_FEATURE_MSI_EXT_DEST_ID 15
#define KVM_HINTS_REALTIME 0
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
new file mode 100644
index 000000000000..9034f3007c4e
--- /dev/null
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright(c) 2016-20 Intel Corporation.
+ */
+#ifndef _UAPI_ASM_X86_SGX_H
+#define _UAPI_ASM_X86_SGX_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * enum sgx_page_flags - page control flags
+ * %SGX_PAGE_MEASURE: Measure the page contents with a sequence of
+ * ENCLS[EEXTEND] operations.
+ */
+enum sgx_page_flags {
+ SGX_PAGE_MEASURE = 0x01,
+};
+
+#define SGX_MAGIC 0xA4
+
+#define SGX_IOC_ENCLAVE_CREATE \
+ _IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
+#define SGX_IOC_ENCLAVE_ADD_PAGES \
+ _IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
+#define SGX_IOC_ENCLAVE_INIT \
+ _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
+#define SGX_IOC_ENCLAVE_PROVISION \
+ _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision)
+
+/**
+ * struct sgx_enclave_create - parameter structure for the
+ * %SGX_IOC_ENCLAVE_CREATE ioctl
+ * @src: address for the SECS page data
+ */
+struct sgx_enclave_create {
+ __u64 src;
+};
+
+/**
+ * struct sgx_enclave_add_pages - parameter structure for the
+ * %SGX_IOC_ENCLAVE_ADD_PAGES ioctl
+ * @src: start address for the page data
+ * @offset: starting page offset
+ * @length: length of the data (multiple of the page size)
+ * @secinfo: address for the SECINFO data
+ * @flags: page control flags
+ * @count: number of bytes added (multiple of the page size)
+ */
+struct sgx_enclave_add_pages {
+ __u64 src;
+ __u64 offset;
+ __u64 length;
+ __u64 secinfo;
+ __u64 flags;
+ __u64 count;
+};
+
+/**
+ * struct sgx_enclave_init - parameter structure for the
+ * %SGX_IOC_ENCLAVE_INIT ioctl
+ * @sigstruct: address for the SIGSTRUCT data
+ */
+struct sgx_enclave_init {
+ __u64 sigstruct;
+};
+
+/**
+ * struct sgx_enclave_provision - parameter structure for the
+ * %SGX_IOC_ENCLAVE_PROVISION ioctl
+ * @fd: file handle of /dev/sgx_provision
+ */
+struct sgx_enclave_provision {
+ __u64 fd;
+};
+
+struct sgx_enclave_run;
+
+/**
+ * typedef sgx_enclave_user_handler_t - Exit handler function accepted by
+ * __vdso_sgx_enter_enclave()
+ * @run: The run instance given by the caller
+ *
+ * The register parameters contain the snapshot of their values at enclave
+ * exit. An invalid ENCLU function number will cause -EINVAL to be returned
+ * to the caller.
+ *
+ * Return:
+ * - <= 0: The given value is returned back to the caller.
+ * - > 0: ENCLU function to invoke, either EENTER or ERESUME.
+ */
+typedef int (*sgx_enclave_user_handler_t)(long rdi, long rsi, long rdx,
+ long rsp, long r8, long r9,
+ struct sgx_enclave_run *run);
+
+/**
+ * struct sgx_enclave_run - the execution context of __vdso_sgx_enter_enclave()
+ * @tcs: TCS used to enter the enclave
+ * @function: The last seen ENCLU function (EENTER, ERESUME or EEXIT)
+ * @exception_vector: The interrupt vector of the exception
+ * @exception_error_code: The exception error code pulled out of the stack
+ * @exception_addr: The address that triggered the exception
+ * @user_handler: User provided callback run on exception
+ * @user_data: Data passed to the user handler
+ * @reserved: Reserved for future extensions
+ *
+ * If @user_handler is provided, the handler will be invoked on all return paths
+ * of the normal flow. The user handler may transfer control, e.g. via a
+ * longjmp() call or a C++ exception, without returning to
+ * __vdso_sgx_enter_enclave().
+ */
+struct sgx_enclave_run {
+ __u64 tcs;
+ __u32 function;
+ __u16 exception_vector;
+ __u16 exception_error_code;
+ __u64 exception_addr;
+ __u64 user_handler;
+ __u64 user_data;
+ __u8 reserved[216];
+};
+
+/**
+ * typedef vdso_sgx_enter_enclave_t - Prototype for __vdso_sgx_enter_enclave(),
+ * a vDSO function to enter an SGX enclave.
+ * @rdi: Pass-through value for RDI
+ * @rsi: Pass-through value for RSI
+ * @rdx: Pass-through value for RDX
+ * @function: ENCLU function, must be EENTER or ERESUME
+ * @r8: Pass-through value for R8
+ * @r9: Pass-through value for R9
+ * @run: struct sgx_enclave_run, must be non-NULL
+ *
+ * NOTE: __vdso_sgx_enter_enclave() does not ensure full compliance with the
+ * x86-64 ABI, e.g. doesn't handle XSAVE state. Except for non-volatile
+ * general purpose registers, EFLAGS.DF, and RSP alignment, preserving/setting
+ * state in accordance with the x86-64 ABI is the responsibility of the enclave
+ * and its runtime, i.e. __vdso_sgx_enter_enclave() cannot be called from C
+ * code without careful consideration by both the enclave and its runtime.
+ *
+ * All general purpose registers except RAX, RBX and RCX are passed as-is to the
+ * enclave. RAX, RBX and RCX are consumed by EENTER and ERESUME and are loaded
+ * with @function, asynchronous exit pointer, and @run.tcs respectively.
+ *
+ * RBP and the stack are used to anchor __vdso_sgx_enter_enclave() to the
+ * pre-enclave state, e.g. to retrieve @run.exception_vector and @run.user_handler
+ * after an enclave exit. All other registers are available for use by the
+ * enclave and its runtime, e.g. an enclave can push additional data onto the
+ * stack (and modify RSP) to pass information to the optional user handler (see
+ * below).
+ *
+ * Most exceptions reported on ENCLU, including those that occur within the
+ * enclave, are fixed up and reported synchronously instead of being delivered
+ * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are
+ * never fixed up and are always delivered via standard signals. On synchronously
+ * reported exceptions, -EFAULT is returned and details about the exception are
+ * recorded in the @run.exception_vector, @run.exception_error_code and
+ * @run.exception_addr fields.
+ *
+ * Return:
+ * - 0: ENCLU function was successfully executed.
+ * - -EINVAL: Invalid ENCLU function number (neither EENTER nor ERESUME).
+ */
+typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi, unsigned long rsi,
+ unsigned long rdx, unsigned int function,
+ unsigned long r8, unsigned long r9,
+ struct sgx_enclave_run *run);
+
+#endif /* _UAPI_ASM_X86_SGX_H */
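For reference, a hedged userspace sketch of the ioctl flow this header defines, against the /dev/sgx_enclave device node; the SECS page contents are elided here, so a real enclave needs properly populated data:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/sgx.h>		/* this header, once installed */

int main(void)
{
	/* An all-zero SECS is not valid; real data comes from the signer. */
	static char secs_page[4096] __attribute__((aligned(4096)));
	struct sgx_enclave_create parms = {
		.src = (unsigned long)secs_page,
	};
	int fd = open("/dev/sgx_enclave", O_RDWR);

	if (fd < 0 || ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &parms)) {
		perror("SGX_IOC_ENCLAVE_CREATE");
		return 1;
	}
	/* SGX_IOC_ENCLAVE_ADD_PAGES and SGX_IOC_ENCLAVE_INIT would follow. */
	return 0;
}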
diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h
index e5745d593dc7..164a22a72984 100644
--- a/arch/x86/include/uapi/asm/signal.h
+++ b/arch/x86/include/uapi/asm/signal.h
@@ -62,30 +62,6 @@ typedef unsigned long sigset_t;
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 2048
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
index c22fb55abcfd..0916f00a992e 100644
--- a/arch/x86/kernel/acpi/apei.c
+++ b/arch/x86/kernel/acpi/apei.c
@@ -43,3 +43,8 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
apei_mce_report_mem_error(sev, mem_err);
#endif
}
+
+int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
+{
+ return apei_smca_report_x86_error(ctx_info, lapic_id);
+}
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4adbe65afe23..8d778e46725d 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -807,6 +807,15 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
temp_mm_state_t temp_state;
lockdep_assert_irqs_disabled();
+
+ /*
+ * Make sure not to be in TLB lazy mode, as otherwise we'll end up
+ * with a stale address space WITHOUT being in lazy mode after
+ * restoring the previous mm.
+ */
+ if (this_cpu_read(cpu_tlbstate.is_lazy))
+ leave_mm(smp_processor_id());
+
temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
switch_mm_irqs_off(NULL, mm, current);
@@ -1365,7 +1374,7 @@ void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const voi
* @addr: address to patch
* @opcode: opcode of new instruction
* @len: length to copy
- * @handler: address to jump to when the temporary breakpoint is hit
+ * @emulate: instruction to be emulated
*
* Update a single instruction with the vector in the stack, avoiding
* dynamically allocated memory. This function should be used when it is
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 18f6b7c4bd79..b4396952c9a6 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -384,7 +384,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
int amd_get_subcaches(int cpu)
{
- struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+ struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
unsigned int mask;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
@@ -398,7 +398,7 @@ int amd_get_subcaches(int cpu)
int amd_set_subcaches(int cpu, unsigned long mask)
{
static unsigned int reset, ban;
- struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
unsigned int reg;
int cuid;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b3eef1d5c903..6bd20c0de8bc 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -94,6 +94,11 @@ static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;
static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP;
/*
+ * Hypervisor supports 15 bits of APIC ID in MSI Extended Destination ID
+ */
+static bool virt_ext_dest_id __ro_after_init;
+
+/*
* Map cpu index to physical APIC ID
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
@@ -1591,7 +1596,7 @@ static void setup_local_APIC(void)
apic->init_apic_ldr();
#ifdef CONFIG_X86_32
- if (apic->dest_logical) {
+ if (apic->dest_mode_logical) {
int logical_apicid, ldr_apicid;
/*
@@ -1841,20 +1846,34 @@ static __init void try_to_enable_x2apic(int remap_mode)
return;
if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
- /* IR is required if there is APIC ID > 255 even when running
- * under KVM
+ u32 apic_limit = 255;
+
+ /*
+ * Using X2APIC without IR is not architecturally supported
+ * on bare metal but may be supported in guests.
*/
- if (max_physical_apicid > 255 ||
- !x86_init.hyper.x2apic_available()) {
+ if (!x86_init.hyper.x2apic_available()) {
pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
x2apic_disable();
return;
}
/*
- * without IR all CPUs can be addressed by IOAPIC/MSI
- * only in physical mode
+ * If the hypervisor supports extended destination ID in
+ * MSI, that increases the maximum APIC ID that can be
+ * used for non-remapped IRQ domains.
*/
+ if (x86_init.hyper.msi_ext_dest_id()) {
+ virt_ext_dest_id = 1;
+ apic_limit = 32767;
+ }
+
+ /*
+ * Without IR, all CPUs can be addressed by IOAPIC/MSI only
+ * in physical mode, and CPUs with an APIC ID that cannot
+ * be addressed must not be brought online.
+ */
+ x2apic_set_max_apicid(apic_limit);
x2apic_phys = 1;
}
x2apic_enable();
@@ -2478,6 +2497,46 @@ int hard_smp_processor_id(void)
return read_apic_id();
}
+void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg,
+ bool dmar)
+{
+ memset(msg, 0, sizeof(*msg));
+
+ msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+ msg->arch_addr_lo.dest_mode_logical = apic->dest_mode_logical;
+ msg->arch_addr_lo.destid_0_7 = cfg->dest_apicid & 0xFF;
+
+ msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ msg->arch_data.vector = cfg->vector;
+
+ msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
+ /*
+ * Only the IOMMU itself can use the trick of putting destination
+ * APIC ID into the high bits of the address. Anything else would
+ * just be writing to memory if it tried that, and needs IR to
+ * address APICs which can't be addressed in the normal 32-bit
+ * address range at 0xFFExxxxx. That is typically just 8 bits, but
+ * some hypervisors allow the extended destination ID field in bits
+ * 5-11 to be used, giving support for 15 bits of APIC IDs in total.
+ */
+ if (dmar)
+ msg->arch_addr_hi.destid_8_31 = cfg->dest_apicid >> 8;
+ else if (virt_ext_dest_id && cfg->dest_apicid < 0x8000)
+ msg->arch_addr_lo.virt_destid_8_14 = cfg->dest_apicid >> 8;
+ else
+ WARN_ON_ONCE(cfg->dest_apicid > 0xFF);
+}
+
+u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid)
+{
+ u32 dest = msg->arch_addr_lo.destid_0_7;
+
+ if (extid)
+ dest |= msg->arch_addr_hi.destid_8_31 << 8;
+ return dest;
+}
+EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid);
+
/*
* Override the generic EOI implementation with an optimized version.
* Only called during early boot when only one CPU is active and with
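As a hypothetical self-test of the destination-ID placement above, for an APIC ID that needs more than the legacy 8 bits (the selftest function itself is made up for this sketch):

static void __init msi_destid_selftest(struct irq_cfg *cfg)
{
	struct msi_msg msg;

	cfg->dest_apicid = 0x1234;		/* 13 bits wide */
	__irq_msi_compose_msg(cfg, &msg, true);	/* dmar = true */

	/* destid_0_7 == 0x34, arch_addr_hi.destid_8_31 == 0x12 */
	WARN_ON(x86_msi_msg_get_destid(&msg, true) != 0x1234);
}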
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 7862b152a052..8f72b4351c9f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -53,7 +53,7 @@ static void _flat_send_IPI_mask(unsigned long mask, int vector)
unsigned long flags;
local_irq_save(flags);
- __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+ __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
}
@@ -113,15 +113,13 @@ static struct apic apic_flat __ro_after_init = {
.apic_id_valid = default_apic_id_valid,
.apic_id_registered = flat_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 1, /* logical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = true,
.disable_esr = 0,
- .dest_logical = APIC_DEST_LOGICAL,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = flat_init_apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
@@ -206,15 +204,13 @@ static struct apic apic_physflat __ro_after_init = {
.apic_id_valid = default_apic_id_valid,
.apic_id_registered = flat_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 0, /* physical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = false,
.disable_esr = 0,
- .dest_logical = 0,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = physflat_init_apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 780c702969b7..fe78319e0f7a 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -95,19 +95,15 @@ struct apic apic_noop __ro_after_init = {
.apic_id_valid = default_apic_id_valid,
.apic_id_registered = noop_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- /* logical delivery broadcast to all CPUs: */
- .irq_dest_mode = 1,
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = true,
.disable_esr = 0,
- .dest_logical = APIC_DEST_LOGICAL,
- .check_apicid_used = default_check_apicid_used,
+ .check_apicid_used = default_check_apicid_used,
.init_apic_ldr = noop_init_apic_ldr,
-
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = NULL,
-
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 35edd57f064a..a54d817eb4b6 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -246,15 +246,13 @@ static const struct apic apic_numachip1 __refconst = {
.apic_id_valid = numachip_apic_id_valid,
.apic_id_registered = numachip_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 0, /* physical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = false,
.disable_esr = 0,
- .dest_logical = 0,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = flat_init_apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
@@ -295,15 +293,13 @@ static const struct apic apic_numachip2 __refconst = {
.apic_id_valid = numachip_apic_id_valid,
.apic_id_registered = numachip_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 0, /* physical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = false,
.disable_esr = 0,
- .dest_logical = 0,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = flat_init_apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 98d015a4405a..77555f66c14d 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -127,16 +127,13 @@ static struct apic apic_bigsmp __ro_after_init = {
.apic_id_valid = default_apic_id_valid,
.apic_id_registered = bigsmp_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- /* phys delivery to target CPU: */
- .irq_dest_mode = 0,
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = false,
.disable_esr = 1,
- .dest_logical = 0,
- .check_apicid_used = bigsmp_check_apicid_used,
+ .check_apicid_used = bigsmp_check_apicid_used,
.init_apic_ldr = bigsmp_init_apic_ldr,
-
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
.setup_apic_routing = bigsmp_setup_apic_routing,
.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7b3c7e0d4a09..e4ab4804b20d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -48,6 +48,7 @@
#include <linux/jiffies.h> /* time_after() */
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/msi.h>
#include <asm/irqdomain.h>
#include <asm/io.h>
@@ -63,7 +64,6 @@
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hw_irq.h>
-
#include <asm/apic.h>
#define for_each_ioapic(idx) \
@@ -89,12 +89,12 @@ struct irq_pin_list {
};
struct mp_chip_data {
- struct list_head irq_2_pin;
- struct IO_APIC_route_entry entry;
- int trigger;
- int polarity;
+ struct list_head irq_2_pin;
+ struct IO_APIC_route_entry entry;
+ bool is_level;
+ bool active_low;
+ bool isa_irq;
u32 count;
- bool isa_irq;
};
struct mp_ioapic_gsi {
@@ -286,31 +286,26 @@ static void io_apic_write(unsigned int apic, unsigned int reg,
writel(value, &io_apic->data);
}
-union entry_union {
- struct { u32 w1, w2; };
- struct IO_APIC_route_entry entry;
-};
-
static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
- union entry_union eu;
+ struct IO_APIC_route_entry entry;
- eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
- eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+ entry.w1 = io_apic_read(apic, 0x10 + 2 * pin);
+ entry.w2 = io_apic_read(apic, 0x11 + 2 * pin);
- return eu.entry;
+ return entry;
}
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
- union entry_union eu;
+ struct IO_APIC_route_entry entry;
unsigned long flags;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- eu.entry = __ioapic_read_entry(apic, pin);
+ entry = __ioapic_read_entry(apic, pin);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- return eu.entry;
+ return entry;
}
/*
@@ -321,11 +316,8 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
*/
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
- union entry_union eu = {{0, 0}};
-
- eu.entry = e;
- io_apic_write(apic, 0x11 + 2*pin, eu.w2);
- io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+ io_apic_write(apic, 0x11 + 2*pin, e.w2);
+ io_apic_write(apic, 0x10 + 2*pin, e.w1);
}
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
@@ -344,12 +336,12 @@ static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
*/
static void ioapic_mask_entry(int apic, int pin)
{
+ struct IO_APIC_route_entry e = { .masked = true };
unsigned long flags;
- union entry_union eu = { .entry.mask = IOAPIC_MASKED };
raw_spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(apic, 0x10 + 2*pin, eu.w1);
- io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+ io_apic_write(apic, 0x10 + 2*pin, e.w1);
+ io_apic_write(apic, 0x11 + 2*pin, e.w2);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -422,20 +414,15 @@ static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
add_pin_to_irq_node(data, node, newapic, newpin);
}
-static void io_apic_modify_irq(struct mp_chip_data *data,
- int mask_and, int mask_or,
+static void io_apic_modify_irq(struct mp_chip_data *data, bool masked,
void (*final)(struct irq_pin_list *entry))
{
- union entry_union eu;
struct irq_pin_list *entry;
- eu.entry = data->entry;
- eu.w1 &= mask_and;
- eu.w1 |= mask_or;
- data->entry = eu.entry;
+ data->entry.masked = masked;
for_each_irq_pin(entry, data->irq_2_pin) {
- io_apic_write(entry->apic, 0x10 + 2 * entry->pin, eu.w1);
+ io_apic_write(entry->apic, 0x10 + 2 * entry->pin, data->entry.w1);
if (final)
final(entry);
}
@@ -459,13 +446,13 @@ static void mask_ioapic_irq(struct irq_data *irq_data)
unsigned long flags;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_modify_irq(data, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+ io_apic_modify_irq(data, true, &io_apic_sync);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void __unmask_ioapic(struct mp_chip_data *data)
{
- io_apic_modify_irq(data, ~IO_APIC_REDIR_MASKED, 0, NULL);
+ io_apic_modify_irq(data, false, NULL);
}
static void unmask_ioapic_irq(struct irq_data *irq_data)
@@ -506,8 +493,8 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector)
/*
* Mask the entry and change the trigger mode to edge.
*/
- entry1.mask = IOAPIC_MASKED;
- entry1.trigger = IOAPIC_EDGE;
+ entry1.masked = true;
+ entry1.is_level = false;
__ioapic_write_entry(apic, pin, entry1);
@@ -535,15 +522,15 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
/* Check delivery_mode to be sure we're not clearing an SMI pin */
entry = ioapic_read_entry(apic, pin);
- if (entry.delivery_mode == dest_SMI)
+ if (entry.delivery_mode == APIC_DELIVERY_MODE_SMI)
return;
/*
* Make sure the entry is masked and re-read the contents to check
* if it is a level triggered pin and if the remote-IRR is set.
*/
- if (entry.mask == IOAPIC_UNMASKED) {
- entry.mask = IOAPIC_MASKED;
+ if (!entry.masked) {
+ entry.masked = true;
ioapic_write_entry(apic, pin, entry);
entry = ioapic_read_entry(apic, pin);
}
@@ -556,8 +543,8 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
* doesn't clear the remote-IRR if the trigger mode is not
* set to level.
*/
- if (entry.trigger == IOAPIC_EDGE) {
- entry.trigger = IOAPIC_LEVEL;
+ if (!entry.is_level) {
+ entry.is_level = true;
ioapic_write_entry(apic, pin, entry);
}
raw_spin_lock_irqsave(&ioapic_lock, flags);
@@ -659,8 +646,8 @@ void mask_ioapic_entries(void)
struct IO_APIC_route_entry entry;
entry = ioapics[apic].saved_registers[pin];
- if (entry.mask == IOAPIC_UNMASKED) {
- entry.mask = IOAPIC_MASKED;
+ if (!entry.masked) {
+ entry.masked = true;
ioapic_write_entry(apic, pin, entry);
}
}
@@ -745,44 +732,7 @@ static int __init find_isa_irq_apic(int irq, int type)
return -1;
}
-#ifdef CONFIG_EISA
-/*
- * EISA Edge/Level control register, ELCR
- */
-static int EISA_ELCR(unsigned int irq)
-{
- if (irq < nr_legacy_irqs()) {
- unsigned int port = 0x4d0 + (irq >> 3);
- return (inb(port) >> (irq & 7)) & 1;
- }
- apic_printk(APIC_VERBOSE, KERN_INFO
- "Broken MPtable reports ISA irq %d\n", irq);
- return 0;
-}
-
-#endif
-
-/* ISA interrupts are always active high edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_ISA_trigger(idx) (IOAPIC_EDGE)
-#define default_ISA_polarity(idx) (IOAPIC_POL_HIGH)
-
-/* EISA interrupts are always polarity zero and can be edge or level
- * trigger depending on the ELCR value. If an interrupt is listed as
- * EISA conforming in the MP table, that means its trigger type must
- * be read in from the ELCR */
-
-#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
-#define default_EISA_polarity(idx) default_ISA_polarity(idx)
-
-/* PCI interrupts are always active low level triggered,
- * when listed as conforming in the MP table. */
-
-#define default_PCI_trigger(idx) (IOAPIC_LEVEL)
-#define default_PCI_polarity(idx) (IOAPIC_POL_LOW)
-
-static int irq_polarity(int idx)
+static bool irq_active_low(int idx)
{
int bus = mp_irqs[idx].srcbus;
@@ -791,90 +741,139 @@ static int irq_polarity(int idx)
*/
switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
case MP_IRQPOL_DEFAULT:
- /* conforms to spec, ie. bus-type dependent polarity */
- if (test_bit(bus, mp_bus_not_pci))
- return default_ISA_polarity(idx);
- else
- return default_PCI_polarity(idx);
+ /*
+ * Conforms to spec, ie. bus-type dependent polarity. PCI
+ * defaults to low active. [E]ISA defaults to high active.
+ */
+ return !test_bit(bus, mp_bus_not_pci);
case MP_IRQPOL_ACTIVE_HIGH:
- return IOAPIC_POL_HIGH;
+ return false;
case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
fallthrough;
case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to gcc stupidity */
- return IOAPIC_POL_LOW;
+ return true;
}
}
#ifdef CONFIG_EISA
-static int eisa_irq_trigger(int idx, int bus, int trigger)
+/*
+ * EISA Edge/Level control register, ELCR
+ */
+static bool EISA_ELCR(unsigned int irq)
+{
+ if (irq < nr_legacy_irqs()) {
+ unsigned int port = 0x4d0 + (irq >> 3);
+ return (inb(port) >> (irq & 7)) & 1;
+ }
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "Broken MPtable reports ISA irq %d\n", irq);
+ return false;
+}
+
+/*
+ * EISA interrupts are always active high and can be edge or level
+ * triggered depending on the ELCR value. If an interrupt is listed as
+ * EISA conforming in the MP table, that means its trigger type must be
+ * read in from the ELCR.
+ */
+static bool eisa_irq_is_level(int idx, int bus, bool level)
{
switch (mp_bus_id_to_type[bus]) {
case MP_BUS_PCI:
case MP_BUS_ISA:
- return trigger;
+ return level;
case MP_BUS_EISA:
- return default_EISA_trigger(idx);
+ return EISA_ELCR(mp_irqs[idx].srcbusirq);
}
pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus);
- return IOAPIC_LEVEL;
+ return true;
}
#else
-static inline int eisa_irq_trigger(int idx, int bus, int trigger)
+static inline bool eisa_irq_is_level(int idx, int bus, bool level)
{
- return trigger;
+ return level;
}
#endif
-static int irq_trigger(int idx)
+static bool irq_is_level(int idx)
{
int bus = mp_irqs[idx].srcbus;
- int trigger;
+ bool level;
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
case MP_IRQTRIG_DEFAULT:
- /* conforms to spec, ie. bus-type dependent trigger mode */
- if (test_bit(bus, mp_bus_not_pci))
- trigger = default_ISA_trigger(idx);
- else
- trigger = default_PCI_trigger(idx);
+ /*
+ * Conforms to spec, ie. bus-type dependent trigger
+ * mode. PCI defaults to level, ISA to edge.
+ */
+ level = !test_bit(bus, mp_bus_not_pci);
/* Take EISA into account */
- return eisa_irq_trigger(idx, bus, trigger);
+ return eisa_irq_is_level(idx, bus, level);
case MP_IRQTRIG_EDGE:
- return IOAPIC_EDGE;
+ return false;
case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
fallthrough;
case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to gcc stupidity */
- return IOAPIC_LEVEL;
+ return true;
}
}
+static int __acpi_get_override_irq(u32 gsi, bool *trigger, bool *polarity)
+{
+ int ioapic, pin, idx;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
+ return -1;
+
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ if (pin < 0)
+ return -1;
+
+ idx = find_irq_entry(ioapic, pin, mp_INT);
+ if (idx < 0)
+ return -1;
+
+ *trigger = irq_is_level(idx);
+ *polarity = irq_active_low(idx);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+int acpi_get_override_irq(u32 gsi, int *is_level, int *active_low)
+{
+ *is_level = *active_low = 0;
+ return __acpi_get_override_irq(gsi, (bool *)is_level,
+ (bool *)active_low);
+}
+#endif
+
void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
int trigger, int polarity)
{
init_irq_alloc_info(info, NULL);
info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
info->ioapic.node = node;
- info->ioapic.trigger = trigger;
- info->ioapic.polarity = polarity;
+ info->ioapic.is_level = trigger;
+ info->ioapic.active_low = polarity;
info->ioapic.valid = 1;
}
-#ifndef CONFIG_ACPI
-int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
-#endif
-
static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
struct irq_alloc_info *src,
u32 gsi, int ioapic_idx, int pin)
{
- int trigger, polarity;
+ bool level, pol_low;
copy_irq_alloc_info(dst, src);
dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
@@ -883,20 +882,20 @@ static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
dst->ioapic.valid = 1;
if (src && src->ioapic.valid) {
dst->ioapic.node = src->ioapic.node;
- dst->ioapic.trigger = src->ioapic.trigger;
- dst->ioapic.polarity = src->ioapic.polarity;
+ dst->ioapic.is_level = src->ioapic.is_level;
+ dst->ioapic.active_low = src->ioapic.active_low;
} else {
dst->ioapic.node = NUMA_NO_NODE;
- if (acpi_get_override_irq(gsi, &trigger, &polarity) >= 0) {
- dst->ioapic.trigger = trigger;
- dst->ioapic.polarity = polarity;
+ if (__acpi_get_override_irq(gsi, &level, &pol_low) >= 0) {
+ dst->ioapic.is_level = level;
+ dst->ioapic.active_low = pol_low;
} else {
/*
* PCI interrupts are always active low level
* triggered.
*/
- dst->ioapic.trigger = IOAPIC_LEVEL;
- dst->ioapic.polarity = IOAPIC_POL_LOW;
+ dst->ioapic.is_level = true;
+ dst->ioapic.active_low = true;
}
}
}
@@ -906,12 +905,12 @@ static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE;
}
-static void mp_register_handler(unsigned int irq, unsigned long trigger)
+static void mp_register_handler(unsigned int irq, bool level)
{
irq_flow_handler_t hdl;
bool fasteoi;
- if (trigger) {
+ if (level) {
irq_set_status_flags(irq, IRQ_LEVEL);
fasteoi = true;
} else {
@@ -933,14 +932,14 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
* pin with real trigger and polarity attributes.
*/
if (irq < nr_legacy_irqs() && data->count == 1) {
- if (info->ioapic.trigger != data->trigger)
- mp_register_handler(irq, info->ioapic.trigger);
- data->entry.trigger = data->trigger = info->ioapic.trigger;
- data->entry.polarity = data->polarity = info->ioapic.polarity;
+ if (info->ioapic.is_level != data->is_level)
+ mp_register_handler(irq, info->ioapic.is_level);
+ data->entry.is_level = data->is_level = info->ioapic.is_level;
+ data->entry.active_low = data->active_low = info->ioapic.active_low;
}
- return data->trigger == info->ioapic.trigger &&
- data->polarity == info->ioapic.polarity;
+ return data->is_level == info->ioapic.is_level &&
+ data->active_low == info->ioapic.active_low;
}
static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
@@ -1219,10 +1218,9 @@ void ioapic_zap_locks(void)
static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
{
- int i;
- char buf[256];
struct IO_APIC_route_entry entry;
- struct IR_IO_APIC_route_entry *ir_entry = (void *)&entry;
+ char buf[256];
+ int i;
printk(KERN_DEBUG "IOAPIC %d:\n", apic);
for (i = 0; i <= nr_entries; i++) {
@@ -1230,20 +1228,21 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
snprintf(buf, sizeof(buf),
" pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
i,
- entry.mask == IOAPIC_MASKED ? "disabled" : "enabled ",
- entry.trigger == IOAPIC_LEVEL ? "level" : "edge ",
- entry.polarity == IOAPIC_POL_LOW ? "low " : "high",
+ entry.masked ? "disabled" : "enabled ",
+ entry.is_level ? "level" : "edge ",
+ entry.active_low ? "low " : "high",
entry.vector, entry.irr, entry.delivery_status);
- if (ir_entry->format)
+ if (entry.ir_format) {
printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
- buf, (ir_entry->index2 << 15) | ir_entry->index,
- ir_entry->zero);
- else
- printk(KERN_DEBUG "%s, %s, D(%02X), M(%1d)\n",
buf,
- entry.dest_mode == IOAPIC_DEST_MODE_LOGICAL ?
- "logical " : "physical",
- entry.dest, entry.delivery_mode);
+ (entry.ir_index_15 << 15) | entry.ir_index_0_14,
+ entry.ir_zero);
+ } else {
+ printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf,
+ entry.dest_mode_logical ? "logical " : "physical",
+ entry.virt_destid_8_14, entry.destid_0_7,
+ entry.delivery_mode);
+ }
}
}
@@ -1368,7 +1367,8 @@ void __init enable_IO_APIC(void)
/* If the interrupt line is enabled and in ExtInt mode
* I have found the pin where the i8259 is connected.
*/
- if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
+ if (!entry.masked &&
+ entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) {
ioapic_i8259.apic = apic;
ioapic_i8259.pin = pin;
goto found_i8259;
@@ -1410,14 +1410,16 @@ void native_restore_boot_irq_mode(void)
*/
if (ioapic_i8259.pin != -1) {
struct IO_APIC_route_entry entry;
+ u32 apic_id = read_apic_id();
memset(&entry, 0, sizeof(entry));
- entry.mask = IOAPIC_UNMASKED;
- entry.trigger = IOAPIC_EDGE;
- entry.polarity = IOAPIC_POL_HIGH;
- entry.dest_mode = IOAPIC_DEST_MODE_PHYSICAL;
- entry.delivery_mode = dest_ExtINT;
- entry.dest = read_apic_id();
+ entry.masked = false;
+ entry.is_level = false;
+ entry.active_low = false;
+ entry.dest_mode_logical = false;
+ entry.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
+ entry.destid_0_7 = apic_id & 0xFF;
+ entry.virt_destid_8_14 = apic_id >> 8;
/*
* Add it to the IO-APIC irq-routing table:
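[Annotation] The destination-id handling introduced above (and reused in the
unlock_ExtINT_logic() hunk further down) stores the 15-bit destination as two
fields. An illustrative helper, not part of the patch, makes the split explicit:

	static inline void entry_set_destid(struct IO_APIC_route_entry *entry,
					    u32 apic_id)
	{
		entry->destid_0_7       = apic_id & 0xFF; /* architectural 8-bit id */
		entry->virt_destid_8_14 = apic_id >> 8;   /* virt/x2apic extension  */
	}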
@@ -1618,21 +1620,16 @@ static void __init delay_without_tsc(void)
static int __init timer_irq_works(void)
{
unsigned long t1 = jiffies;
- unsigned long flags;
if (no_timer_check)
return 1;
- local_save_flags(flags);
local_irq_enable();
-
if (boot_cpu_has(X86_FEATURE_TSC))
delay_with_tsc();
else
delay_without_tsc();
- local_irq_restore(flags);
-
/*
* Expect a few ticks at least, to be sure some possible
* glue logic does not lock up after one or two first
@@ -1641,10 +1638,10 @@ static int __init timer_irq_works(void)
* least one tick may be lost due to delays.
*/
- /* jiffies wrap? */
- if (time_after(jiffies, t1 + 4))
- return 1;
- return 0;
+ local_irq_disable();
+
+ /* Did jiffies advance? */
+ return time_after(jiffies, t1 + 4);
}
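[Annotation] For reference, time_after() from <linux/jiffies.h> is the
wrap-safe comparison, roughly (the real macro adds type checks):

	#define time_after(a, b)	((long)((b) - (a)) < 0)

so the function now returns nonzero only when at least five ticks have
elapsed, correct even across a jiffies wraparound.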
/*
@@ -1696,13 +1693,13 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
raw_spin_lock_irqsave(&ioapic_lock, flags);
for_each_irq_pin(entry, data->irq_2_pin) {
- unsigned int reg;
+ struct IO_APIC_route_entry e;
int pin;
pin = entry->pin;
- reg = io_apic_read(entry->apic, 0x10 + pin*2);
+ e.w1 = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
- if (reg & IO_APIC_REDIR_REMOTE_IRR) {
+ if (e.irr) {
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return true;
}
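[Annotation] Reading the raw register word into e.w1 and then testing e.irr
works because struct IO_APIC_route_entry overlays named bitfields on the raw
32-bit words. A minimal sketch of the first word, assuming the classic
redirection-entry layout (the real definition in
arch/x86/include/asm/io_apic.h also covers the second word and the
remapped-format fields used by io_apic_print_entries()):

	struct IO_APIC_route_entry_sketch {
		union {
			struct {
				u32	vector			:  8,
					delivery_mode		:  3,
					dest_mode_logical	:  1,
					delivery_status		:  1,
					active_low		:  1,
					irr			:  1,
					is_level		:  1,
					masked			:  1,
					reserved_0		: 15;
			};
			u32 w1;	/* raw low dword from io_apic_read() */
		};
	};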
@@ -1849,21 +1846,62 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
eoi_ioapic_pin(data->entry.vector, data);
}
+/*
+ * The I/OAPIC is just a device for generating MSI messages from legacy
+ * interrupt pins. Various fields of the RTE translate into bits of the
+ * resulting MSI, which had a historical meaning.
+ *
+ * With interrupt remapping, many of those bits have different meanings
+ * in the underlying MSI, but the way that the I/OAPIC transforms them
+ * from its RTE to the MSI message is the same. This function allows
+ * the parent IRQ domain to compose the MSI message, then takes the
+ * relevant bits to put them in the appropriate places in the RTE in
+ * order to generate that message when the IRQ happens.
+ *
+ * The setup here relies on a preconfigured route entry (is_level,
+ * active_low, masked) because the parent domain is merely composing the
+ * generic message routing information which is used for the MSI.
+ */
+static void ioapic_setup_msg_from_msi(struct irq_data *irq_data,
+ struct IO_APIC_route_entry *entry)
+{
+ struct msi_msg msg;
+
+ /* Let the parent domain compose the MSI message */
+ irq_chip_compose_msi_msg(irq_data, &msg);
+
+ /*
+ * - Real vector
+ * - DMAR/IR: 8bit subhandle (ioapic.pin)
+ * - AMD/IR: 8bit IRTE index
+ */
+ entry->vector = msg.arch_data.vector;
+ /* Delivery mode (for DMAR/IR all 0) */
+ entry->delivery_mode = msg.arch_data.delivery_mode;
+ /* Destination mode or DMAR/IR index bit 15 */
+ entry->dest_mode_logical = msg.arch_addr_lo.dest_mode_logical;
+ /* DMAR/IR: 1, 0 for all other modes */
+ entry->ir_format = msg.arch_addr_lo.dmar_format;
+ /*
+ * - DMAR/IR: index bits 0-14.
+ *
+ * - Virt: If the host supports x2apic without a virtualized IR
+ * unit, then bits 0-6 of dmar_index_0_14 provide bits
+ * 8-14 of the destination id.
+ *
+ * All other modes have bits 0-6 of dmar_index_0_14 cleared and the
+ * topmost 8 bits are destination id bits 0-7 (entry::destid_0_7).
+ */
+ entry->ir_index_0_14 = msg.arch_addr_lo.dmar_index_0_14;
+}
+
static void ioapic_configure_entry(struct irq_data *irqd)
{
struct mp_chip_data *mpd = irqd->chip_data;
- struct irq_cfg *cfg = irqd_cfg(irqd);
struct irq_pin_list *entry;
- /*
- * Only update when the parent is the vector domain, don't touch it
- * if the parent is the remapping domain. Check the installed
- * ioapic chip to verify that.
- */
- if (irqd->chip == &ioapic_chip) {
- mpd->entry.dest = cfg->dest_apicid;
- mpd->entry.vector = cfg->vector;
- }
+ ioapic_setup_msg_from_msi(irqd, &mpd->entry);
+
for_each_irq_pin(entry, mpd->irq_2_pin)
__ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
}
@@ -1919,7 +1957,7 @@ static int ioapic_irq_get_chip_state(struct irq_data *irqd,
* irrelevant because the IO-APIC treats them as fire and
* forget.
*/
- if (rentry.irr && rentry.trigger) {
+ if (rentry.irr && rentry.is_level) {
*state = true;
break;
}
@@ -2027,6 +2065,7 @@ static inline void __init unlock_ExtINT_logic(void)
int apic, pin, i;
struct IO_APIC_route_entry entry0, entry1;
unsigned char save_control, save_freq_select;
+ u32 apic_id;
pin = find_isa_irq_pin(8, mp_INT);
if (pin == -1) {
@@ -2042,14 +2081,16 @@ static inline void __init unlock_ExtINT_logic(void)
entry0 = ioapic_read_entry(apic, pin);
clear_IO_APIC_pin(apic, pin);
+ apic_id = hard_smp_processor_id();
memset(&entry1, 0, sizeof(entry1));
- entry1.dest_mode = IOAPIC_DEST_MODE_PHYSICAL;
- entry1.mask = IOAPIC_UNMASKED;
- entry1.dest = hard_smp_processor_id();
- entry1.delivery_mode = dest_ExtINT;
- entry1.polarity = entry0.polarity;
- entry1.trigger = IOAPIC_EDGE;
+ entry1.dest_mode_logical = true;
+ entry1.masked = false;
+ entry1.destid_0_7 = apic_id & 0xFF;
+ entry1.virt_destid_8_14 = apic_id >> 8;
+ entry1.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
+ entry1.active_low = entry0.active_low;
+ entry1.is_level = false;
entry1.vector = 0;
ioapic_write_entry(apic, pin, entry1);
@@ -2117,13 +2158,12 @@ static inline void __init check_timer(void)
struct irq_cfg *cfg = irqd_cfg(irq_data);
int node = cpu_to_node(0);
int apic1, pin1, apic2, pin2;
- unsigned long flags;
int no_pin1 = 0;
if (!global_clock_event)
return;
- local_irq_save(flags);
+ local_irq_disable();
/*
* get/set the timer IRQ vector:
@@ -2178,9 +2218,9 @@ static inline void __init check_timer(void)
* so only need to unmask if it is level-trigger
* do we really have level trigger timer?
*/
- int idx;
- idx = find_irq_entry(apic1, pin1, mp_INT);
- if (idx != -1 && irq_trigger(idx))
+ int idx = find_irq_entry(apic1, pin1, mp_INT);
+
+ if (idx != -1 && irq_is_level(idx))
unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
@@ -2191,7 +2231,6 @@ static inline void __init check_timer(void)
goto out;
}
panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
- local_irq_disable();
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2215,7 +2254,6 @@ static inline void __init check_timer(void)
/*
* Cleanup, just in case ...
*/
- local_irq_disable();
legacy_pic->mask(0);
clear_IO_APIC_pin(apic2, pin2);
apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -2232,7 +2270,6 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
- local_irq_disable();
legacy_pic->mask(0);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -2251,7 +2288,6 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
- local_irq_disable();
apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
if (apic_is_x2apic_enabled())
apic_printk(APIC_QUIET, KERN_INFO
@@ -2260,7 +2296,7 @@ static inline void __init check_timer(void)
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option.\n");
out:
- local_irq_restore(flags);
+ local_irq_enable();
}
/*
@@ -2284,36 +2320,37 @@ out:
static int mp_irqdomain_create(int ioapic)
{
- struct irq_alloc_info info;
struct irq_domain *parent;
int hwirqs = mp_ioapic_pin_count(ioapic);
struct ioapic *ip = &ioapics[ioapic];
struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
struct fwnode_handle *fn;
- char *name = "IO-APIC";
+ struct irq_fwspec fwspec;
if (cfg->type == IOAPIC_DOMAIN_INVALID)
return 0;
- init_irq_alloc_info(&info, NULL);
- info.type = X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT;
- info.devid = mpc_ioapic_id(ioapic);
- parent = irq_remapping_get_irq_domain(&info);
- if (!parent)
- parent = x86_vector_domain;
- else
- name = "IO-APIC-IR";
-
/* Handle device tree enumerated APICs proper */
if (cfg->dev) {
fn = of_node_to_fwnode(cfg->dev);
} else {
- fn = irq_domain_alloc_named_id_fwnode(name, ioapic);
+ fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic));
if (!fn)
return -ENOMEM;
}
+ fwspec.fwnode = fn;
+ fwspec.param_count = 1;
+ fwspec.param[0] = mpc_ioapic_id(ioapic);
+
+ parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
+ if (!parent) {
+ if (!cfg->dev)
+ irq_domain_free_fwnode(fn);
+ return -ENODEV;
+ }
+
ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
(void *)(long)ioapic);
@@ -2587,30 +2624,6 @@ static int io_apic_get_version(int ioapic)
return reg_01.bits.version;
}
-int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
-{
- int ioapic, pin, idx;
-
- if (skip_ioapic_setup)
- return -1;
-
- ioapic = mp_find_ioapic(gsi);
- if (ioapic < 0)
- return -1;
-
- pin = mp_find_ioapic_pin(ioapic, gsi);
- if (pin < 0)
- return -1;
-
- idx = find_irq_entry(ioapic, pin, mp_INT);
- if (idx < 0)
- return -1;
-
- *trigger = irq_trigger(idx);
- *polarity = irq_polarity(idx);
- return 0;
-}
-
/*
* This function updates target affinity of IOAPIC interrupts to include
* the CPUs which came online during SMP bringup.
@@ -2934,44 +2947,49 @@ static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
struct irq_alloc_info *info)
{
if (info && info->ioapic.valid) {
- data->trigger = info->ioapic.trigger;
- data->polarity = info->ioapic.polarity;
- } else if (acpi_get_override_irq(gsi, &data->trigger,
- &data->polarity) < 0) {
+ data->is_level = info->ioapic.is_level;
+ data->active_low = info->ioapic.active_low;
+ } else if (__acpi_get_override_irq(gsi, &data->is_level,
+ &data->active_low) < 0) {
/* PCI interrupts are always active-low and level-triggered. */
- data->trigger = IOAPIC_LEVEL;
- data->polarity = IOAPIC_POL_LOW;
+ data->is_level = true;
+ data->active_low = true;
}
}
-static void mp_setup_entry(struct irq_cfg *cfg, struct mp_chip_data *data,
- struct IO_APIC_route_entry *entry)
+/*
+ * Configure the I/O-APIC specific fields in the routing entry.
+ *
+ * This is important to set up the I/O-APIC specific bits (is_level,
+ * active_low, masked) because the underlying parent domain will only
+ * provide the routing information and is oblivious to the I/O-APIC
+ * specific bits.
+ *
+ * The entry is just preconfigured at this point and not written into the
+ * RTE. This happens later during activation which will fill in the actual
+ * routing information.
+ */
+static void mp_preconfigure_entry(struct mp_chip_data *data)
{
+ struct IO_APIC_route_entry *entry = &data->entry;
+
memset(entry, 0, sizeof(*entry));
- entry->delivery_mode = apic->irq_delivery_mode;
- entry->dest_mode = apic->irq_dest_mode;
- entry->dest = cfg->dest_apicid;
- entry->vector = cfg->vector;
- entry->trigger = data->trigger;
- entry->polarity = data->polarity;
+ entry->is_level = data->is_level;
+ entry->active_low = data->active_low;
/*
* Mask level triggered irqs. Edge triggered irqs are masked
* by the irq core code in case they fire.
*/
- if (data->trigger == IOAPIC_LEVEL)
- entry->mask = IOAPIC_MASKED;
- else
- entry->mask = IOAPIC_UNMASKED;
+ entry->masked = data->is_level;
}
int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
- int ret, ioapic, pin;
- struct irq_cfg *cfg;
- struct irq_data *irq_data;
- struct mp_chip_data *data;
struct irq_alloc_info *info = arg;
+ struct mp_chip_data *data;
+ struct irq_data *irq_data;
+ int ret, ioapic, pin;
unsigned long flags;
if (!info || nr_irqs > 1)
@@ -2989,7 +3007,6 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
if (!data)
return -ENOMEM;
- info->ioapic.entry = &data->entry;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
if (ret < 0) {
kfree(data);
@@ -3003,22 +3020,20 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
irq_data->chip_data = data;
mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);
- cfg = irqd_cfg(irq_data);
add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
+ mp_preconfigure_entry(data);
+ mp_register_handler(virq, data->is_level);
+
local_irq_save(flags);
- if (info->ioapic.entry)
- mp_setup_entry(cfg, data, info->ioapic.entry);
- mp_register_handler(virq, data->trigger);
if (virq < nr_legacy_irqs())
legacy_pic->mask(virq);
local_irq_restore(flags);
apic_printk(APIC_VERBOSE, KERN_DEBUG
- "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
- ioapic, mpc_ioapic_id(ioapic), pin, cfg->vector,
- virq, data->trigger, data->polarity, cfg->dest_apicid);
-
+ "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
+ ioapic, mpc_ioapic_id(ioapic), pin, virq,
+ data->is_level, data->active_low);
return 0;
}
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 387154e39e08..d1fb874fbe64 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -260,7 +260,7 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
for_each_cpu(query_cpu, mask)
__default_send_IPI_dest_field(
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
- vector, apic->dest_logical);
+ vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
}
@@ -279,7 +279,7 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
continue;
__default_send_IPI_dest_field(
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
- vector, apic->dest_logical);
+ vector, APIC_DEST_LOGICAL);
}
local_irq_restore(flags);
}
@@ -297,7 +297,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
local_irq_save(flags);
WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
- __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+ __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
}
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 6313f0a05db7..44ebe25e7703 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -15,7 +15,6 @@
#include <linux/hpet.h>
#include <linux/msi.h>
#include <asm/irqdomain.h>
-#include <asm/msidef.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
@@ -23,38 +22,11 @@
struct irq_domain *x86_pci_msi_default_domain __ro_after_init;
-static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
-{
- msg->address_hi = MSI_ADDR_BASE_HI;
-
- if (x2apic_enabled())
- msg->address_hi |= MSI_ADDR_EXT_DEST_ID(cfg->dest_apicid);
-
- msg->address_lo =
- MSI_ADDR_BASE_LO |
- ((apic->irq_dest_mode == 0) ?
- MSI_ADDR_DEST_MODE_PHYSICAL :
- MSI_ADDR_DEST_MODE_LOGICAL) |
- MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DEST_ID(cfg->dest_apicid);
-
- msg->data =
- MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- MSI_DATA_DELIVERY_FIXED |
- MSI_DATA_VECTOR(cfg->vector);
-}
-
-void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
-{
- __irq_msi_compose_msg(irqd_cfg(data), msg);
-}
-
static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
{
struct msi_msg msg[2] = { [1] = { }, };
- __irq_msi_compose_msg(cfg, msg);
+ __irq_msi_compose_msg(cfg, msg, false);
irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
}
@@ -276,6 +248,17 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
#endif
#ifdef CONFIG_DMAR_TABLE
+/*
+ * The Intel IOMMU (ab)uses the high bits of the MSI address to contain the
+ * high bits of the destination APIC ID. This can't be done in the general
+ * case for MSIs as it would be targeting real memory above 4GiB not the
+ * APIC.
+ */
+static void dmar_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ __irq_msi_compose_msg(irqd_cfg(data), msg, true);
+}
+
static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
dmar_msi_write(data->irq, msg);
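[Annotation] Both call sites now pass a third argument, which implies that
__irq_msi_compose_msg() moved to common APIC code with a signature along
these lines (inferred from the call sites; the definition is not part of
this hunk):

	/* dmar == true may encode the full destination APIC ID via the
	 * high MSI address bits; dmar == false keeps the architectural
	 * 8-bit destination ID. */
	void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg,
				   bool dmar);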
@@ -288,6 +271,7 @@ static struct irq_chip dmar_msi_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_set_affinity = msi_domain_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_compose_msi_msg = dmar_msi_compose_msg,
.irq_write_msi_msg = dmar_msi_write_msg,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -356,114 +340,3 @@ void dmar_free_hwirq(int irq)
irq_domain_free_irqs(irq, 1);
}
#endif
-
-/*
- * MSI message composition
- */
-#ifdef CONFIG_HPET_TIMER
-static inline int hpet_dev_id(struct irq_domain *domain)
-{
- struct msi_domain_info *info = msi_get_domain_info(domain);
-
- return (int)(long)info->data;
-}
-
-static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
-{
- hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
-}
-
-static struct irq_chip hpet_msi_controller __ro_after_init = {
- .name = "HPET-MSI",
- .irq_unmask = hpet_msi_unmask,
- .irq_mask = hpet_msi_mask,
- .irq_ack = irq_chip_ack_parent,
- .irq_set_affinity = msi_domain_set_affinity,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_write_msi_msg = hpet_msi_write_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int hpet_msi_init(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq,
- irq_hw_number_t hwirq, msi_alloc_info_t *arg)
-{
- irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
- irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
- handle_edge_irq, arg->data, "edge");
-
- return 0;
-}
-
-static void hpet_msi_free(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq)
-{
- irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
-}
-
-static struct msi_domain_ops hpet_msi_domain_ops = {
- .msi_init = hpet_msi_init,
- .msi_free = hpet_msi_free,
-};
-
-static struct msi_domain_info hpet_msi_domain_info = {
- .ops = &hpet_msi_domain_ops,
- .chip = &hpet_msi_controller,
- .flags = MSI_FLAG_USE_DEF_DOM_OPS,
-};
-
-struct irq_domain *hpet_create_irq_domain(int hpet_id)
-{
- struct msi_domain_info *domain_info;
- struct irq_domain *parent, *d;
- struct irq_alloc_info info;
- struct fwnode_handle *fn;
-
- if (x86_vector_domain == NULL)
- return NULL;
-
- domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
- if (!domain_info)
- return NULL;
-
- *domain_info = hpet_msi_domain_info;
- domain_info->data = (void *)(long)hpet_id;
-
- init_irq_alloc_info(&info, NULL);
- info.type = X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT;
- info.devid = hpet_id;
- parent = irq_remapping_get_irq_domain(&info);
- if (parent == NULL)
- parent = x86_vector_domain;
- else
- hpet_msi_controller.name = "IR-HPET-MSI";
-
- fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
- hpet_id);
- if (!fn) {
- kfree(domain_info);
- return NULL;
- }
-
- d = msi_create_irq_domain(fn, domain_info, parent);
- if (!d) {
- irq_domain_free_fwnode(fn);
- kfree(domain_info);
- }
- return d;
-}
-
-int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
- int dev_num)
-{
- struct irq_alloc_info info;
-
- init_irq_alloc_info(&info, NULL);
- info.type = X86_IRQ_ALLOC_TYPE_HPET;
- info.data = hc;
- info.devid = hpet_dev_id(domain);
- info.hwirq = dev_num;
-
- return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
-}
-#endif
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 67b6f7c049ec..a61f642b1b90 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -69,16 +69,13 @@ static struct apic apic_default __ro_after_init = {
.apic_id_valid = default_apic_id_valid,
.apic_id_registered = default_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- /* logical delivery broadcast to all CPUs: */
- .irq_dest_mode = 1,
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = true,
.disable_esr = 0,
- .dest_logical = APIC_DEST_LOGICAL,
- .check_apicid_used = default_check_apicid_used,
+ .check_apicid_used = default_check_apicid_used,
.init_apic_ldr = default_init_apic_ldr,
-
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = setup_apic_flat_routing,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 1eac53632786..3c9c7492252f 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -273,20 +273,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd)
const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
int node = irq_data_get_node(irqd);
- if (node == NUMA_NO_NODE)
- goto all;
- /* Try the intersection of @affmsk and node mask */
- cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
- if (!assign_vector_locked(irqd, vector_searchmask))
- return 0;
- /* Try the node mask */
- if (!assign_vector_locked(irqd, cpumask_of_node(node)))
- return 0;
-all:
+ if (node != NUMA_NO_NODE) {
+ /* Try the intersection of @affmsk and node mask */
+ cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
+ if (!assign_vector_locked(irqd, vector_searchmask))
+ return 0;
+ }
+
/* Try the full affinity mask */
cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
if (!assign_vector_locked(irqd, vector_searchmask))
return 0;
+
+ if (node != NUMA_NO_NODE) {
+ /* Try the node mask */
+ if (!assign_vector_locked(irqd, cpumask_of_node(node)))
+ return 0;
+ }
+
/* Try the full online mask */
return assign_vector_locked(irqd, cpu_online_mask);
}
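[Annotation] The reordered fallback chain above now prefers the caller's
affinity mask over plain node locality:

	/*
	 * 1. affinity mask AND node mask   (if the irq has a home node)
	 * 2. affinity mask AND online mask (full affinity, now tried
	 *                                   before the bare node mask)
	 * 3. node mask                     (node-local fallback)
	 * 4. cpu_online_mask               (last resort)
	 */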
@@ -636,7 +640,50 @@ static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
}
#endif
+int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
+{
+ if (fwspec->param_count != 1)
+ return 0;
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ const char *fwname = fwnode_get_name(fwspec->fwnode);
+ return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
+ simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
+ }
+ return to_of_node(fwspec->fwnode) &&
+ of_device_is_compatible(to_of_node(fwspec->fwnode),
+ "intel,ce4100-ioapic");
+}
+
+int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
+{
+ if (fwspec->param_count != 1)
+ return 0;
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ const char *fwname = fwnode_get_name(fwspec->fwnode);
+ return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
+ simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
+ }
+ return 0;
+}
+
+static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ /*
+ * HPET and I/OAPIC cannot be parented in the vector domain
+ * if IRQ remapping is enabled. APIC IDs above 15 bits are
+ * only permitted if IRQ remapping is enabled, so check that.
+ */
+ if (apic->apic_id_valid(32768))
+ return 0;
+
+ return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec);
+}
+
static const struct irq_domain_ops x86_vector_domain_ops = {
+ .select = x86_vector_select,
.alloc = x86_vector_alloc_irqs,
.free = x86_vector_free_irqs,
.activate = x86_vector_activate,
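[Annotation] A worked example of the match, assuming the fwnode was created
by irq_domain_alloc_named_id_fwnode("IO-APIC", 2) as in the io_apic.c hunk
above (named-id fwnodes are formatted as "<name>-<id>"):

	fwnode_get_name(fwspec->fwnode)        /* -> "IO-APIC-2"           */
	strncmp(fwname, "IO-APIC-", 8)         /* -> 0, prefix matches     */
	simple_strtol(fwname + 8, NULL, 10)    /* -> 2 == fwspec->param[0] */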
@@ -818,6 +865,12 @@ void apic_ack_edge(struct irq_data *irqd)
apic_ack_irq(irqd);
}
+static void x86_vector_msi_compose_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ __irq_msi_compose_msg(irqd_cfg(data), msg, false);
+}
+
static struct irq_chip lapic_controller = {
.name = "APIC",
.irq_ack = apic_ack_edge,
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index b0889c48a2ac..df6adc5674c9 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -61,7 +61,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
if (!dest)
continue;
- __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
/* Remove cluster CPUs from tmpmask */
cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
}
@@ -184,15 +184,13 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.apic_id_valid = x2apic_apic_id_valid,
.apic_id_registered = x2apic_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 1, /* logical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = true,
.disable_esr = 0,
- .dest_logical = APIC_DEST_LOGICAL,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = init_x2apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index bc9693841353..0e4e81971567 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -8,6 +8,12 @@
int x2apic_phys;
static struct apic apic_x2apic_phys;
+static u32 x2apic_max_apicid __ro_after_init;
+
+void __init x2apic_set_max_apicid(u32 apicid)
+{
+ x2apic_max_apicid = apicid;
+}
static int __init set_x2apic_phys_mode(char *arg)
{
@@ -98,6 +104,9 @@ static int x2apic_phys_probe(void)
/* Common x2apic functions, also used by x2apic_cluster */
int x2apic_apic_id_valid(u32 apicid)
{
+ if (x2apic_max_apicid && apicid > x2apic_max_apicid)
+ return 0;
+
return 1;
}
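[Annotation] An illustrative, hypothetical caller: boot code that knows
interrupt remapping is unavailable could clamp valid IDs to the
architectural 8-bit range before bringing up secondary CPUs:

	if (!remapping_enabled)		/* hypothetical condition */
		x2apic_set_max_apicid(255);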
@@ -148,15 +157,13 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.apic_id_valid = x2apic_apic_id_valid,
.apic_id_registered = x2apic_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 0, /* physical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = false,
.disable_esr = 0,
- .dest_logical = 0,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = init_x2apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 714233cee0b5..52bc217ca8c3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -33,7 +33,7 @@ static union uvh_apicid uvh_apicid;
static int uv_node_id;
/* Unpack AT/OEM/TABLE ID's to be NULL terminated strings */
-static u8 uv_archtype[UV_AT_SIZE];
+static u8 uv_archtype[UV_AT_SIZE + 1];
static u8 oem_id[ACPI_OEM_ID_SIZE + 1];
static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
@@ -161,7 +161,7 @@ static int __init early_set_hub_type(void)
/* UV4/4A only have a revision difference */
case UV4_HUB_PART_NUMBER:
uv_min_hub_revision_id = node_id.s.revision
- + UV4_HUB_REVISION_BASE;
+ + UV4_HUB_REVISION_BASE - 1;
uv_hub_type_set(UV4);
if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE)
uv_hub_type_set(UV4|UV4A);
@@ -290,6 +290,9 @@ static void __init uv_stringify(int len, char *to, char *from)
{
/* Relies on 'to' being NUL chars so the result will be NUL-terminated */
strncpy(to, from, len-1);
+
+ /* Trim trailing spaces */
+ (void)strim(to);
}
/* Find UV arch type entry in UVsystab */
@@ -317,7 +320,7 @@ static int __init decode_arch_type(unsigned long ptr)
if (n > 0 && n < sizeof(uv_ate->archtype)) {
pr_info("UV: UVarchtype received from BIOS\n");
- uv_stringify(UV_AT_SIZE, uv_archtype, uv_ate->archtype);
+ uv_stringify(sizeof(uv_archtype), uv_archtype, uv_ate->archtype);
return 1;
}
return 0;
@@ -366,7 +369,7 @@ static int __init early_get_arch_type(void)
return ret;
}
-static int __init uv_set_system_type(char *_oem_id)
+static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
{
/* Save OEM_ID passed from ACPI MADT */
uv_stringify(sizeof(oem_id), oem_id, _oem_id);
@@ -375,7 +378,7 @@ static int __init uv_set_system_type(char *_oem_id)
if (!early_get_arch_type())
/* If not, use the OEM ID for UVarchtype */
- uv_stringify(UV_AT_SIZE, uv_archtype, _oem_id);
+ uv_stringify(sizeof(uv_archtype), uv_archtype, oem_id);
/* Check if not hubbed */
if (strncmp(uv_archtype, "SGI", 3) != 0) {
@@ -386,13 +389,23 @@ static int __init uv_set_system_type(char *_oem_id)
/* (Not hubless), not a UV */
return 0;
+ /* It is a UV hubless system */
+ uv_hubless_system = 0x01;
+
+ /* UV5 Hubless */
+ if (strncmp(uv_archtype, "NSGI5", 5) == 0)
+ uv_hubless_system |= 0x20;
+
/* UV4 Hubless: CH */
- if (strncmp(uv_archtype, "NSGI4", 5) == 0)
- uv_hubless_system = 0x11;
+ else if (strncmp(uv_archtype, "NSGI4", 5) == 0)
+ uv_hubless_system |= 0x10;
/* UV3 Hubless: UV300/MC990X w/o hub */
else
- uv_hubless_system = 0x9;
+ uv_hubless_system |= 0x8;
+
+ /* Copy APIC type */
+ uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
oem_id, oem_table_id, uv_system_type, uv_hubless_system);
@@ -456,7 +469,7 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
/* If not UV, return. */
- if (likely(uv_set_system_type(_oem_id) == 0))
+ if (uv_set_system_type(_oem_id, _oem_table_id) == 0)
return 0;
/* Save and Decode OEM Table ID */
@@ -489,6 +502,18 @@ enum uv_system_type get_uv_system_type(void)
return uv_system_type;
}
+int uv_get_hubless_system(void)
+{
+ return uv_hubless_system;
+}
+EXPORT_SYMBOL_GPL(uv_get_hubless_system);
+
+ssize_t uv_get_archtype(char *buf, int len)
+{
+ return scnprintf(buf, len, "%s/%s", uv_archtype, oem_table_id);
+}
+EXPORT_SYMBOL_GPL(uv_get_archtype);
+
int is_uv_system(void)
{
return uv_system_type != UV_NONE;
@@ -703,9 +728,9 @@ static void uv_send_IPI_one(int cpu, int vector)
unsigned long dmode, val;
if (vector == NMI_VECTOR)
- dmode = dest_NMI;
+ dmode = APIC_DELIVERY_MODE_NMI;
else
- dmode = dest_Fixed;
+ dmode = APIC_DELIVERY_MODE_FIXED;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
@@ -807,15 +832,13 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
.apic_id_valid = uv_apic_id_valid,
.apic_id_registered = uv_apic_id_registered,
- .irq_delivery_mode = dest_Fixed,
- .irq_dest_mode = 0, /* Physical */
+ .delivery_mode = APIC_DELIVERY_MODE_FIXED,
+ .dest_mode_logical = false,
.disable_esr = 0,
- .dest_logical = APIC_DEST_LOGICAL,
- .check_apicid_used = NULL,
+ .check_apicid_used = NULL,
.init_apic_ldr = uv_init_apic_ldr,
-
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
@@ -1590,21 +1613,30 @@ static void check_efi_reboot(void)
reboot_type = BOOT_ACPI;
}
-/* Setup user proc fs files */
+/*
+ * User procfs file handling is now deprecated.
+ * Recommend using /sys/firmware/sgi_uv/... instead.
+ */
static int __maybe_unused proc_hubbed_show(struct seq_file *file, void *data)
{
+ pr_notice_once("%s: using deprecated /proc/sgi_uv/hubbed, use /sys/firmware/sgi_uv/hub_type\n",
+ current->comm);
seq_printf(file, "0x%x\n", uv_hubbed_system);
return 0;
}
static int __maybe_unused proc_hubless_show(struct seq_file *file, void *data)
{
+ pr_notice_once("%s: using deprecated /proc/sgi_uv/hubless, use /sys/firmware/sgi_uv/hubless\n",
+ current->comm);
seq_printf(file, "0x%x\n", uv_hubless_system);
return 0;
}
static int __maybe_unused proc_archtype_show(struct seq_file *file, void *data)
{
+ pr_notice_once("%s: using deprecated /proc/sgi_uv/archtype, use /sys/firmware/sgi_uv/archtype\n",
+ current->comm);
seq_printf(file, "%s/%s\n", uv_archtype, oem_table_id);
return 0;
}
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 70b7154f4bdd..60b9f42ce3c1 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -66,7 +66,6 @@ static void __used common(void)
OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
- OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
#endif
#ifdef CONFIG_XEN
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 93792b457b81..637b499450d1 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_X86_MCE) += mce/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
+obj-$(CONFIG_X86_SGX) += sgx/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 6062ce586b95..f8ca66f3d861 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -23,7 +23,6 @@
#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
-# include <asm/set_memory.h>
#endif
#include "cpu.h"
@@ -330,7 +329,6 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
*/
static void amd_get_topology(struct cpuinfo_x86 *c)
{
- u8 node_id;
int cpu = smp_processor_id();
/* get information required for multi-node processors */
@@ -340,7 +338,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- node_id = ecx & 0xff;
+ c->cpu_die_id = ecx & 0xff;
if (c->x86 == 0x15)
c->cu_id = ebx & 0xff;
@@ -360,15 +358,15 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
- cacheinfo_amd_init_llc_id(c, cpu, node_id);
+ cacheinfo_amd_init_llc_id(c, cpu);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
- node_id = value & 7;
+ c->cpu_die_id = value & 7;
- per_cpu(cpu_llc_id, cpu) = node_id;
+ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
} else
return;
@@ -393,7 +391,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}
static void amd_detect_ppin(struct cpuinfo_x86 *c)
@@ -425,12 +423,6 @@ clear_ppin:
clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
}
-u16 amd_get_nb_id(int cpu)
-{
- return per_cpu(cpu_llc_id, cpu);
-}
-EXPORT_SYMBOL_GPL(amd_get_nb_id);
-
u32 amd_get_nodes_per_socket(void)
{
return nodes_per_socket;
@@ -516,26 +508,6 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
-
-#ifdef CONFIG_X86_64
- if (c->x86 >= 0xf) {
- unsigned long long tseg;
-
- /*
- * Split up direct mapping around the TSEG SMM area.
- * Don't do it for gbpages because there seems very little
- * benefit in doing so.
- */
- if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
- unsigned long pfn = tseg >> PAGE_SHIFT;
-
- pr_debug("tseg: %010llx\n", tseg);
- if (pfn_range_is_mapped(pfn, pfn + 1))
- set_memory_4k((unsigned long)__va(tseg), 1);
- }
- }
-#endif
-
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
if (c->x86 > 0x10 ||
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index e2f319dc992d..22911deacb6e 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -14,11 +14,13 @@
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/sched/isolation.h>
+#include <linux/rcupdate.h>
#include "cpu.h"
struct aperfmperf_sample {
unsigned int khz;
+ atomic_t scfpending;
ktime_t time;
u64 aperf;
u64 mperf;
@@ -62,17 +64,20 @@ static void aperfmperf_snapshot_khz(void *dummy)
s->aperf = aperf;
s->mperf = mperf;
s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
+ atomic_set_release(&s->scfpending, 0);
}
static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
+ struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);
/* Don't bother re-computing within the cache threshold time. */
if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
return true;
- smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
+ if (!atomic_xchg(&s->scfpending, 1) || wait)
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
/* Return false if the previous iteration was too long ago. */
return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
@@ -89,6 +94,9 @@ unsigned int aperfmperf_get_khz(int cpu)
if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
return 0;
+ if (rcu_is_idle_cpu(cpu))
+ return 0; /* Idle CPUs are completely uninteresting. */
+
aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
return per_cpu(samples.khz, cpu);
}
@@ -108,6 +116,8 @@ void arch_freq_prepare_all(void)
for_each_online_cpu(cpu) {
if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
continue;
+ if (rcu_is_idle_cpu(cpu))
+ continue; /* Idle CPUs are completely uninteresting. */
if (!aperfmperf_snapshot_cpu(cpu, now, false))
wait = true;
}
@@ -118,6 +128,8 @@ void arch_freq_prepare_all(void)
unsigned int arch_freq_get_on_cpu(int cpu)
{
+ struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);
+
if (!cpu_khz)
return 0;
@@ -131,6 +143,8 @@ unsigned int arch_freq_get_on_cpu(int cpu)
return per_cpu(samples.khz, cpu);
msleep(APERFMPERF_REFRESH_DELAY_MS);
+ atomic_set(&s->scfpending, 1);
+ smp_mb(); /* ->scfpending before smp_call_function_single(). */
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
return per_cpu(samples.khz, cpu);
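[Annotation] A minimal, self-contained sketch of the scfpending pattern
introduced in this file (names are illustrative):

	static atomic_t scfpending;

	static void remote_sample(void *unused)
	{
		/* ... take the sample ... */
		atomic_set_release(&scfpending, 0); /* publish, then allow the next kick */
	}

	static void kick_cpu(int cpu, bool wait)
	{
		/* atomic_xchg() returns the old value, so only the first
		 * caller (old value 0) sends the IPI, unless the caller
		 * needs a fresh sample and must wait anyway. */
		if (!atomic_xchg(&scfpending, 1) || wait)
			smp_call_function_single(cpu, remote_sample, NULL, wait);
	}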
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d3f0db463f96..d41b70fe4918 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -739,11 +739,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ spectre_v2_user_ibpb = mode;
switch (cmd) {
case SPECTRE_V2_USER_CMD_FORCE:
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
static_branch_enable(&switch_mm_always_ibpb);
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
break;
case SPECTRE_V2_USER_CMD_PRCTL:
case SPECTRE_V2_USER_CMD_AUTO:
@@ -757,8 +759,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
-
- spectre_v2_user_ibpb = mode;
}
/*
@@ -1254,6 +1254,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
return 0;
}
+static bool is_spec_ib_user_controlled(void)
+{
+ return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
+}
+
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
@@ -1261,16 +1269,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
+
/*
- * Indirect branch speculation is always disabled in strict
- * mode. It can neither be enabled if it was force-disabled
- * by a previous prctl call.
+ * With strict mode for both IBPB and STIBP, the instruction
+ * code paths avoid checking this task flag and instead
+ * unconditionally run the instruction. However, STIBP and IBPB
+ * are independent and either can be set to conditionally
+ * enabled regardless of the mode of the other.
+ *
+ * If either is set to conditional, allow the task flag to be
+ * updated, unless it was force-disabled by a previous prctl
+ * call. Currently, this is possible on an AMD CPU which has the
+ * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
+ * kernel is booted with 'spectre_v2_user=seccomp', then
+ * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
+ * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
*/
- if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+ if (!is_spec_ib_user_controlled() ||
task_spec_ib_force_disable(task))
return -EPERM;
+
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
break;
@@ -1283,10 +1301,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
- if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+
+ if (!is_spec_ib_user_controlled())
return 0;
+
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
task_set_spec_ib_force_disable(task);
@@ -1351,20 +1369,17 @@ static int ib_prctl_get(struct task_struct *task)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
- else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
- return PR_SPEC_DISABLE;
- else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
- spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
- spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+ else if (is_spec_ib_user_controlled()) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- } else
+ } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return PR_SPEC_DISABLE;
+ else
return PR_SPEC_NOT_AFFECTED;
}
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 57074cf3ad7c..3ca9be482a9e 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -580,7 +580,7 @@ static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
if (index < 3)
return;
- node = amd_get_nb_id(smp_processor_id());
+ node = topology_die_id(smp_processor_id());
this_leaf->nb = node_to_amd_nb(node);
if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
amd_calc_l3_indices(this_leaf->nb);
@@ -646,7 +646,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
return i;
}
-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
{
/*
* We may have multiple LLCs if L3 caches exist, so check if we
@@ -657,7 +657,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
if (c->x86 < 0x17) {
/* LLC is at the node level. */
- per_cpu(cpu_llc_id, cpu) = node_id;
+ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
/*
* LLC is at the core complex level.
@@ -684,7 +684,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
}
}
-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
{
/*
* We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c
index 29a3bedabd06..3b1b01f2b248 100644
--- a/arch/x86/kernel/cpu/feat_ctl.c
+++ b/arch/x86/kernel/cpu/feat_ctl.c
@@ -93,16 +93,41 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c)
}
#endif /* CONFIG_X86_VMX_FEATURE_NAMES */
+static void clear_sgx_caps(void)
+{
+ setup_clear_cpu_cap(X86_FEATURE_SGX);
+ setup_clear_cpu_cap(X86_FEATURE_SGX_LC);
+}
+
+static int __init nosgx(char *str)
+{
+ clear_sgx_caps();
+
+ return 0;
+}
+
+early_param("nosgx", nosgx);
+
void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
{
bool tboot = tboot_enabled();
+ bool enable_sgx;
u64 msr;
if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
clear_cpu_cap(c, X86_FEATURE_VMX);
+ clear_sgx_caps();
return;
}
+ /*
+ * Enable SGX if and only if the kernel supports SGX and Launch Control
+ * is supported, i.e. disable SGX if the LE hash MSRs can't be written.
+ */
+ enable_sgx = cpu_has(c, X86_FEATURE_SGX) &&
+ cpu_has(c, X86_FEATURE_SGX_LC) &&
+ IS_ENABLED(CONFIG_X86_SGX);
+
if (msr & FEAT_CTL_LOCKED)
goto update_caps;
@@ -124,13 +149,16 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX;
}
+ if (enable_sgx)
+ msr |= FEAT_CTL_SGX_ENABLED | FEAT_CTL_SGX_LC_ENABLED;
+
wrmsrl(MSR_IA32_FEAT_CTL, msr);
update_caps:
set_cpu_cap(c, X86_FEATURE_MSR_IA32_FEAT_CTL);
if (!cpu_has(c, X86_FEATURE_VMX))
- return;
+ goto update_sgx;
if ( (tboot && !(msr & FEAT_CTL_VMX_ENABLED_INSIDE_SMX)) ||
(!tboot && !(msr & FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX))) {
@@ -143,4 +171,12 @@ update_caps:
init_vmx_capabilities(c);
#endif
}
+
+update_sgx:
+ if (!(msr & FEAT_CTL_SGX_ENABLED) ||
+ !(msr & FEAT_CTL_SGX_LC_ENABLED) || !enable_sgx) {
+ if (enable_sgx)
+ pr_err_once("SGX disabled by BIOS\n");
+ clear_sgx_caps();
+ }
}
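[Annotation] The update_sgx tail above boils down to a single predicate; a
condensed equivalent (omitting the one-time BIOS message):

	bool sgx_usable = enable_sgx &&
			  (msr & FEAT_CTL_SGX_ENABLED) &&
			  (msr & FEAT_CTL_SGX_LC_ENABLED);
	if (!sgx_usable)
		clear_sgx_caps();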
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index ac6c30e5801d..ae59115d18f9 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -14,9 +14,6 @@
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
-#ifdef CONFIG_X86_64
-# include <asm/set_memory.h>
-#endif
#include "cpu.h"
@@ -65,7 +62,6 @@ static void hygon_get_topology_early(struct cpuinfo_x86 *c)
*/
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
- u8 node_id;
int cpu = smp_processor_id();
/* get information required for multi-node processors */
@@ -75,7 +71,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- node_id = ecx & 0xff;
+ c->cpu_die_id = ecx & 0xff;
c->cpu_core_id = ebx & 0xff;
@@ -93,14 +89,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
/* Socket ID is ApicId[6] for these processors. */
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
- cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ cacheinfo_hygon_init_llc_id(c, cpu);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
- node_id = value & 7;
+ c->cpu_die_id = value & 7;
- per_cpu(cpu_llc_id, cpu) = node_id;
+ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
} else
return;
@@ -123,7 +119,7 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c)
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}
static void srat_detect_node(struct cpuinfo_x86 *c)
@@ -204,23 +200,6 @@ static void early_init_hygon_mc(struct cpuinfo_x86 *c)
static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_X86_64
- unsigned long long tseg;
-
- /*
- * Split up direct mapping around the TSEG SMM area.
- * Don't do it for gbpages because there seems very little
- * benefit in doing so.
- */
- if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
- unsigned long pfn = tseg >> PAGE_SHIFT;
-
- pr_debug("tseg: %010llx\n", tseg);
- if (pfn_range_is_mapped(pfn, pfn + 1))
- set_memory_4k((unsigned long)__va(tseg), 1);
- }
-#endif
-
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
u64 val;
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 0c6b02dd744c..e486f96b3cb3 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -1341,7 +1341,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
return -ENODEV;
if (is_shared_bank(bank)) {
- nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ nb = node_to_amd_nb(topology_die_id(cpu));
/* threshold descriptor already initialized on this node? */
if (nb && nb->bank4) {
@@ -1445,7 +1445,7 @@ static void threshold_remove_bank(struct threshold_bank *bank)
* The last CPU on this node using the shared bank is going
* away, remove that bank now.
*/
- nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
+ nb = node_to_amd_nb(topology_die_id(smp_processor_id()));
nb->bank4 = NULL;
}
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index af8d37962586..b58b85380ddb 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -51,6 +51,67 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
}
EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
+int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
+{
+ const u64 *i_mce = ((const u64 *) (ctx_info + 1));
+ unsigned int cpu;
+ struct mce m;
+
+ if (!boot_cpu_has(X86_FEATURE_SMCA))
+ return -EINVAL;
+
+ /*
+ * The starting address of the register array extracted from BERT must
+ * match the first expected register in the register layout of the
+ * SMCA address space. This address corresponds to the bank's MCA_STATUS
+ * register.
+ *
+ * Match any MCi_STATUS register by turning off bank numbers.
+ */
+ if ((ctx_info->msr_addr & MSR_AMD64_SMCA_MC0_STATUS) !=
+ MSR_AMD64_SMCA_MC0_STATUS)
+ return -EINVAL;
+
+ /*
+ * The register array size must be large enough to include all the
+ * SMCA registers which need to be extracted.
+ *
+ * The number of registers in the register array is determined by
+ * Register Array Size/8 as defined in UEFI spec v2.8, sec N.2.4.2.2.
+ * The register layout is fixed and currently the raw data in the
+ * register array includes 6 SMCA registers which the kernel can
+ * extract.
+ */
+ if (ctx_info->reg_arr_size < 48)
+ return -EINVAL;
+
+ mce_setup(&m);
+
+ m.extcpu = -1;
+ m.socketid = -1;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_data(cpu).initial_apicid == lapic_id) {
+ m.extcpu = cpu;
+ m.socketid = cpu_data(m.extcpu).phys_proc_id;
+ break;
+ }
+ }
+
+ m.apicid = lapic_id;
+ m.bank = (ctx_info->msr_addr >> 4) & 0xFF;
+ m.status = *i_mce;
+ m.addr = *(i_mce + 1);
+ m.misc = *(i_mce + 2);
+ /* Skipping MCA_CONFIG */
+ m.ipid = *(i_mce + 4);
+ m.synd = *(i_mce + 5);
+
+ mce_log(&m);
+
+ return 0;
+}
+
#define CPER_CREATOR_MCE \
GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
0x64, 0x90, 0xb8, 0x9d)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 4102b866e7c0..13d3f1cbda17 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -162,7 +162,8 @@ EXPORT_SYMBOL_GPL(mce_log);
void mce_register_decode_chain(struct notifier_block *nb)
{
- if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
+ if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
+ nb->priority > MCE_PRIO_HIGHEST))
return;
blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
@@ -1265,14 +1266,14 @@ static void kill_me_maybe(struct callback_head *cb)
}
}
-static void queue_task_work(struct mce *m, int kill_it)
+static void queue_task_work(struct mce *m, int kill_current_task)
{
current->mce_addr = m->addr;
current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
current->mce_whole_page = whole_page(m);
- if (kill_it)
+ if (kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
@@ -1320,10 +1321,10 @@ noinstr void do_machine_check(struct pt_regs *regs)
int no_way_out = 0;
/*
- * If kill_it gets set, there might be a way to recover from this
+ * If kill_current_task is not set, there might be a way to recover from this
* error.
*/
- int kill_it = 0;
+ int kill_current_task = 0;
/*
* MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
@@ -1350,8 +1351,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
* severity is MCE_AR_SEVERITY we have other options.
*/
if (!(m.mcgstatus & MCG_STATUS_RIPV))
- kill_it = 1;
-
+ kill_current_task = (cfg->tolerant == 3) ? 0 : 1;
/*
* Check if this MCE is signaled to only this logical processor
* (Intel and Zhaoxin only).
@@ -1368,7 +1368,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
* to see it will clear it.
*/
if (lmce) {
- if (no_way_out)
+ if (no_way_out && cfg->tolerant < 3)
mce_panic("Fatal local machine check", &m, msg);
} else {
order = mce_start(&no_way_out);
@@ -1384,8 +1384,13 @@ noinstr void do_machine_check(struct pt_regs *regs)
* When there's any problem use only local no_way_out state.
*/
if (!lmce) {
- if (mce_end(order) < 0)
- no_way_out = worst >= MCE_PANIC_SEVERITY;
+ if (mce_end(order) < 0) {
+ if (!no_way_out)
+ no_way_out = worst >= MCE_PANIC_SEVERITY;
+
+ if (no_way_out && cfg->tolerant < 3)
+ mce_panic("Fatal machine check on current CPU", &m, msg);
+ }
} else {
/*
* If there was a fatal machine check we should have
@@ -1401,19 +1406,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}
}
- /*
- * If tolerant is at an insane level we drop requests to kill
- * processes and continue even when there is no way out.
- */
- if (cfg->tolerant == 3)
- kill_it = 0;
- else if (no_way_out)
- mce_panic("Fatal machine check on current CPU", &m, msg);
-
- if (worst > 0)
- irq_work_queue(&mce_irq_work);
-
- if (worst != MCE_AR_SEVERITY && !kill_it)
+ if (worst != MCE_AR_SEVERITY && !kill_current_task)
goto out;
/* Fault was in user mode and we need to take some action */
@@ -1421,7 +1414,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
- queue_task_work(&m, kill_it);
+ queue_task_work(&m, kill_current_task);
} else {
/*
@@ -1439,7 +1432,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}
if (m.kflags & MCE_IN_KERNEL_COPYIN)
- queue_task_work(&m, kill_it);
+ queue_task_work(&m, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
@@ -1581,7 +1574,7 @@ static void __mcheck_cpu_mce_banks_init(void)
* __mcheck_cpu_init_clear_banks() does the final bank setup.
*/
b->ctl = -1ULL;
- b->init = 1;
+ b->init = true;
}
}
@@ -1762,7 +1755,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
- mce_banks[0].init = 0;
+ mce_banks[0].init = false;
/*
* All newer Intel systems support MCE broadcasting. Enable
@@ -1811,11 +1804,9 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
case X86_VENDOR_INTEL:
intel_p5_mcheck_init(c);
return 1;
- break;
case X86_VENDOR_CENTAUR:
winchip_mcheck_init(c);
return 1;
- break;
default:
return 0;
}
@@ -1983,7 +1974,7 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
- bool irq_state;
+ irqentry_state_t irq_state;
WARN_ON_ONCE(user_mode(regs));
@@ -1995,7 +1986,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
mce_check_crashing_cpu())
return;
- irq_state = idtentry_enter_nmi(regs);
+ irq_state = irqentry_nmi_enter(regs);
/*
* The call targets are marked noinstr, but objtool can't figure
* that out because it's an indirect call. Annotate it.
@@ -2006,7 +1997,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
if (regs->flags & X86_EFLAGS_IF)
trace_hardirqs_on_prepare();
instrumentation_end();
- idtentry_exit_nmi(regs, irq_state);
+ irqentry_nmi_exit(regs, irq_state);
}
static __always_inline void exc_machine_check_user(struct pt_regs *regs)
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 3a44346f2276..7b360731fc2d 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -522,8 +522,8 @@ static void do_inject(void)
if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
b == 4 &&
boot_cpu_data.x86 < 0x17) {
- toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
- cpu = get_nbc_for_node(amd_get_nb_id(cpu));
+ toggle_nb_mca_mst_cpu(topology_die_id(cpu));
+ cpu = get_nbc_for_node(topology_die_id(cpu));
}
get_online_cpus();
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index abe9fe0fb851..c2476fe0682e 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -509,12 +509,33 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
}
}
+/*
+ * Enable additional error logs from the integrated
+ * memory controller on processors that support this.
+ */
+static void intel_imc_init(struct cpuinfo_x86 *c)
+{
+ u64 error_control;
+
+ switch (c->x86_model) {
+ case INTEL_FAM6_SANDYBRIDGE_X:
+ case INTEL_FAM6_IVYBRIDGE_X:
+ case INTEL_FAM6_HASWELL_X:
+ if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
+ return;
+ error_control |= 2;
+ wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
+ break;
+ }
+}
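
The bare '2' written above is bit 1 of MSR_ERROR_CONTROL, which gates the
extended IMC error log. A hedged sketch of the same enable with the magic
number spelled out as a named bit (the ERROR_CONTROL_EXT_LOG_EN name is
illustrative, not part of the patch):

/* Illustrative sketch: same MSR dance as intel_imc_init() above. */
#define ERROR_CONTROL_EXT_LOG_EN	BIT(1)	/* hypothetical name for "2" */

static void intel_imc_enable_ext_log(void)
{
	u64 error_control;

	/* rdmsrl_safe() tolerates parts where the MSR is not implemented. */
	if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
		return;

	error_control |= ERROR_CONTROL_EXT_LOG_EN;
	wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
}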
+
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
intel_init_thermal(c);
intel_init_cmci();
intel_init_lmce();
intel_ppin_init(c);
+ intel_imc_init(c);
}
void mce_intel_feature_clear(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 3f6b137ef4e6..3d4a48336084 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -215,7 +215,6 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
default:
WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
return 0;
- break;
}
if (sh_psize > min_t(u32, buf_size, max_size))
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 6a99535d7f37..7e8e07bddd5f 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -100,53 +100,6 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev
return find_matching_signature(mc, csig, cpf);
}
-/*
- * Given CPU signature and a microcode patch, this function finds if the
- * microcode patch has matching family and model with the CPU.
- *
- * %true - if there's a match
- * %false - otherwise
- */
-static bool microcode_matches(struct microcode_header_intel *mc_header,
- unsigned long sig)
-{
- unsigned long total_size = get_totalsize(mc_header);
- unsigned long data_size = get_datasize(mc_header);
- struct extended_sigtable *ext_header;
- unsigned int fam_ucode, model_ucode;
- struct extended_signature *ext_sig;
- unsigned int fam, model;
- int ext_sigcount, i;
-
- fam = x86_family(sig);
- model = x86_model(sig);
-
- fam_ucode = x86_family(mc_header->sig);
- model_ucode = x86_model(mc_header->sig);
-
- if (fam == fam_ucode && model == model_ucode)
- return true;
-
- /* Look for ext. headers: */
- if (total_size <= data_size + MC_HEADER_SIZE)
- return false;
-
- ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
- ext_sigcount = ext_header->count;
-
- for (i = 0; i < ext_sigcount; i++) {
- fam_ucode = x86_family(ext_sig->sig);
- model_ucode = x86_model(ext_sig->sig);
-
- if (fam == fam_ucode && model == model_ucode)
- return true;
-
- ext_sig++;
- }
- return false;
-}
-
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
struct ucode_patch *p;
@@ -164,7 +117,7 @@ static struct ucode_patch *memdup_patch(void *data, unsigned int size)
return p;
}
-static void save_microcode_patch(void *data, unsigned int size)
+static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
struct ucode_patch *iter, *tmp, *p = NULL;
@@ -210,6 +163,9 @@ static void save_microcode_patch(void *data, unsigned int size)
if (!p)
return;
+ if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
+ return;
+
/*
* Save for early loading. On 32-bit, that needs to be a physical
* address as the APs are running from physical addresses, before
@@ -344,13 +300,14 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
size -= mc_size;
- if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
+ if (!find_matching_signature(data, uci->cpu_sig.sig,
+ uci->cpu_sig.pf)) {
data += mc_size;
continue;
}
if (save) {
- save_microcode_patch(data, mc_size);
+ save_microcode_patch(uci, data, mc_size);
goto next;
}
@@ -483,14 +440,14 @@ static void show_saved_mc(void)
* Save this microcode patch. It will be loaded early when a CPU is
* hot-added or resumes.
*/
-static void save_mc_for_early(u8 *mc, unsigned int size)
+static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
mutex_lock(&x86_cpu_microcode_mutex);
- save_microcode_patch(mc, size);
+ save_microcode_patch(uci, mc, size);
show_saved_mc();
mutex_unlock(&x86_cpu_microcode_mutex);
@@ -935,7 +892,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc, new_mc_size);
+ save_mc_for_early(uci, new_mc, new_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 05ef1f4550cb..f628e3dc150f 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -366,9 +366,38 @@ static void __init ms_hyperv_init_platform(void)
#endif
}
+static bool __init ms_hyperv_x2apic_available(void)
+{
+ return x2apic_supported();
+}
+
+/*
+ * If ms_hyperv_msi_ext_dest_id() returns true, hyperv_prepare_irq_remapping()
+ * returns -ENODEV and the Hyper-V IOMMU driver is not used; instead, the
+ * generic support of the 15-bit APIC ID is used: see __irq_msi_compose_msg().
+ *
+ * Note: for a VM on Hyper-V, the I/O-APIC is the only device which
+ * (logically) generates MSIs directly to the system APIC irq domain.
+ * There is no HPET, and PCI MSI/MSI-X interrupts are remapped by the
+ * pci-hyperv host bridge.
+ */
+static bool __init ms_hyperv_msi_ext_dest_id(void)
+{
+ u32 eax;
+
+ eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_INTERFACE);
+ if (eax != HYPERV_VS_INTERFACE_EAX_SIGNATURE)
+ return false;
+
+ eax = cpuid_eax(HYPERV_CPUID_VIRT_STACK_PROPERTIES);
+ return eax & HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE;
+}
+
const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
.name = "Microsoft Hyper-V",
.detect = ms_hyperv_platform,
.type = X86_HYPER_MS_HYPERV,
+ .init.x2apic_available = ms_hyperv_x2apic_available,
+ .init.msi_ext_dest_id = ms_hyperv_msi_ext_dest_id,
.init.init_platform = ms_hyperv_init_platform,
};
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 6a80f36b5d59..61eb26edc6d2 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -794,8 +794,6 @@ void mtrr_ap_init(void)
if (!use_intel() || mtrr_aps_delayed_init)
return;
- rcu_cpu_starting(smp_processor_id());
-
/*
* Ideally we should hold mtrr_mutex here to avoid mtrr entries
* changed, but this routine will be called in cpu boot time,
@@ -813,7 +811,8 @@ void mtrr_ap_init(void)
}
/**
- * Save current fixed-range MTRR state of the first cpu in cpu_online_mask.
+ * mtrr_save_state - Save current fixed-range MTRR state of the first
+ * cpu in cpu_online_mask.
*/
void mtrr_save_state(void)
{
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index e5f4ee8f4c3b..698bb26aeb6e 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -570,6 +570,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
if (d) {
cpumask_set_cpu(cpu, &d->cpu_mask);
+ if (r->cache.arch_has_per_cpu_cfg)
+ rdt_domain_reconfigure_cdp(r);
return;
}
@@ -893,6 +895,10 @@ static __init void __check_quirks_intel(void)
set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
else
set_rdt_options("!l3cat");
+ fallthrough;
+ case INTEL_FAM6_BROADWELL_X:
+ intel_rdt_mbm_apply_quirk();
+ break;
}
}
@@ -923,6 +929,7 @@ static __init void rdt_init_res_defs_intel(void)
r->rid == RDT_RESOURCE_L2CODE) {
r->cache.arch_has_sparse_bitmaps = false;
r->cache.arch_has_empty_bitmaps = false;
+ r->cache.arch_has_per_cpu_cfg = false;
} else if (r->rid == RDT_RESOURCE_MBA) {
r->msr_base = MSR_IA32_MBA_THRTL_BASE;
r->msr_update = mba_wrmsr_intel;
@@ -943,6 +950,7 @@ static __init void rdt_init_res_defs_amd(void)
r->rid == RDT_RESOURCE_L2CODE) {
r->cache.arch_has_sparse_bitmaps = true;
r->cache.arch_has_empty_bitmaps = true;
+ r->cache.arch_has_per_cpu_cfg = true;
} else if (r->rid == RDT_RESOURCE_MBA) {
r->msr_base = MSR_IA32_MBA_BW_BASE;
r->msr_update = mba_wrmsr_amd;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 80fa997fae60..ee71c47844cb 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -264,7 +264,7 @@ void __exit rdtgroup_exit(void);
struct rftype {
char *name;
umode_t mode;
- struct kernfs_ops *kf_ops;
+ const struct kernfs_ops *kf_ops;
unsigned long flags;
unsigned long fflags;
@@ -360,6 +360,8 @@ struct msr_param {
* executing entities
* @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
* @arch_has_empty_bitmaps: True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
+ * level has CPU scope.
*/
struct rdt_cache {
unsigned int cbm_len;
@@ -369,6 +371,7 @@ struct rdt_cache {
unsigned int shareable_bits;
bool arch_has_sparse_bitmaps;
bool arch_has_empty_bitmaps;
+ bool arch_has_per_cpu_cfg;
};
/**
@@ -616,6 +619,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
void mbm_setup_overflow_handler(struct rdt_domain *dom,
unsigned long delay_ms);
void mbm_handle_overflow(struct work_struct *work);
+void __init intel_rdt_mbm_apply_quirk(void);
bool is_mba_sc(struct rdt_resource *r);
void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm);
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 54dffe574e67..7ac31210e452 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -64,6 +64,69 @@ unsigned int rdt_mon_features;
*/
unsigned int resctrl_cqm_threshold;
+#define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5))
+
+/*
+ * The correction factor table is documented in Documentation/x86/resctrl.rst.
+ * If rmid > rmid threshold, MBM total and local values should be multiplied
+ * by the correction factor.
+ *
+ * The original table is modified to simplify the code:
+ *
+ * 1. The threshold 0 is changed to rmid count - 1 so that no correction
+ *    is done in that case.
+ * 2. The MBM total and local correction table is indexed by the core
+ *    count, which equals (x86_cache_max_rmid + 1) / 8 - 1 and ranges
+ *    from 0 up to 27.
+ * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
+ * to calculate corrected value by shifting:
+ * corrected_value = (original_value * correction_factor) >> 20
+ */
+static const struct mbm_correction_factor_table {
+ u32 rmidthreshold;
+ u64 cf;
+} mbm_cf_table[] __initdata = {
+ {7, CF(1.000000)},
+ {15, CF(1.000000)},
+ {15, CF(0.969650)},
+ {31, CF(1.000000)},
+ {31, CF(1.066667)},
+ {31, CF(0.969650)},
+ {47, CF(1.142857)},
+ {63, CF(1.000000)},
+ {63, CF(1.185115)},
+ {63, CF(1.066553)},
+ {79, CF(1.454545)},
+ {95, CF(1.000000)},
+ {95, CF(1.230769)},
+ {95, CF(1.142857)},
+ {95, CF(1.066667)},
+ {127, CF(1.000000)},
+ {127, CF(1.254863)},
+ {127, CF(1.185255)},
+ {151, CF(1.000000)},
+ {127, CF(1.066667)},
+ {167, CF(1.000000)},
+ {159, CF(1.454334)},
+ {183, CF(1.000000)},
+ {127, CF(0.969744)},
+ {191, CF(1.280246)},
+ {191, CF(1.230921)},
+ {215, CF(1.000000)},
+ {191, CF(1.143118)},
+};
+
+static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
+static u64 mbm_cf __read_mostly;
+
+static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
+{
+ /* Correct MBM value. */
+ if (rmid > mbm_cf_rmidthreshold)
+ val = (val * mbm_cf) >> 20;
+
+ return val;
+}
+
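Because each factor is pre-scaled by 2^20, the correction is an integer
multiply plus a shift instead of floating-point math. A minimal standalone
sketch with illustrative values (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5))	/* scale by 2^20 */

int main(void)
{
	uint64_t cf = CF(1.185115);	/* one factor from the table above */
	uint64_t chunks = 100000;	/* raw MBM chunk count */

	/* corrected_value = (original_value * correction_factor) >> 20 */
	printf("%llu\n", (unsigned long long)((chunks * cf) >> 20));
	return 0;			/* prints 118511, i.e. ~1.185115x */
}
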
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
struct rmid_entry *entry;
@@ -260,7 +323,8 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
m->chunks += chunks;
m->prev_msr = tval;
- rr->val += m->chunks;
+ rr->val += get_corrected_mbm_count(rmid, m->chunks);
+
return 0;
}
@@ -279,8 +343,7 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
return;
chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
- m->chunks += chunks;
- cur_bw = (chunks * r->mon_scale) >> 20;
+ cur_bw = (get_corrected_mbm_count(rmid, chunks) * r->mon_scale) >> 20;
if (m->delta_comp)
m->delta_bw = abs(cur_bw - m->prev_bw);
@@ -450,15 +513,14 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
}
if (is_mbm_local_enabled()) {
rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
+ __mon_event_count(rmid, &rr);
/*
* Call the MBA software controller only for the
* control groups and when user has enabled
* the software controller explicitly.
*/
- if (!is_mba_sc(NULL))
- __mon_event_count(rmid, &rr);
- else
+ if (is_mba_sc(NULL))
mbm_bw_count(rmid, &rr);
}
}
@@ -644,3 +706,17 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
return 0;
}
+
+void __init intel_rdt_mbm_apply_quirk(void)
+{
+ int cf_index;
+
+ cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
+ if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
+ pr_info("No MBM correction factor available\n");
+ return;
+ }
+
+ mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
+ mbm_cf = mbm_cf_table[cf_index].cf;
+}
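
As a worked example of the lookup: a part enumerating x86_cache_max_rmid = 175
yields cf_index = (175 + 1) / 8 - 1 = 21, selecting the {159, CF(1.454334)}
row above, so only counts for RMIDs above 159 get scaled by ~1.454334. A small
sketch of the index arithmetic with that hypothetical CPUID value:

#include <stdio.h>

int main(void)
{
	unsigned int max_rmid = 175;		/* hypothetical CPUID value */
	int cf_index = (max_rmid + 1) / 8 - 1;	/* (175 + 1) / 8 - 1 = 21 */

	printf("cf_index = %d\n", cf_index);	/* prints 21 */
	return 0;
}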
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 0daf2f1cf7a8..e916646adc69 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1458,7 +1458,7 @@ static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
return 0;
}
-static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area, unsigned long flags)
{
/* Not supported */
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index af323e2e3100..29ffb95b25ff 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -240,13 +240,13 @@ static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
return -EINVAL;
}
-static struct kernfs_ops rdtgroup_kf_single_ops = {
+static const struct kernfs_ops rdtgroup_kf_single_ops = {
.atomic_write_len = PAGE_SIZE,
.write = rdtgroup_file_write,
.seq_show = rdtgroup_seqfile_show,
};
-static struct kernfs_ops kf_mondata_ops = {
+static const struct kernfs_ops kf_mondata_ops = {
.atomic_write_len = PAGE_SIZE,
.seq_show = rdtgroup_mondata_show,
};
@@ -507,6 +507,24 @@ unlock:
return ret ?: nbytes;
}
+/**
+ * rdtgroup_remove - Remove a resource group safely
+ * @rdtgrp: resource group to remove
+ *
+ * On resource group creation via a mkdir, an extra kernfs_node reference is
+ * taken to ensure that the rdtgroup structure remains accessible for the
+ * rdtgroup_kn_unlock() calls where it is removed.
+ *
+ * Drop the extra reference here, then free the rdtgroup structure.
+ *
+ * Return: void
+ */
+static void rdtgroup_remove(struct rdtgroup *rdtgrp)
+{
+ kernfs_put(rdtgrp->kn);
+ kfree(rdtgrp);
+}
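
The pairing this helper relies on: mkdir takes one extra kernfs reference on
rdtgrp->kn, and every teardown path now funnels through rdtgroup_remove() to
drop it before freeing. A schematic of the lifetime, using the patch's own
function names (not literal patch code):

/*
 * Reference pairing, schematically:
 *
 *   mkdir_rdt_prepare():
 *       kn = kernfs_create_dir(...);
 *       kernfs_get(kn);             - extra ref keeps rdtgrp->kn accessible
 *       rdtgrp->kn = kn;
 *
 *   any teardown path:
 *       kernfs_remove(rdtgrp->kn);  - drops kernfs' own reference
 *       rdtgroup_remove(rdtgrp);    - kernfs_put(rdtgrp->kn), then kfree()
 */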
+
struct task_move_callback {
struct callback_head work;
struct rdtgroup *rdtgrp;
@@ -529,7 +547,7 @@ static void move_myself(struct callback_head *head)
(rdtgrp->flags & RDT_DELETED)) {
current->closid = 0;
current->rmid = 0;
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
}
if (unlikely(current->flags & PF_EXITING))
@@ -1769,7 +1787,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
if (IS_ERR(kn_subdir))
return PTR_ERR(kn_subdir);
- kernfs_get(kn_subdir);
ret = rdtgroup_kn_set_ugid(kn_subdir);
if (ret)
return ret;
@@ -1792,7 +1809,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
if (IS_ERR(kn_info))
return PTR_ERR(kn_info);
- kernfs_get(kn_info);
ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
if (ret)
@@ -1813,12 +1829,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
goto out_destroy;
}
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that @rdtgrp->kn is always accessible.
- */
- kernfs_get(kn_info);
-
ret = rdtgroup_kn_set_ugid(kn_info);
if (ret)
goto out_destroy;
@@ -1847,12 +1857,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
if (dest_kn)
*dest_kn = kn;
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that @rdtgrp->kn is always accessible.
- */
- kernfs_get(kn);
-
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@@ -1905,8 +1909,13 @@ static int set_cache_qos_cfg(int level, bool enable)
r_l = &rdt_resources_all[level];
list_for_each_entry(d, &r_l->domains, list) {
- /* Pick one CPU from each domain instance to update MSR */
- cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ if (r_l->cache.arch_has_per_cpu_cfg)
+ /* Pick all the CPUs in the domain instance */
+ for_each_cpu(cpu, &d->cpu_mask)
+ cpumask_set_cpu(cpu, cpu_mask);
+ else
+ /* Pick one CPU from each domain instance to update MSR */
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
}
cpu = get_cpu();
/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
@@ -2079,8 +2088,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
- kernfs_put(rdtgrp->kn);
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
}
@@ -2139,13 +2147,11 @@ static int rdt_get_tree(struct fs_context *fc)
&kn_mongrp);
if (ret < 0)
goto out_info;
- kernfs_get(kn_mongrp);
ret = mkdir_mondata_all(rdtgroup_default.kn,
&rdtgroup_default, &kn_mondata);
if (ret < 0)
goto out_mongrp;
- kernfs_get(kn_mondata);
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
@@ -2357,7 +2363,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
if (atomic_read(&sentry->waitcount) != 0)
sentry->flags = RDT_DELETED;
else
- kfree(sentry);
+ rdtgroup_remove(sentry);
}
}
@@ -2399,7 +2405,7 @@ static void rmdir_all_sub(void)
if (atomic_read(&rdtgrp->waitcount) != 0)
rdtgrp->flags = RDT_DELETED;
else
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@@ -2499,11 +2505,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
if (IS_ERR(kn))
return PTR_ERR(kn);
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that kn is always accessible.
- */
- kernfs_get(kn);
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@@ -2838,8 +2839,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
/*
* kernfs_remove() will drop the reference count on "kn" which
* will free it. But we still need it to stick around for the
- * rdtgroup_kn_unlock(kn} call below. Take one extra reference
- * here, which will be dropped inside rdtgroup_kn_unlock().
+ * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
+ * which will be dropped by kernfs_put() in rdtgroup_remove().
*/
kernfs_get(kn);
@@ -2880,6 +2881,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
out_idfree:
free_rmid(rdtgrp->mon.rmid);
out_destroy:
+ kernfs_put(rdtgrp->kn);
kernfs_remove(rdtgrp->kn);
out_free_rgrp:
kfree(rdtgrp);
@@ -2892,7 +2894,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
kernfs_remove(rgrp->kn);
free_rmid(rgrp->mon.rmid);
- kfree(rgrp);
+ rdtgroup_remove(rgrp);
}
/*
@@ -3021,8 +3023,7 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
return -EPERM;
}
-static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
- cpumask_var_t tmpmask)
+static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
int cpu;
@@ -3049,33 +3050,21 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
list_del(&rdtgrp->mon.crdtgrp_list);
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
}
-static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
- struct rdtgroup *rdtgrp)
+static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
{
rdtgrp->flags = RDT_DELETED;
list_del(&rdtgrp->rdtgroup_list);
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
}
-static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
- cpumask_var_t tmpmask)
+static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
int cpu;
@@ -3102,7 +3091,7 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
closid_free(rdtgrp->closid);
free_rmid(rdtgrp->mon.rmid);
- rdtgroup_ctrl_remove(kn, rdtgrp);
+ rdtgroup_ctrl_remove(rdtgrp);
/*
* Free all the child monitor group rmids.
@@ -3139,13 +3128,13 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
rdtgrp != &rdtgroup_default) {
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+ ret = rdtgroup_ctrl_remove(rdtgrp);
} else {
- ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+ ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
}
} else if (rdtgrp->type == RDTMON_GROUP &&
is_mon_groups(parent_kn, kn->name)) {
- ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
+ ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
} else {
ret = -EPERM;
}
diff --git a/arch/x86/kernel/cpu/sgx/Makefile b/arch/x86/kernel/cpu/sgx/Makefile
new file mode 100644
index 000000000000..91d3dc784a29
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/Makefile
@@ -0,0 +1,5 @@
+obj-y += \
+ driver.o \
+ encl.o \
+ ioctl.o \
+ main.o
diff --git a/arch/x86/kernel/cpu/sgx/arch.h b/arch/x86/kernel/cpu/sgx/arch.h
new file mode 100644
index 000000000000..dd7602c44c72
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/arch.h
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright(c) 2016-20 Intel Corporation.
+ *
+ * Contains data structures defined by the SGX architecture. Data structures
+ * defined by the Linux software stack should not be placed here.
+ */
+#ifndef _ASM_X86_SGX_ARCH_H
+#define _ASM_X86_SGX_ARCH_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* The SGX specific CPUID function. */
+#define SGX_CPUID 0x12
+/* EPC enumeration. */
+#define SGX_CPUID_EPC 2
+/* An invalid EPC section, i.e. the end marker. */
+#define SGX_CPUID_EPC_INVALID 0x0
+/* A valid EPC section. */
+#define SGX_CPUID_EPC_SECTION 0x1
+/* The bitmask for the EPC section type. */
+#define SGX_CPUID_EPC_MASK GENMASK(3, 0)
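
These constants drive EPC discovery: subleaves of CPUID leaf 0x12, starting at
SGX_CPUID_EPC, are walked until the invalid-section marker is returned. A
hedged sketch of that loop (section registration is elided; this mirrors, but
is not, the patch's own enumeration code):

/* Sketch: walk the EPC section subleaves of the SGX CPUID leaf. */
static void __init sgx_walk_epc_sections(void)
{
	unsigned int eax, ebx, ecx, edx, type;
	int i;

	for (i = 0; ; i++) {
		cpuid_count(SGX_CPUID, SGX_CPUID_EPC + i,
			    &eax, &ebx, &ecx, &edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;			/* end marker, stop */

		if (type != SGX_CPUID_EPC_SECTION)
			continue;		/* unknown type, skip */

		/* eax/ebx carry the physical base, ecx/edx the size. */
	}
}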
+
+/**
+ * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
+ * %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not
+ * been completed yet.
+ * %SGX_INVALID_EINITTOKEN: EINITTOKEN is invalid and enclave signer's
+ * public key does not match IA32_SGXLEPUBKEYHASH.
+ * %SGX_UNMASKED_EVENT: An unmasked event, e.g. INTR, was received
+ */
+enum sgx_return_code {
+ SGX_NOT_TRACKED = 11,
+ SGX_INVALID_EINITTOKEN = 16,
+ SGX_UNMASKED_EVENT = 128,
+};
+
+/* The modulus size for 3072-bit RSA keys. */
+#define SGX_MODULUS_SIZE 384
+
+/**
+ * enum sgx_miscselect - additional information to an SSA frame
+ * %SGX_MISC_EXINFO: Report #PF or #GP to the SSA frame.
+ *
+ * Save State Area (SSA) is a stack inside the enclave used to store processor
+ * state when an exception or interrupt occurs. This enum defines additional
+ * information stored to an SSA frame.
+ */
+enum sgx_miscselect {
+ SGX_MISC_EXINFO = BIT(0),
+};
+
+#define SGX_MISC_RESERVED_MASK GENMASK_ULL(63, 1)
+
+#define SGX_SSA_GPRS_SIZE 184
+#define SGX_SSA_MISC_EXINFO_SIZE 16
+
+/**
+ * enum sgx_attributes - the attributes field in &struct sgx_secs
+ * %SGX_ATTR_INIT: Enclave can be entered (is initialized).
+ * %SGX_ATTR_DEBUG: Allow ENCLS(EDBGRD) and ENCLS(EDBGWR).
+ * %SGX_ATTR_MODE64BIT: Tell that this is a 64-bit enclave.
+ * %SGX_ATTR_PROVISIONKEY: Allow using provisioning keys for remote
+ * attestation.
+ * %SGX_ATTR_KSS: Allow using key separation and sharing (KSS).
+ * %SGX_ATTR_EINITTOKENKEY: Allow using a token-signing key that is used to
+ * sign cryptographic tokens that can be passed to
+ * EINIT as an authorization to run an enclave.
+ */
+enum sgx_attribute {
+ SGX_ATTR_INIT = BIT(0),
+ SGX_ATTR_DEBUG = BIT(1),
+ SGX_ATTR_MODE64BIT = BIT(2),
+ SGX_ATTR_PROVISIONKEY = BIT(4),
+ SGX_ATTR_EINITTOKENKEY = BIT(5),
+ SGX_ATTR_KSS = BIT(7),
+};
+
+#define SGX_ATTR_RESERVED_MASK (BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
+
+/**
+ * struct sgx_secs - SGX Enclave Control Structure (SECS)
+ * @size: size of the address space
+ * @base: base address of the address space
+ * @ssa_frame_size: size of an SSA frame
+ * @miscselect: additional information stored to an SSA frame
+ * @attributes: attributes for enclave
+ * @xfrm: XSave-Feature Request Mask (subset of XCR0)
+ * @mrenclave: SHA256-hash of the enclave contents
+ * @mrsigner: SHA256-hash of the public key used to sign the SIGSTRUCT
+ * @config_id: a user-defined value that is used in key derivation
+ * @isv_prod_id: a user-defined value that is used in key derivation
+ * @isv_svn: a user-defined value that is used in key derivation
+ * @config_svn: a user-defined value that is used in key derivation
+ *
+ * SGX Enclave Control Structure (SECS) is a special enclave page that is not
+ * visible in the address space. In fact, this structure defines the address
+ * range and other global attributes for the enclave and it is the first EPC
+ * page created for any enclave. It is moved from a temporary buffer to the EPC
+ * by means of the ENCLS[ECREATE] function.
+ */
+struct sgx_secs {
+ u64 size;
+ u64 base;
+ u32 ssa_frame_size;
+ u32 miscselect;
+ u8 reserved1[24];
+ u64 attributes;
+ u64 xfrm;
+ u32 mrenclave[8];
+ u8 reserved2[32];
+ u32 mrsigner[8];
+ u8 reserved3[32];
+ u32 config_id[16];
+ u16 isv_prod_id;
+ u16 isv_svn;
+ u16 config_svn;
+ u8 reserved4[3834];
+} __packed;
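
Since the SECS layout is architecturally fixed at one 4 KiB EPC page (the
field and reserved sizes above sum to exactly 4096 bytes), a compile-time
check is a cheap guard against padding mistakes; a minimal sketch, not part
of the patch:

static inline void sgx_secs_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct sgx_secs) != 4096);
}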
+
+/**
+ * enum sgx_tcs_flags - execution flags for TCS
+ * %SGX_TCS_DBGOPTIN: If enabled allows single-stepping and breakpoints
+ * inside an enclave. It is cleared by EADD but can
+ * be set later with EDBGWR.
+ */
+enum sgx_tcs_flags {
+ SGX_TCS_DBGOPTIN = 0x01,
+};
+
+#define SGX_TCS_RESERVED_MASK GENMASK_ULL(63, 1)
+#define SGX_TCS_RESERVED_SIZE 4024
+
+/**
+ * struct sgx_tcs - Thread Control Structure (TCS)
+ * @state: used to mark an entered TCS
+ * @flags: execution flags (cleared by EADD)
+ * @ssa_offset: SSA stack offset relative to the enclave base
+ * @ssa_index: the current SSA frame index (cleared by EADD)
+ * @nr_ssa_frames: the number of frames in the SSA stack
+ * @entry_offset: entry point offset relative to the enclave base
+ * @exit_addr: address outside the enclave to exit on an exception or
+ * interrupt
+ * @fs_offset: offset relative to the enclave base to become FS
+ * segment inside the enclave
+ * @gs_offset: offset relative to the enclave base to become GS
+ * segment inside the enclave
+ * @fs_limit: size to become a new FS-limit (only 32-bit enclaves)
+ * @gs_limit: size to become a new GS-limit (only 32-bit enclaves)
+ *
+ * Thread Control Structure (TCS) is an enclave page visible in its address
+ * space that defines an entry point inside the enclave. A thread enters an
+ * enclave by supplying the address of a TCS to ENCLU(EENTER). A TCS can be entered
+ * by only one thread at a time.
+ */
+struct sgx_tcs {
+ u64 state;
+ u64 flags;
+ u64 ssa_offset;
+ u32 ssa_index;
+ u32 nr_ssa_frames;
+ u64 entry_offset;
+ u64 exit_addr;
+ u64 fs_offset;
+ u64 gs_offset;
+ u32 fs_limit;
+ u32 gs_limit;
+ u8 reserved[SGX_TCS_RESERVED_SIZE];
+} __packed;
+
+/**
+ * struct sgx_pageinfo - an enclave page descriptor
+ * @addr: address of the enclave page
+ * @contents: pointer to the page contents
+ * @metadata: pointer either to a SECINFO or PCMD instance
+ * @secs: address of the SECS page
+ */
+struct sgx_pageinfo {
+ u64 addr;
+ u64 contents;
+ u64 metadata;
+ u64 secs;
+} __packed __aligned(32);
+
+
+/**
+ * enum sgx_page_type - bits in the SECINFO flags defining the page type
+ * %SGX_PAGE_TYPE_SECS: a SECS page
+ * %SGX_PAGE_TYPE_TCS: a TCS page
+ * %SGX_PAGE_TYPE_REG: a regular page
+ * %SGX_PAGE_TYPE_VA: a VA page
+ * %SGX_PAGE_TYPE_TRIM: a page in trimmed state
+ */
+enum sgx_page_type {
+ SGX_PAGE_TYPE_SECS,
+ SGX_PAGE_TYPE_TCS,
+ SGX_PAGE_TYPE_REG,
+ SGX_PAGE_TYPE_VA,
+ SGX_PAGE_TYPE_TRIM,
+};
+
+#define SGX_NR_PAGE_TYPES 5
+#define SGX_PAGE_TYPE_MASK GENMASK(7, 0)
+
+/**
+ * enum sgx_secinfo_flags - the flags field in &struct sgx_secinfo
+ * %SGX_SECINFO_R: allow read
+ * %SGX_SECINFO_W: allow write
+ * %SGX_SECINFO_X: allow execution
+ * %SGX_SECINFO_SECS: a SECS page
+ * %SGX_SECINFO_TCS: a TCS page
+ * %SGX_SECINFO_REG: a regular page
+ * %SGX_SECINFO_VA: a VA page
+ * %SGX_SECINFO_TRIM: a page in trimmed state
+ */
+enum sgx_secinfo_flags {
+ SGX_SECINFO_R = BIT(0),
+ SGX_SECINFO_W = BIT(1),
+ SGX_SECINFO_X = BIT(2),
+ SGX_SECINFO_SECS = (SGX_PAGE_TYPE_SECS << 8),
+ SGX_SECINFO_TCS = (SGX_PAGE_TYPE_TCS << 8),
+ SGX_SECINFO_REG = (SGX_PAGE_TYPE_REG << 8),
+ SGX_SECINFO_VA = (SGX_PAGE_TYPE_VA << 8),
+ SGX_SECINFO_TRIM = (SGX_PAGE_TYPE_TRIM << 8),
+};
+
+#define SGX_SECINFO_PERMISSION_MASK GENMASK_ULL(2, 0)
+#define SGX_SECINFO_PAGE_TYPE_MASK (SGX_PAGE_TYPE_MASK << 8)
+#define SGX_SECINFO_RESERVED_MASK ~(SGX_SECINFO_PERMISSION_MASK | \
+ SGX_SECINFO_PAGE_TYPE_MASK)
+
+/**
+ * struct sgx_secinfo - describes attributes of an EPC page
+ * @flags: permissions and type
+ *
+ * Used together with ENCLS leaves that add or modify an EPC page to an
+ * enclave to define page permissions and type.
+ */
+struct sgx_secinfo {
+ u64 flags;
+ u8 reserved[56];
+} __packed __aligned(64);
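
Putting the fields together: the page type lives in bits 15:8 and the
permissions in bits 2:0, so a regular read-write page is described as below
(a hedged sketch, not patch code):

/* SECINFO for a regular, read-write enclave page:
 * (SGX_PAGE_TYPE_REG << 8) | BIT(1) | BIT(0) == 0x203.
 * Every bit in SGX_SECINFO_RESERVED_MASK must stay zero.
 */
struct sgx_secinfo secinfo = {
	.flags = SGX_SECINFO_REG | SGX_SECINFO_R | SGX_SECINFO_W,
};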
+
+#define SGX_PCMD_RESERVED_SIZE 40
+
+/**
+ * struct sgx_pcmd - Paging Crypto Metadata (PCMD)
+ * @enclave_id: enclave identifier
+ * @mac: MAC over PCMD, page contents and isvsvn
+ *
+ * A PCMD is stored to regular memory for every swapped page. When ELDU loads
+ * the page back, it recalculates the MAC by using an isvsvn number stored in a
+ * VA page. Together these two structures bring integrity and rollback
+ * protection.
+ */
+struct sgx_pcmd {
+ struct sgx_secinfo secinfo;
+ u64 enclave_id;
+ u8 reserved[SGX_PCMD_RESERVED_SIZE];
+ u8 mac[16];
+} __packed __aligned(128);
+
+#define SGX_SIGSTRUCT_RESERVED1_SIZE 84
+#define SGX_SIGSTRUCT_RESERVED2_SIZE 20
+#define SGX_SIGSTRUCT_RESERVED3_SIZE 32
+#define SGX_SIGSTRUCT_RESERVED4_SIZE 12
+
+/**
+ * struct sgx_sigstruct_header - defines author of the enclave
+ * @header1: constant byte string
+ * @vendor: must be either 0x0000 or 0x8086
+ * @date: YYYYMMDD in BCD
+ * @header2: constant byte string
+ * @swdefined: software defined value
+ */
+struct sgx_sigstruct_header {
+ u64 header1[2];
+ u32 vendor;
+ u32 date;
+ u64 header2[2];
+ u32 swdefined;
+ u8 reserved1[84];
+} __packed;
+
+/**
+ * struct sgx_sigstruct_body - defines contents of the enclave
+ * @miscselect: additional information stored to an SSA frame
+ * @misc_mask: required miscselect in SECS
+ * @attributes: attributes for enclave
+ * @xfrm: XSave-Feature Request Mask (subset of XCR0)
+ * @attributes_mask: required attributes in SECS
+ * @xfrm_mask: required XFRM in SECS
+ * @mrenclave: SHA256-hash of the enclave contents
+ * @isvprodid: a user-defined value that is used in key derivation
+ * @isvsvn: a user-defined value that is used in key derivation
+ */
+struct sgx_sigstruct_body {
+ u32 miscselect;
+ u32 misc_mask;
+ u8 reserved2[20];
+ u64 attributes;
+ u64 xfrm;
+ u64 attributes_mask;
+ u64 xfrm_mask;
+ u8 mrenclave[32];
+ u8 reserved3[32];
+ u16 isvprodid;
+ u16 isvsvn;
+} __packed;
+
+/**
+ * struct sgx_sigstruct - an enclave signature
+ * @header: defines author of the enclave
+ * @modulus: the modulus of the public key
+ * @exponent: the exponent of the public key
+ * @signature: the signature calculated over the fields except modulus,
+ * @body: defines contents of the enclave
+ * @q1: a value used in RSA signature verification
+ * @q2: a value used in RSA signature verification
+ *
+ * Header and body are the parts that are actually signed. The remaining fields
+ * define the signature of the enclave.
+ */
+struct sgx_sigstruct {
+ struct sgx_sigstruct_header header;
+ u8 modulus[SGX_MODULUS_SIZE];
+ u32 exponent;
+ u8 signature[SGX_MODULUS_SIZE];
+ struct sgx_sigstruct_body body;
+ u8 reserved4[12];
+ u8 q1[SGX_MODULUS_SIZE];
+ u8 q2[SGX_MODULUS_SIZE];
+} __packed;
+
+#define SGX_LAUNCH_TOKEN_SIZE 304
+
+#endif /* _ASM_X86_SGX_ARCH_H */
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
new file mode 100644
index 000000000000..f2eac41bb4ff
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-20 Intel Corporation. */
+
+#include <linux/acpi.h>
+#include <linux/miscdevice.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include <linux/suspend.h>
+#include <asm/traps.h>
+#include "driver.h"
+#include "encl.h"
+
+u64 sgx_attributes_reserved_mask;
+u64 sgx_xfrm_reserved_mask = ~0x3;
+u32 sgx_misc_reserved_mask;
+
+static int sgx_open(struct inode *inode, struct file *file)
+{
+ struct sgx_encl *encl;
+ int ret;
+
+ encl = kzalloc(sizeof(*encl), GFP_KERNEL);
+ if (!encl)
+ return -ENOMEM;
+
+ kref_init(&encl->refcount);
+ xa_init(&encl->page_array);
+ mutex_init(&encl->lock);
+ INIT_LIST_HEAD(&encl->va_pages);
+ INIT_LIST_HEAD(&encl->mm_list);
+ spin_lock_init(&encl->mm_lock);
+
+ ret = init_srcu_struct(&encl->srcu);
+ if (ret) {
+ kfree(encl);
+ return ret;
+ }
+
+ file->private_data = encl;
+
+ return 0;
+}
+
+static int sgx_release(struct inode *inode, struct file *file)
+{
+ struct sgx_encl *encl = file->private_data;
+ struct sgx_encl_mm *encl_mm;
+
+ /*
+ * Drain the remaining mm_list entries. At this point the list contains
+	 * entries for processes that have closed the enclave file but have
+	 * not exited yet. Processes that have exited were already removed from
+	 * the list by sgx_mmu_notifier_release().
+ */
+ for ( ; ; ) {
+ spin_lock(&encl->mm_lock);
+
+ if (list_empty(&encl->mm_list)) {
+ encl_mm = NULL;
+ } else {
+ encl_mm = list_first_entry(&encl->mm_list,
+ struct sgx_encl_mm, list);
+ list_del_rcu(&encl_mm->list);
+ }
+
+ spin_unlock(&encl->mm_lock);
+
+ /* The enclave is no longer mapped by any mm. */
+ if (!encl_mm)
+ break;
+
+ synchronize_srcu(&encl->srcu);
+ mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
+ kfree(encl_mm);
+ }
+
+ kref_put(&encl->refcount, sgx_encl_release);
+ return 0;
+}
+
+static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sgx_encl *encl = file->private_data;
+ int ret;
+
+ ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
+ if (ret)
+ return ret;
+
+ ret = sgx_encl_mm_add(encl, vma->vm_mm);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &sgx_vm_ops;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vma->vm_private_data = encl;
+
+ return 0;
+}
+
+static unsigned long sgx_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ if ((flags & MAP_TYPE) == MAP_PRIVATE)
+ return -EINVAL;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+#ifdef CONFIG_COMPAT
+static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ return sgx_ioctl(filep, cmd, arg);
+}
+#endif
+
+static const struct file_operations sgx_encl_fops = {
+ .owner = THIS_MODULE,
+ .open = sgx_open,
+ .release = sgx_release,
+ .unlocked_ioctl = sgx_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = sgx_compat_ioctl,
+#endif
+ .mmap = sgx_mmap,
+ .get_unmapped_area = sgx_get_unmapped_area,
+};
+
+const struct file_operations sgx_provision_fops = {
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice sgx_dev_enclave = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sgx_enclave",
+ .nodename = "sgx_enclave",
+ .fops = &sgx_encl_fops,
+};
+
+static struct miscdevice sgx_dev_provision = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sgx_provision",
+ .nodename = "sgx_provision",
+ .fops = &sgx_provision_fops,
+};
+
+int __init sgx_drv_init(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ u64 attr_mask;
+ u64 xfrm_mask;
+ int ret;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
+ return -ENODEV;
+
+ cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
+
+ if (!(eax & 1)) {
+ pr_err("SGX disabled: SGX1 instruction support not available.\n");
+ return -ENODEV;
+ }
+
+ sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;
+
+ cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);
+
+ attr_mask = (((u64)ebx) << 32) + (u64)eax;
+ sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;
+
+ if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
+ xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
+ sgx_xfrm_reserved_mask = ~xfrm_mask;
+ }
+
+ ret = misc_register(&sgx_dev_enclave);
+ if (ret)
+ return ret;
+
+ ret = misc_register(&sgx_dev_provision);
+ if (ret) {
+ misc_deregister(&sgx_dev_enclave);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/arch/x86/kernel/cpu/sgx/driver.h b/arch/x86/kernel/cpu/sgx/driver.h
new file mode 100644
index 000000000000..4eddb4d571ef
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/driver.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARCH_SGX_DRIVER_H__
+#define __ARCH_SGX_DRIVER_H__
+
+#include <crypto/hash.h>
+#include <linux/kref.h>
+#include <linux/mmu_notifier.h>
+#include <linux/radix-tree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <uapi/asm/sgx.h>
+#include "sgx.h"
+
+#define SGX_EINIT_SPIN_COUNT 20
+#define SGX_EINIT_SLEEP_COUNT 50
+#define SGX_EINIT_SLEEP_TIME 20
+
+extern u64 sgx_attributes_reserved_mask;
+extern u64 sgx_xfrm_reserved_mask;
+extern u32 sgx_misc_reserved_mask;
+
+extern const struct file_operations sgx_provision_fops;
+
+long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+int sgx_drv_init(void);
+
+#endif /* __ARCH_SGX_DRIVER_H__ */
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
new file mode 100644
index 000000000000..ee50a5010277
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-20 Intel Corporation. */
+
+#include <linux/lockdep.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/shmem_fs.h>
+#include <linux/suspend.h>
+#include <linux/sched/mm.h>
+#include "arch.h"
+#include "encl.h"
+#include "encls.h"
+#include "sgx.h"
+
+/*
+ * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
+ * Pages" in the SDM.
+ */
+static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ struct sgx_epc_page *epc_page,
+ struct sgx_epc_page *secs_page)
+{
+ unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ struct sgx_encl *encl = encl_page->encl;
+ struct sgx_pageinfo pginfo;
+ struct sgx_backing b;
+ pgoff_t page_index;
+ int ret;
+
+ if (secs_page)
+ page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
+ else
+ page_index = PFN_DOWN(encl->size);
+
+ ret = sgx_encl_get_backing(encl, page_index, &b);
+ if (ret)
+ return ret;
+
+ pginfo.addr = encl_page->desc & PAGE_MASK;
+ pginfo.contents = (unsigned long)kmap_atomic(b.contents);
+ pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
+ b.pcmd_offset;
+
+ if (secs_page)
+ pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
+ else
+ pginfo.secs = 0;
+
+ ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
+ sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
+ if (ret) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "ELDU");
+
+ ret = -EFAULT;
+ }
+
+ kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
+ kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
+ sgx_encl_put_backing(&b, false);
+
+ return ret;
+}
+
+static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ struct sgx_epc_page *secs_page)
+{
+
+ unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ struct sgx_encl *encl = encl_page->encl;
+ struct sgx_epc_page *epc_page;
+ int ret;
+
+ epc_page = sgx_alloc_epc_page(encl_page, false);
+ if (IS_ERR(epc_page))
+ return epc_page;
+
+ ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
+ if (ret) {
+ sgx_free_epc_page(epc_page);
+ return ERR_PTR(ret);
+ }
+
+ sgx_free_va_slot(encl_page->va_page, va_offset);
+ list_move(&encl_page->va_page->list, &encl->va_pages);
+ encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ encl_page->epc_page = epc_page;
+
+ return epc_page;
+}
+
+static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+ unsigned long addr,
+ unsigned long vm_flags)
+{
+ unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ struct sgx_epc_page *epc_page;
+ struct sgx_encl_page *entry;
+
+ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
+ if (!entry)
+ return ERR_PTR(-EFAULT);
+
+ /*
+ * Verify that the faulted page has equal or higher build time
+ * permissions than the VMA permissions (i.e. the subset of {VM_READ,
+	 * VM_WRITE, VM_EXEC} in vma->vm_flags).
+ */
+ if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
+ return ERR_PTR(-EFAULT);
+
+ /* Entry successfully located. */
+ if (entry->epc_page) {
+ if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
+ return ERR_PTR(-EBUSY);
+
+ return entry;
+ }
+
+ if (!(encl->secs.epc_page)) {
+ epc_page = sgx_encl_eldu(&encl->secs, NULL);
+ if (IS_ERR(epc_page))
+ return ERR_CAST(epc_page);
+ }
+
+ epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
+ if (IS_ERR(epc_page))
+ return ERR_CAST(epc_page);
+
+ encl->secs_child_cnt++;
+ sgx_mark_page_reclaimable(entry->epc_page);
+
+ return entry;
+}
+
+static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
+{
+ unsigned long addr = (unsigned long)vmf->address;
+ struct vm_area_struct *vma = vmf->vma;
+ struct sgx_encl_page *entry;
+ unsigned long phys_addr;
+ struct sgx_encl *encl;
+ unsigned long pfn;
+ vm_fault_t ret;
+
+ encl = vma->vm_private_data;
+
+ /*
+ * It's very unlikely but possible that allocating memory for the
+ * mm_list entry of a forked process failed in sgx_vma_open(). When
+ * this happens, vm_private_data is set to NULL.
+ */
+ if (unlikely(!encl))
+ return VM_FAULT_SIGBUS;
+
+ mutex_lock(&encl->lock);
+
+ entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&encl->lock);
+
+ if (PTR_ERR(entry) == -EBUSY)
+ return VM_FAULT_NOPAGE;
+
+ return VM_FAULT_SIGBUS;
+ }
+
+ phys_addr = sgx_get_epc_phys_addr(entry->epc_page);
+
+ /* Check if another thread got here first to insert the PTE. */
+ if (!follow_pfn(vma, addr, &pfn)) {
+ mutex_unlock(&encl->lock);
+
+ return VM_FAULT_NOPAGE;
+ }
+
+ ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
+ if (ret != VM_FAULT_NOPAGE) {
+ mutex_unlock(&encl->lock);
+
+ return VM_FAULT_SIGBUS;
+ }
+
+ sgx_encl_test_and_clear_young(vma->vm_mm, entry);
+ mutex_unlock(&encl->lock);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void sgx_vma_open(struct vm_area_struct *vma)
+{
+ struct sgx_encl *encl = vma->vm_private_data;
+
+ /*
+ * It's possible but unlikely that vm_private_data is NULL. This can
+	 * happen in a grandchild of a process, when sgx_encl_mm_add() failed
+	 * to allocate memory in this callback.
+ */
+ if (unlikely(!encl))
+ return;
+
+ if (sgx_encl_mm_add(encl, vma->vm_mm))
+ vma->vm_private_data = NULL;
+}
+
+
+/**
+ * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
+ * @encl: an enclave pointer
+ * @start: lower bound of the address range, inclusive
+ * @end: upper bound of the address range, exclusive
+ * @vm_flags: VMA flags
+ *
+ * Iterate through the enclave pages contained within [@start, @end) to verify
+ * that the requested permissions (a subset of {VM_READ, VM_WRITE, VM_EXEC})
+ * do not exceed the build-time permissions of any enclave page within the
+ * given address range.
+ *
+ * An enclave creator must declare the strongest permissions that will be
+ * needed for each enclave page. This ensures that mappings have permissions
+ * identical to or weaker than the earlier declared permissions.
+ *
+ * Return: 0 on success, -EACCES otherwise
+ */
+int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+ unsigned long end, unsigned long vm_flags)
+{
+ unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ struct sgx_encl_page *page;
+ unsigned long count = 0;
+ int ret = 0;
+
+ XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
+
+ /*
+ * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
+ * conflict with the enclave page permissions.
+ */
+ if (current->personality & READ_IMPLIES_EXEC)
+ return -EACCES;
+
+ mutex_lock(&encl->lock);
+ xas_lock(&xas);
+ xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
+ if (~page->vm_max_prot_bits & vm_prot_bits) {
+ ret = -EACCES;
+ break;
+ }
+
+ /* Reschedule on every XA_CHECK_SCHED iteration. */
+ if (!(++count % XA_CHECK_SCHED)) {
+ xas_pause(&xas);
+ xas_unlock(&xas);
+ mutex_unlock(&encl->lock);
+
+ cond_resched();
+
+ mutex_lock(&encl->lock);
+ xas_lock(&xas);
+ }
+ }
+ xas_unlock(&xas);
+ mutex_unlock(&encl->lock);
+
+ return ret;
+}
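
The test ~page->vm_max_prot_bits & vm_prot_bits above is a subset check: it
is non-zero exactly when the VMA requests a permission bit the page was not
built with. Two illustrative cases:

/*
 * Subset check, schematically (not patch code):
 *
 *   vm_max_prot_bits = VM_READ | VM_EXEC
 *   vm_prot_bits     = VM_READ             ->  ~max & req == 0  (allowed)
 *   vm_prot_bits     = VM_READ | VM_WRITE  ->  VM_WRITE remains (-EACCES)
 */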
+
+static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags)
+{
+ return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
+}
+
+static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
+ unsigned long addr, void *data)
+{
+ unsigned long offset = addr & ~PAGE_MASK;
+ int ret;
+
+
+ ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
+ unsigned long addr, void *data)
+{
+ unsigned long offset = addr & ~PAGE_MASK;
+ int ret;
+
+ ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Load an enclave page to EPC if required, and take encl->lock.
+ */
+static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
+ unsigned long addr,
+ unsigned long vm_flags)
+{
+ struct sgx_encl_page *entry;
+
+ for ( ; ; ) {
+ mutex_lock(&encl->lock);
+
+ entry = sgx_encl_load_page(encl, addr, vm_flags);
+ if (PTR_ERR(entry) != -EBUSY)
+ break;
+
+ mutex_unlock(&encl->lock);
+ }
+
+ if (IS_ERR(entry))
+ mutex_unlock(&encl->lock);
+
+ return entry;
+}
+
+static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct sgx_encl *encl = vma->vm_private_data;
+ struct sgx_encl_page *entry = NULL;
+ char data[sizeof(unsigned long)];
+ unsigned long align;
+ int offset;
+ int cnt;
+ int ret = 0;
+ int i;
+
+ /*
+ * If process was forked, VMA is still there but vm_private_data is set
+ * to NULL.
+ */
+ if (!encl)
+ return -EFAULT;
+
+ if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
+ return -EFAULT;
+
+ for (i = 0; i < len; i += cnt) {
+ entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
+ vma->vm_flags);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ break;
+ }
+
+ align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
+ offset = (addr + i) & (sizeof(unsigned long) - 1);
+ cnt = sizeof(unsigned long) - offset;
+ cnt = min(cnt, len - i);
+
+ ret = sgx_encl_debug_read(encl, entry, align, data);
+ if (ret)
+ goto out;
+
+ if (write) {
+ memcpy(data + offset, buf + i, cnt);
+ ret = sgx_encl_debug_write(encl, entry, align, data);
+ if (ret)
+ goto out;
+ } else {
+ memcpy(buf + i, data + offset, cnt);
+ }
+
+out:
+ mutex_unlock(&encl->lock);
+
+ if (ret)
+ break;
+ }
+
+ return ret < 0 ? ret : i;
+}
+
+const struct vm_operations_struct sgx_vm_ops = {
+ .fault = sgx_vma_fault,
+ .mprotect = sgx_vma_mprotect,
+ .open = sgx_vma_open,
+ .access = sgx_vma_access,
+};
+
+/**
+ * sgx_encl_release - Destroy an enclave instance
+ * @ref: address of a kref inside &sgx_encl
+ *
+ * Used together with kref_put(). Frees all the resources associated with the
+ * enclave and the instance itself.
+ */
+void sgx_encl_release(struct kref *ref)
+{
+ struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
+ struct sgx_va_page *va_page;
+ struct sgx_encl_page *entry;
+ unsigned long index;
+
+ xa_for_each(&encl->page_array, index, entry) {
+ if (entry->epc_page) {
+ /*
+			 * The page and its xarray entry cannot be freed
+ * if the page is being held by the reclaimer.
+ */
+ if (sgx_unmark_page_reclaimable(entry->epc_page))
+ continue;
+
+ sgx_free_epc_page(entry->epc_page);
+ encl->secs_child_cnt--;
+ entry->epc_page = NULL;
+ }
+
+ kfree(entry);
+ }
+
+ xa_destroy(&encl->page_array);
+
+ if (!encl->secs_child_cnt && encl->secs.epc_page) {
+ sgx_free_epc_page(encl->secs.epc_page);
+ encl->secs.epc_page = NULL;
+ }
+
+ while (!list_empty(&encl->va_pages)) {
+ va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+ list);
+ list_del(&va_page->list);
+ sgx_free_epc_page(va_page->epc_page);
+ kfree(va_page);
+ }
+
+ if (encl->backing)
+ fput(encl->backing);
+
+ cleanup_srcu_struct(&encl->srcu);
+
+ WARN_ON_ONCE(!list_empty(&encl->mm_list));
+
+	/* Detect EPC page leaks. */
+ WARN_ON_ONCE(encl->secs_child_cnt);
+ WARN_ON_ONCE(encl->secs.epc_page);
+
+ kfree(encl);
+}
+
+/*
+ * 'mm' is exiting and no longer needs mmu notifications.
+ */
+static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
+ struct sgx_encl_mm *tmp = NULL;
+
+ /*
+ * The enclave itself can remove encl_mm. Note, objects can't be moved
+ * off an RCU protected list, but deletion is ok.
+ */
+ spin_lock(&encl_mm->encl->mm_lock);
+ list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
+ if (tmp == encl_mm) {
+ list_del_rcu(&encl_mm->list);
+ break;
+ }
+ }
+ spin_unlock(&encl_mm->encl->mm_lock);
+
+ if (tmp == encl_mm) {
+ synchronize_srcu(&encl_mm->encl->srcu);
+ mmu_notifier_put(mn);
+ }
+}
+
+static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
+{
+ struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
+
+ kfree(encl_mm);
+}
+
+static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
+ .release = sgx_mmu_notifier_release,
+ .free_notifier = sgx_mmu_notifier_free,
+};
+
+static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
+ struct mm_struct *mm)
+{
+ struct sgx_encl_mm *encl_mm = NULL;
+ struct sgx_encl_mm *tmp;
+ int idx;
+
+ idx = srcu_read_lock(&encl->srcu);
+
+ list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
+ if (tmp->mm == mm) {
+ encl_mm = tmp;
+ break;
+ }
+ }
+
+ srcu_read_unlock(&encl->srcu, idx);
+
+ return encl_mm;
+}
+
+int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+{
+ struct sgx_encl_mm *encl_mm;
+ int ret;
+
+ /*
+ * Even though a single enclave may be mapped into an mm more than once,
+ * each 'mm' only appears once on encl->mm_list. This is guaranteed by
+	 * holding the mm's mmap lock for write before an mm can be added to or
+	 * removed from an encl->mm_list.
+ */
+ mmap_assert_write_locked(mm);
+
+ /*
+ * It's possible that an entry already exists in the mm_list, because it
+ * is removed only on VFS release or process exit.
+ */
+ if (sgx_encl_find_mm(encl, mm))
+ return 0;
+
+ encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
+ if (!encl_mm)
+ return -ENOMEM;
+
+ encl_mm->encl = encl;
+ encl_mm->mm = mm;
+ encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;
+
+ ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
+ if (ret) {
+ kfree(encl_mm);
+ return ret;
+ }
+
+ spin_lock(&encl->mm_lock);
+ list_add_rcu(&encl_mm->list, &encl->mm_list);
+ /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
+ smp_wmb();
+ encl->mm_list_version++;
+ spin_unlock(&encl->mm_lock);
+
+ return 0;
+}
+
+static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
+ pgoff_t index)
+{
+ struct inode *inode = encl->backing->f_path.dentry->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+ gfp_t gfpmask = mapping_gfp_mask(mapping);
+
+ return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
+}
+
+/**
+ * sgx_encl_get_backing() - Pin the backing storage
+ * @encl: an enclave pointer
+ * @page_index: enclave page index
+ * @backing: data for accessing backing storage for the page
+ *
+ * Pin the backing storage pages for storing the encrypted contents and Paging
+ * Crypto MetaData (PCMD) of an enclave page.
+ *
+ * Return:
+ * 0 on success,
+ * -errno otherwise.
+ */
+int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ struct sgx_backing *backing)
+{
+ pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
+ struct page *contents;
+ struct page *pcmd;
+
+ contents = sgx_encl_get_backing_page(encl, page_index);
+ if (IS_ERR(contents))
+ return PTR_ERR(contents);
+
+ pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
+ if (IS_ERR(pcmd)) {
+ put_page(contents);
+ return PTR_ERR(pcmd);
+ }
+
+ backing->page_index = page_index;
+ backing->contents = contents;
+ backing->pcmd = pcmd;
+ backing->pcmd_offset =
+ (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
+ sizeof(struct sgx_pcmd);
+
+ return 0;
+}
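
The arithmetic behind those indices: sizeof(struct sgx_pcmd) is 128, so
PAGE_SIZE / 128 = 32 PCMD entries fit in one backing page, which is why
pcmd_index advances by page_index >> 5 and the offset is taken modulo 32.
A worked example with illustrative numbers (a 4 MiB enclave, i.e. 1024 pages,
looking up page_index = 70):

	pgoff_t pcmd_index = 1024 + 1 + (70 >> 5);		/* = 1027 */
	unsigned long off  = (70 & (4096 / 128 - 1)) * 128;	/* = 768  */

The "+ 1" skips the backing slot used for the SECS page itself.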
+
+/**
+ * sgx_encl_put_backing() - Unpin the backing storage
+ * @backing: data for accessing backing storage for the page
+ * @do_write: mark pages dirty
+ */
+void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
+{
+ if (do_write) {
+ set_page_dirty(backing->pcmd);
+ set_page_dirty(backing->contents);
+ }
+
+ put_page(backing->pcmd);
+ put_page(backing->contents);
+}
+
+static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
+ void *data)
+{
+ pte_t pte;
+ int ret;
+
+ ret = pte_young(*ptep);
+ if (ret) {
+ pte = pte_mkold(*ptep);
+ set_pte_at((struct mm_struct *)data, addr, ptep, pte);
+ }
+
+ return ret;
+}
+
+/**
+ * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
+ * @mm: mm_struct that is checked
+ * @page: enclave page to be tested for recent access
+ *
+ * Checks the Access (A) bit from the PTE corresponding to the enclave page and
+ * clears it.
+ *
+ * Return: 1 if the page has been recently accessed and 0 if not.
+ */
+int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+ struct sgx_encl_page *page)
+{
+ unsigned long addr = page->desc & PAGE_MASK;
+ struct sgx_encl *encl = page->encl;
+ struct vm_area_struct *vma;
+ int ret;
+
+ ret = sgx_encl_find(mm, addr, &vma);
+ if (ret)
+ return 0;
+
+ if (encl != vma->vm_private_data)
+ return 0;
+
+ ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
+ sgx_encl_test_and_clear_young_cb, vma->vm_mm);
+ if (ret < 0)
+ return 0;
+
+ return ret;
+}
+
+/**
+ * sgx_alloc_va_page() - Allocate a Version Array (VA) page
+ *
+ * Allocate a free EPC page and convert it to a Version Array (VA) page.
+ *
+ * Return:
+ * a VA page,
+ * -errno otherwise
+ */
+struct sgx_epc_page *sgx_alloc_va_page(void)
+{
+ struct sgx_epc_page *epc_page;
+ int ret;
+
+ epc_page = sgx_alloc_epc_page(NULL, true);
+ if (IS_ERR(epc_page))
+ return ERR_CAST(epc_page);
+
+ ret = __epa(sgx_get_epc_virt_addr(epc_page));
+ if (ret) {
+ WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
+ sgx_free_epc_page(epc_page);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return epc_page;
+}
+
+/**
+ * sgx_alloc_va_slot - allocate a VA slot
+ * @va_page: a &struct sgx_va_page instance
+ *
+ * Allocates a slot from a &struct sgx_va_page instance.
+ *
+ * Return: offset of the slot inside the VA page
+ */
+unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
+{
+ int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
+
+ if (slot < SGX_VA_SLOT_COUNT)
+ set_bit(slot, va_page->slots);
+
+ return slot << 3;
+}
+
+/**
+ * sgx_free_va_slot - free a VA slot
+ * @va_page: a &struct sgx_va_page instance
+ * @offset: offset of the slot inside the VA page
+ *
+ * Frees a slot from a &struct sgx_va_page instance.
+ */
+void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
+{
+ clear_bit(offset >> 3, va_page->slots);
+}
+
+/**
+ * sgx_va_page_full - is the VA page full?
+ * @va_page: a &struct sgx_va_page instance
+ *
+ * Return: true if all slots have been taken
+ */
+bool sgx_va_page_full(struct sgx_va_page *va_page)
+{
+ int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
+
+ return slot == SGX_VA_SLOT_COUNT;
+}
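
The slot arithmetic in these helpers follows from each VA slot being an 8-byte version counter: PAGE_SIZE / 8 = 512 slots per page, a byte offset of slot << 3 on allocation, and the inverse offset >> 3 on free. A minimal round-trip sketch (hypothetical, assuming the page is not already full):

    /* Hypothetical illustration of the VA slot offset round-trip. */
    static void example_va_slot_roundtrip(struct sgx_va_page *va_page)
    {
            unsigned int offset = sgx_alloc_va_slot(va_page);

            /* Offsets are 8-byte aligned and stay inside the 4 KiB VA page. */
            WARN_ON(offset & 7);
            WARN_ON(offset >= (SGX_VA_SLOT_COUNT << 3));

            sgx_free_va_slot(va_page, offset); /* clears bit offset >> 3 */
    }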
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
new file mode 100644
index 000000000000..d8d30ccbef4c
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2016-20 Intel Corporation.
+ *
+ * Contains the software defined data structures for enclaves.
+ */
+#ifndef _X86_ENCL_H
+#define _X86_ENCL_H
+
+#include <linux/cpumask.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/srcu.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+#include "sgx.h"
+
+/* 'desc' bits holding the offset in the VA (version array) page. */
+#define SGX_ENCL_PAGE_VA_OFFSET_MASK GENMASK_ULL(11, 3)
+
+/* 'desc' bit marking that the page is being reclaimed. */
+#define SGX_ENCL_PAGE_BEING_RECLAIMED BIT(3)
+
+struct sgx_encl_page {
+ unsigned long desc;
+ unsigned long vm_max_prot_bits;
+ struct sgx_epc_page *epc_page;
+ struct sgx_encl *encl;
+ struct sgx_va_page *va_page;
+};
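
While a page is swapped out, 'desc' packs the page's enclave address (bits 12 and up) together with its VA slot offset (bits 3-11). Given a struct sgx_encl_page *page, the two components decode as in this hypothetical fragment:

    unsigned long addr      = page->desc & PAGE_MASK;
    unsigned long va_offset = page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;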
+
+enum sgx_encl_flags {
+ SGX_ENCL_IOCTL = BIT(0),
+ SGX_ENCL_DEBUG = BIT(1),
+ SGX_ENCL_CREATED = BIT(2),
+ SGX_ENCL_INITIALIZED = BIT(3),
+};
+
+struct sgx_encl_mm {
+ struct sgx_encl *encl;
+ struct mm_struct *mm;
+ struct list_head list;
+ struct mmu_notifier mmu_notifier;
+};
+
+struct sgx_encl {
+ unsigned long base;
+ unsigned long size;
+ unsigned long flags;
+ unsigned int page_cnt;
+ unsigned int secs_child_cnt;
+ struct mutex lock;
+ struct xarray page_array;
+ struct sgx_encl_page secs;
+ unsigned long attributes;
+ unsigned long attributes_mask;
+
+ cpumask_t cpumask;
+ struct file *backing;
+ struct kref refcount;
+ struct list_head va_pages;
+ unsigned long mm_list_version;
+ struct list_head mm_list;
+ spinlock_t mm_lock;
+ struct srcu_struct srcu;
+};
+
+#define SGX_VA_SLOT_COUNT 512
+
+struct sgx_va_page {
+ struct sgx_epc_page *epc_page;
+ DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
+ struct list_head list;
+};
+
+struct sgx_backing {
+ pgoff_t page_index;
+ struct page *contents;
+ struct page *pcmd;
+ unsigned long pcmd_offset;
+};
+
+extern const struct vm_operations_struct sgx_vm_ops;
+
+static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
+ struct vm_area_struct **vma)
+{
+ struct vm_area_struct *result;
+
+ result = find_vma(mm, addr);
+ if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
+ return -EINVAL;
+
+ *vma = result;
+
+ return 0;
+}
+
+int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+ unsigned long end, unsigned long vm_flags);
+
+void sgx_encl_release(struct kref *ref);
+int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
+int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ struct sgx_backing *backing);
+void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
+int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+ struct sgx_encl_page *page);
+
+struct sgx_epc_page *sgx_alloc_va_page(void);
+unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
+void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
+bool sgx_va_page_full(struct sgx_va_page *va_page);
+
+#endif /* _X86_ENCL_H */
diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h
new file mode 100644
index 000000000000..443188fe7e70
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/encls.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X86_ENCLS_H
+#define _X86_ENCLS_H
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <asm/asm.h>
+#include <asm/traps.h>
+#include "sgx.h"
+
+enum sgx_encls_function {
+ ECREATE = 0x00,
+ EADD = 0x01,
+ EINIT = 0x02,
+ EREMOVE = 0x03,
+ EDGBRD = 0x04,
+ EDGBWR = 0x05,
+ EEXTEND = 0x06,
+ ELDU = 0x08,
+ EBLOCK = 0x09,
+ EPA = 0x0A,
+ EWB = 0x0B,
+ ETRACK = 0x0C,
+};
+
+/**
+ * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
+ *
+ * ENCLS has its own (positive value) error codes and also generates
+ * ENCLS specific #GP and #PF faults. And the ENCLS values get munged
+ * with system error codes as everything percolates back up the stack.
+ * Unfortunately (for us), we need to precisely identify each unique
+ * error code, e.g. the action taken if EWB fails varies based on the
+ * type of fault and on the exact SGX error code, i.e. we can't simply
+ * convert all faults to -EFAULT.
+ *
+ * To make all three error types coexist, we set bit 30 to identify an
+ * ENCLS fault. Bit 31 (technically bits N:31) is used to differentiate
+ * between positive (faults and SGX error codes) and negative (system
+ * error codes) values.
+ */
+#define ENCLS_FAULT_FLAG 0x40000000
+
+/* Retrieve the encoded trapnr from the specified return code. */
+#define ENCLS_TRAPNR(r) ((r) & ~ENCLS_FAULT_FLAG)
+
+/* Issue a WARN() about an ENCLS function. */
+#define ENCLS_WARN(r, name) { \
+ do { \
+ int _r = (r); \
+ WARN_ONCE(_r, "%s returned %d (0x%x)\n", (name), _r, _r); \
+ } while (0); \
+}
+
+/**
+ * encls_failed() - Check if an ENCLS function failed
+ * @ret: the return value of an ENCLS function call
+ *
+ * Check if an ENCLS function failed. This happens when the function causes a
+ * fault that is not caused by an EPCM conflict or when the function returns a
+ * non-zero value.
+ */
+static inline bool encls_failed(int ret)
+{
+ if (ret & ENCLS_FAULT_FLAG)
+ return ENCLS_TRAPNR(ret) != X86_TRAP_PF;
+
+ return !!ret;
+}
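
A return value therefore falls into one of three classes: a negative system errno, a positive SGX error code, or a trap number with ENCLS_FAULT_FLAG set. A hypothetical decoder, for illustration only:

    /* Hypothetical decoder for the three ENCLS return-value classes. */
    static void example_decode_encls_ret(int ret)
    {
            if (ret < 0)
                    pr_debug("system error: %d\n", ret);        /* -errno */
            else if (ret & ENCLS_FAULT_FLAG)
                    pr_debug("fault, trapnr: %d\n", ENCLS_TRAPNR(ret));
            else if (ret)
                    pr_debug("SGX error code: %d\n", ret);      /* e.g. SGX_NOT_TRACKED */
            else
                    pr_debug("success\n");
    }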
+
+/**
+ * __encls_ret_N - encode an ENCLS function that returns an error code in EAX
+ * @rax: function number
+ * @inputs: asm inputs for the function
+ *
+ * Emit assembly for an ENCLS function that returns an error code, e.g. EREMOVE.
+ * And because SGX isn't complex enough as it is, functions that return an error
+ * code also modify flags.
+ *
+ * Return:
+ * 0 on success,
+ * SGX error code on failure
+ */
+#define __encls_ret_N(rax, inputs...) \
+ ({ \
+ int ret; \
+ asm volatile( \
+ "1: .byte 0x0f, 0x01, 0xcf;\n\t" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE_FAULT(1b, 3b) \
+ : "=a"(ret) \
+ : "a"(rax), inputs \
+ : "memory", "cc"); \
+ ret; \
+ })
+
+#define __encls_ret_1(rax, rcx) \
+ ({ \
+ __encls_ret_N(rax, "c"(rcx)); \
+ })
+
+#define __encls_ret_2(rax, rbx, rcx) \
+ ({ \
+ __encls_ret_N(rax, "b"(rbx), "c"(rcx)); \
+ })
+
+#define __encls_ret_3(rax, rbx, rcx, rdx) \
+ ({ \
+ __encls_ret_N(rax, "b"(rbx), "c"(rcx), "d"(rdx)); \
+ })
+
+/**
+ * __encls_N - encode an ENCLS function that doesn't return an error code
+ * @rax: function number
+ * @rbx_out: optional output variable
+ * @inputs: asm inputs for the function
+ *
+ * Emit assembly for an ENCLS function that does not return an error code, e.g.
+ * ECREATE. Leaves without error codes either succeed or fault. @rbx_out is an
+ * optional parameter for use by EDGBRD, which returns the requested value in
+ * RBX.
+ *
+ * Return:
+ * 0 on success,
+ * trapnr with ENCLS_FAULT_FLAG set on fault
+ */
+#define __encls_N(rax, rbx_out, inputs...) \
+ ({ \
+ int ret; \
+ asm volatile( \
+ "1: .byte 0x0f, 0x01, 0xcf;\n\t" \
+ " xor %%eax,%%eax;\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE_FAULT(1b, 3b) \
+ : "=a"(ret), "=b"(rbx_out) \
+ : "a"(rax), inputs \
+ : "memory"); \
+ ret; \
+ })
+
+#define __encls_2(rax, rbx, rcx) \
+ ({ \
+ unsigned long ign_rbx_out; \
+ __encls_N(rax, ign_rbx_out, "b"(rbx), "c"(rcx)); \
+ })
+
+#define __encls_1_1(rax, data, rcx) \
+ ({ \
+ unsigned long rbx_out; \
+ int ret = __encls_N(rax, rbx_out, "c"(rcx)); \
+ if (!ret) \
+ data = rbx_out; \
+ ret; \
+ })
+
+static inline int __ecreate(struct sgx_pageinfo *pginfo, void *secs)
+{
+ return __encls_2(ECREATE, pginfo, secs);
+}
+
+static inline int __eextend(void *secs, void *addr)
+{
+ return __encls_2(EEXTEND, secs, addr);
+}
+
+static inline int __eadd(struct sgx_pageinfo *pginfo, void *addr)
+{
+ return __encls_2(EADD, pginfo, addr);
+}
+
+static inline int __einit(void *sigstruct, void *token, void *secs)
+{
+ return __encls_ret_3(EINIT, sigstruct, secs, token);
+}
+
+static inline int __eremove(void *addr)
+{
+ return __encls_ret_1(EREMOVE, addr);
+}
+
+static inline int __edbgwr(void *addr, unsigned long *data)
+{
+ return __encls_2(EDGBWR, *data, addr);
+}
+
+static inline int __edbgrd(void *addr, unsigned long *data)
+{
+ return __encls_1_1(EDGBRD, *data, addr);
+}
+
+static inline int __etrack(void *addr)
+{
+ return __encls_ret_1(ETRACK, addr);
+}
+
+static inline int __eldu(struct sgx_pageinfo *pginfo, void *addr,
+ void *va)
+{
+ return __encls_ret_3(ELDU, pginfo, addr, va);
+}
+
+static inline int __eblock(void *addr)
+{
+ return __encls_ret_1(EBLOCK, addr);
+}
+
+static inline int __epa(void *addr)
+{
+ unsigned long rbx = SGX_PAGE_TYPE_VA;
+
+ return __encls_2(EPA, rbx, addr);
+}
+
+static inline int __ewb(struct sgx_pageinfo *pginfo, void *addr,
+ void *va)
+{
+ return __encls_ret_3(EWB, pginfo, addr, va);
+}
+
+#endif /* _X86_ENCLS_H */
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
new file mode 100644
index 000000000000..90a5caf76939
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-20 Intel Corporation. */
+
+#include <asm/mman.h>
+#include <linux/mman.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/hashtable.h>
+#include <linux/highmem.h>
+#include <linux/ratelimit.h>
+#include <linux/sched/signal.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include "driver.h"
+#include "encl.h"
+#include "encls.h"
+
+static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
+{
+ struct sgx_va_page *va_page = NULL;
+ void *err;
+
+ BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
+ (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
+
+ if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
+ va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
+ if (!va_page)
+ return ERR_PTR(-ENOMEM);
+
+ va_page->epc_page = sgx_alloc_va_page();
+ if (IS_ERR(va_page->epc_page)) {
+ err = ERR_CAST(va_page->epc_page);
+ kfree(va_page);
+ return err;
+ }
+
+ WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
+ }
+ encl->page_cnt++;
+ return va_page;
+}
+
+static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
+{
+ encl->page_cnt--;
+
+ if (va_page) {
+ sgx_free_epc_page(va_page->epc_page);
+ list_del(&va_page->list);
+ kfree(va_page);
+ }
+}
+
+static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
+{
+ struct sgx_epc_page *secs_epc;
+ struct sgx_va_page *va_page;
+ struct sgx_pageinfo pginfo;
+ struct sgx_secinfo secinfo;
+ unsigned long encl_size;
+ struct file *backing;
+ long ret;
+
+ va_page = sgx_encl_grow(encl);
+ if (IS_ERR(va_page))
+ return PTR_ERR(va_page);
+ else if (va_page)
+ list_add(&va_page->list, &encl->va_pages);
+ /* else the tail page of the VA page list had free slots. */
+
+ /* The extra page goes to SECS. */
+ encl_size = secs->size + PAGE_SIZE;
+
+ backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
+ VM_NORESERVE);
+ if (IS_ERR(backing)) {
+ ret = PTR_ERR(backing);
+ goto err_out_shrink;
+ }
+
+ encl->backing = backing;
+
+ secs_epc = sgx_alloc_epc_page(&encl->secs, true);
+ if (IS_ERR(secs_epc)) {
+ ret = PTR_ERR(secs_epc);
+ goto err_out_backing;
+ }
+
+ encl->secs.epc_page = secs_epc;
+
+ pginfo.addr = 0;
+ pginfo.contents = (unsigned long)secs;
+ pginfo.metadata = (unsigned long)&secinfo;
+ pginfo.secs = 0;
+ memset(&secinfo, 0, sizeof(secinfo));
+
+ ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc));
+ if (ret) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ if (secs->attributes & SGX_ATTR_DEBUG)
+ set_bit(SGX_ENCL_DEBUG, &encl->flags);
+
+ encl->secs.encl = encl;
+ encl->base = secs->base;
+ encl->size = secs->size;
+ encl->attributes = secs->attributes;
+ encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS;
+
+ /* Set only after completion, as encl->lock has not been taken. */
+ set_bit(SGX_ENCL_CREATED, &encl->flags);
+
+ return 0;
+
+err_out:
+ sgx_free_epc_page(encl->secs.epc_page);
+ encl->secs.epc_page = NULL;
+
+err_out_backing:
+ fput(encl->backing);
+ encl->backing = NULL;
+
+err_out_shrink:
+ sgx_encl_shrink(encl, va_page);
+
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_create() - handler for %SGX_IOC_ENCLAVE_CREATE
+ * @encl: An enclave pointer.
+ * @arg: The ioctl argument.
+ *
+ * Allocate kernel data structures for the enclave and invoke ECREATE.
+ *
+ * Return:
+ * - 0: Success.
+ * - -EIO: ECREATE failed.
+ * - -errno: POSIX error.
+ */
+static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
+{
+ struct sgx_enclave_create create_arg;
+ void *secs;
+ int ret;
+
+ if (test_bit(SGX_ENCL_CREATED, &encl->flags))
+ return -EINVAL;
+
+ if (copy_from_user(&create_arg, arg, sizeof(create_arg)))
+ return -EFAULT;
+
+ secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!secs)
+ return -ENOMEM;
+
+ if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
+ ret = -EFAULT;
+ else
+ ret = sgx_encl_create(encl, secs);
+
+ kfree(secs);
+ return ret;
+}
+
+static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
+ unsigned long offset,
+ u64 secinfo_flags)
+{
+ struct sgx_encl_page *encl_page;
+ unsigned long prot;
+
+ encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
+ if (!encl_page)
+ return ERR_PTR(-ENOMEM);
+
+ encl_page->desc = encl->base + offset;
+ encl_page->encl = encl;
+
+ prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) |
+ _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
+ _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
+
+ /*
+ * TCS pages must always have RW set for CPU access, while the SECINFO
+ * permissions are *always* zero - the CPU ignores the user-provided
+ * values and silently overwrites them with zero permissions.
+ */
+ if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
+ prot |= PROT_READ | PROT_WRITE;
+
+ /* Calculate maximum of the VM flags for the page. */
+ encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
+
+ return encl_page;
+}
+
+static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
+{
+ u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
+ u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
+
+ if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
+ return -EINVAL;
+
+ if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
+ return -EINVAL;
+
+ /*
+ * The CPU silently zeroes the permissions, which means that we need to
+ * validate them ourselves.
+ */
+ if (pt == SGX_SECINFO_TCS && perm)
+ return -EINVAL;
+
+ if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
+ return -EINVAL;
+
+ if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __sgx_encl_add_page(struct sgx_encl *encl,
+ struct sgx_encl_page *encl_page,
+ struct sgx_epc_page *epc_page,
+ struct sgx_secinfo *secinfo, unsigned long src)
+{
+ struct sgx_pageinfo pginfo;
+ struct vm_area_struct *vma;
+ struct page *src_page;
+ int ret;
+
+ /* Deny noexec. */
+ vma = find_vma(current->mm, src);
+ if (!vma)
+ return -EFAULT;
+
+ if (!(vma->vm_flags & VM_MAYEXEC))
+ return -EACCES;
+
+ ret = get_user_pages(src, 1, 0, &src_page, NULL);
+ if (ret < 1)
+ return -EFAULT;
+
+ pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
+ pginfo.addr = encl_page->desc & PAGE_MASK;
+ pginfo.metadata = (unsigned long)secinfo;
+ pginfo.contents = (unsigned long)kmap_atomic(src_page);
+
+ ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));
+
+ kunmap_atomic((void *)pginfo.contents);
+ put_page(src_page);
+
+ return ret ? -EIO : 0;
+}
+
+/*
+ * If the caller requires measurement of the page as proof of its contents,
+ * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
+ * operation until the entire page is measured.
+ */
+static int __sgx_encl_extend(struct sgx_encl *encl,
+ struct sgx_epc_page *epc_page)
+{
+ unsigned long offset;
+ int ret;
+
+ for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) {
+ ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page),
+ sgx_get_epc_virt_addr(epc_page) + offset);
+ if (ret) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "EEXTEND");
+
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
+ unsigned long offset, struct sgx_secinfo *secinfo,
+ unsigned long flags)
+{
+ struct sgx_encl_page *encl_page;
+ struct sgx_epc_page *epc_page;
+ struct sgx_va_page *va_page;
+ int ret;
+
+ encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
+ if (IS_ERR(encl_page))
+ return PTR_ERR(encl_page);
+
+ epc_page = sgx_alloc_epc_page(encl_page, true);
+ if (IS_ERR(epc_page)) {
+ kfree(encl_page);
+ return PTR_ERR(epc_page);
+ }
+
+ va_page = sgx_encl_grow(encl);
+ if (IS_ERR(va_page)) {
+ ret = PTR_ERR(va_page);
+ goto err_out_free;
+ }
+
+ mmap_read_lock(current->mm);
+ mutex_lock(&encl->lock);
+
+ /*
+ * Adding to encl->va_pages must be done under encl->lock. Ditto for
+ * deleting (via sgx_encl_shrink()) in the error path.
+ */
+ if (va_page)
+ list_add(&va_page->list, &encl->va_pages);
+
+ /*
+ * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
+ * can't be gracefully unwound, while failure on EADD/EXTEND is limited
+ * to userspace errors (or kernel/hardware bugs).
+ */
+ ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
+ encl_page, GFP_KERNEL);
+ if (ret)
+ goto err_out_unlock;
+
+ ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
+ src);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Complete the "add" before doing the "extend" so that the "add"
+ * isn't in a half-baked state in the extremely unlikely scenario
+ * the enclave will be destroyed in response to EEXTEND failure.
+ */
+ encl_page->encl = encl;
+ encl_page->epc_page = epc_page;
+ encl->secs_child_cnt++;
+
+ if (flags & SGX_PAGE_MEASURE) {
+ ret = __sgx_encl_extend(encl, epc_page);
+ if (ret)
+ goto err_out;
+ }
+
+ sgx_mark_page_reclaimable(encl_page->epc_page);
+ mutex_unlock(&encl->lock);
+ mmap_read_unlock(current->mm);
+ return ret;
+
+err_out:
+ xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
+
+err_out_unlock:
+ sgx_encl_shrink(encl, va_page);
+ mutex_unlock(&encl->lock);
+ mmap_read_unlock(current->mm);
+
+err_out_free:
+ sgx_free_epc_page(epc_page);
+ kfree(encl_page);
+
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
+ * @encl: an enclave pointer
+ * @arg: a user pointer to a struct sgx_enclave_add_pages instance
+ *
+ * Add one or more pages to an uninitialized enclave, and optionally extend the
+ * measurement with the contents of the page. The SECINFO and measurement mask
+ * are applied to all pages.
+ *
+ * A SECINFO for a TCS is required to always contain zero permissions because
+ * the CPU silently zeros them. Allowing anything else would cause a mismatch in
+ * the measurement.
+ *
+ * mmap()'s protection bits are capped by the page permissions. For each page
+ * address, the maximum protection bits are computed with the following
+ * heuristics:
+ *
+ * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions.
+ * 2. A TCS page: PROT_R | PROT_W.
+ *
+ * mmap() is not allowed to surpass the minimum of the maximum protection bits
+ * within the given address range.
+ *
+ * The function deinitializes kernel data structures for the enclave and returns
+ * -EIO in any of the following conditions:
+ *
+ * - Enclave Page Cache (EPC), the physical memory holding enclaves, has
+ * been invalidated. This will cause EADD and EEXTEND to fail.
+ * - The source address is somehow corrupted while executing EADD.
+ *
+ * Return:
+ * - 0: Success.
+ * - -EACCES: The source page is located in a noexec partition.
+ * - -ENOMEM: Out of EPC pages.
+ * - -EINTR: The call was interrupted before data was processed.
+ * - -EIO: Either EADD or EEXTEND failed because of an invalid source
+ * address or a power cycle.
+ * - -errno: POSIX error.
+ */
+static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
+{
+ struct sgx_enclave_add_pages add_arg;
+ struct sgx_secinfo secinfo;
+ unsigned long c;
+ int ret;
+
+ if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
+ test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
+ return -EINVAL;
+
+ if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
+ return -EFAULT;
+
+ if (!IS_ALIGNED(add_arg.offset, PAGE_SIZE) ||
+ !IS_ALIGNED(add_arg.src, PAGE_SIZE))
+ return -EINVAL;
+
+ if (!add_arg.length || add_arg.length & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (add_arg.offset + add_arg.length - PAGE_SIZE >= encl->size)
+ return -EINVAL;
+
+ if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
+ sizeof(secinfo)))
+ return -EFAULT;
+
+ if (sgx_validate_secinfo(&secinfo))
+ return -EINVAL;
+
+ for (c = 0 ; c < add_arg.length; c += PAGE_SIZE) {
+ if (signal_pending(current)) {
+ if (!c)
+ ret = -ERESTARTSYS;
+
+ break;
+ }
+
+ if (need_resched())
+ cond_resched();
+
+ ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c,
+ &secinfo, add_arg.flags);
+ if (ret)
+ break;
+ }
+
+ add_arg.count = c;
+
+ if (copy_to_user(arg, &add_arg, sizeof(add_arg)))
+ return -EFAULT;
+
+ return ret;
+}
+
+static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
+ void *hash)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
+}
+
+static int sgx_get_key_hash(const void *modulus, void *hash)
+{
+ struct crypto_shash *tfm;
+ int ret;
+
+ tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ ret = __sgx_get_key_hash(tfm, modulus, hash);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
+ void *token)
+{
+ u64 mrsigner[4];
+ int i, j, k;
+ void *addr;
+ int ret;
+
+ /*
+ * Deny initializing enclaves with attributes (namely provisioning)
+ * that have not been explicitly allowed.
+ */
+ if (encl->attributes & ~encl->attributes_mask)
+ return -EACCES;
+
+ /*
+ * Attributes should not be enforced *only* against what's available on
+ * the platform (done in sgx_encl_create) but also checked and enforced
+ * against the enforcement mask in the sigstruct. For example, an enclave
+ * could opt in for the AVX bit in xfrm, yet still be loadable on a
+ * platform without AVX if sigstruct->body.attributes_mask does not turn
+ * that bit on.
+ */
+ if (sigstruct->body.attributes & sigstruct->body.attributes_mask &
+ sgx_attributes_reserved_mask)
+ return -EINVAL;
+
+ if (sigstruct->body.miscselect & sigstruct->body.misc_mask &
+ sgx_misc_reserved_mask)
+ return -EINVAL;
+
+ if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
+ sgx_xfrm_reserved_mask)
+ return -EINVAL;
+
+ ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
+ if (ret)
+ return ret;
+
+ mutex_lock(&encl->lock);
+
+ /*
+ * ENCLS[EINIT] is interruptible because it has such a high latency,
+ * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending,
+ * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be
+ * serviced.
+ */
+ for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
+ for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
+ addr = sgx_get_epc_virt_addr(encl->secs.epc_page);
+
+ preempt_disable();
+
+ for (k = 0; k < 4; k++)
+ wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]);
+
+ ret = __einit(sigstruct, token, addr);
+
+ preempt_enable();
+
+ if (ret == SGX_UNMASKED_EVENT)
+ continue;
+ else
+ break;
+ }
+
+ if (ret != SGX_UNMASKED_EVENT)
+ break;
+
+ msleep_interruptible(SGX_EINIT_SLEEP_TIME);
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto err_out;
+ }
+ }
+
+ if (ret & ENCLS_FAULT_FLAG) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "EINIT");
+
+ ret = -EIO;
+ } else if (ret) {
+ pr_debug("EINIT returned %d\n", ret);
+ ret = -EPERM;
+ } else {
+ set_bit(SGX_ENCL_INITIALIZED, &encl->flags);
+ }
+
+err_out:
+ mutex_unlock(&encl->lock);
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT
+ * @encl: an enclave pointer
+ * @arg: userspace pointer to a struct sgx_enclave_init instance
+ *
+ * Flush any outstanding enqueued EADD operations and perform EINIT. The
+ * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
+ * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
+ *
+ * Return:
+ * - 0: Success.
+ * - -EPERM: Invalid SIGSTRUCT.
+ * - -EIO: EINIT failed because of a power cycle.
+ * - -errno: POSIX error.
+ */
+static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
+{
+ struct sgx_sigstruct *sigstruct;
+ struct sgx_enclave_init init_arg;
+ struct page *initp_page;
+ void *token;
+ int ret;
+
+ if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
+ test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
+ return -EINVAL;
+
+ if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
+ return -EFAULT;
+
+ initp_page = alloc_page(GFP_KERNEL);
+ if (!initp_page)
+ return -ENOMEM;
+
+ sigstruct = kmap(initp_page);
+ token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
+ memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
+
+ if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct,
+ sizeof(*sigstruct))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * The 'vendor' field is a legacy field used with Intel-signed enclaves.
+ * The two values used to distinguish regular and architectural enclaves.
+ * The CPU still accepts only these values, but they carry no other
+ * meaning.
+ *
+ * Thus, reject any other values.
+ */
+ if (sigstruct->header.vendor != 0x0000 &&
+ sigstruct->header.vendor != 0x8086) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = sgx_encl_init(encl, sigstruct, token);
+
+out:
+ kunmap(initp_page);
+ __free_page(initp_page);
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_provision() - handler for %SGX_IOC_ENCLAVE_PROVISION
+ * @encl: an enclave pointer
+ * @arg: userspace pointer to a struct sgx_enclave_provision instance
+ *
+ * Allow ATTRIBUTE.PROVISION_KEY for an enclave by providing a file handle to
+ * /dev/sgx_provision.
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: Otherwise.
+ */
+static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
+{
+ struct sgx_enclave_provision params;
+ struct file *file;
+
+ if (copy_from_user(&params, arg, sizeof(params)))
+ return -EFAULT;
+
+ file = fget(params.fd);
+ if (!file)
+ return -EINVAL;
+
+ if (file->f_op != &sgx_provision_fops) {
+ fput(file);
+ return -EINVAL;
+ }
+
+ encl->attributes_mask |= SGX_ATTR_PROVISIONKEY;
+
+ fput(file);
+ return 0;
+}
+
+long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct sgx_encl *encl = filep->private_data;
+ int ret;
+
+ if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags))
+ return -EBUSY;
+
+ switch (cmd) {
+ case SGX_IOC_ENCLAVE_CREATE:
+ ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
+ break;
+ case SGX_IOC_ENCLAVE_ADD_PAGES:
+ ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
+ break;
+ case SGX_IOC_ENCLAVE_INIT:
+ ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
+ break;
+ case SGX_IOC_ENCLAVE_PROVISION:
+ ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ clear_bit(SGX_ENCL_IOCTL, &encl->flags);
+ return ret;
+}
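
From userspace, the three lifecycle ioctls are issued in order against an open enclave fd. A compressed sketch (hypothetical, error handling elided; the struct and field names come from the UAPI header added elsewhere in this series, so treat them as assumptions here):

    /* Hypothetical userspace sketch of the enclave build flow. */
    int fd = open("/dev/sgx_enclave", O_RDWR);

    struct sgx_enclave_create create = { .src = (__u64)&secs };
    ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &create);

    struct sgx_enclave_add_pages add = {
            .src     = (__u64)page_data,      /* page-aligned source buffer */
            .offset  = 0,                     /* offset inside the enclave */
            .length  = PAGE_SIZE,
            .secinfo = (__u64)&secinfo,
            .flags   = SGX_PAGE_MEASURE,      /* extend MRENCLAVE */
    };
    ioctl(fd, SGX_IOC_ENCLAVE_ADD_PAGES, &add); /* add.count = bytes processed */

    struct sgx_enclave_init init = { .sigstruct = (__u64)&sigstruct };
    ioctl(fd, SGX_IOC_ENCLAVE_INIT, &init);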
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
new file mode 100644
index 000000000000..c519fc5f6948
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-20 Intel Corporation. */
+
+#include <linux/freezer.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/ratelimit.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include "driver.h"
+#include "encl.h"
+#include "encls.h"
+
+struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
+static int sgx_nr_epc_sections;
+static struct task_struct *ksgxd_tsk;
+static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
+
+/*
+ * These variables are part of the state of the reclaimer, and must be accessed
+ * with sgx_reclaimer_lock acquired.
+ */
+static LIST_HEAD(sgx_active_page_list);
+
+static DEFINE_SPINLOCK(sgx_reclaimer_lock);
+
+/*
+ * Reset dirty EPC pages to the uninitialized state. The laundry list can be
+ * left with SECS pages whose child pages blocked EREMOVE.
+ */
+static void sgx_sanitize_section(struct sgx_epc_section *section)
+{
+ struct sgx_epc_page *page;
+ LIST_HEAD(dirty);
+ int ret;
+
+ /* init_laundry_list is thread-local, no need for a lock: */
+ while (!list_empty(&section->init_laundry_list)) {
+ if (kthread_should_stop())
+ return;
+
+ /* needed for access to ->page_list: */
+ spin_lock(&section->lock);
+
+ page = list_first_entry(&section->init_laundry_list,
+ struct sgx_epc_page, list);
+
+ ret = __eremove(sgx_get_epc_virt_addr(page));
+ if (!ret)
+ list_move(&page->list, &section->page_list);
+ else
+ list_move_tail(&page->list, &dirty);
+
+ spin_unlock(&section->lock);
+
+ cond_resched();
+ }
+
+ list_splice(&dirty, &section->init_laundry_list);
+}
+
+static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
+{
+ struct sgx_encl_page *page = epc_page->owner;
+ struct sgx_encl *encl = page->encl;
+ struct sgx_encl_mm *encl_mm;
+ bool ret = true;
+ int idx;
+
+ idx = srcu_read_lock(&encl->srcu);
+
+ list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+ if (!mmget_not_zero(encl_mm->mm))
+ continue;
+
+ mmap_read_lock(encl_mm->mm);
+ ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
+ mmap_read_unlock(encl_mm->mm);
+
+ mmput_async(encl_mm->mm);
+
+ if (!ret)
+ break;
+ }
+
+ srcu_read_unlock(&encl->srcu, idx);
+
+ if (!ret)
+ return false;
+
+ return true;
+}
+
+static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
+{
+ struct sgx_encl_page *page = epc_page->owner;
+ unsigned long addr = page->desc & PAGE_MASK;
+ struct sgx_encl *encl = page->encl;
+ unsigned long mm_list_version;
+ struct sgx_encl_mm *encl_mm;
+ struct vm_area_struct *vma;
+ int idx, ret;
+
+ do {
+ mm_list_version = encl->mm_list_version;
+
+ /* Pairs with smp_rmb() in sgx_encl_mm_add(). */
+ smp_rmb();
+
+ idx = srcu_read_lock(&encl->srcu);
+
+ list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+ if (!mmget_not_zero(encl_mm->mm))
+ continue;
+
+ mmap_read_lock(encl_mm->mm);
+
+ ret = sgx_encl_find(encl_mm->mm, addr, &vma);
+ if (!ret && encl == vma->vm_private_data)
+ zap_vma_ptes(vma, addr, PAGE_SIZE);
+
+ mmap_read_unlock(encl_mm->mm);
+
+ mmput_async(encl_mm->mm);
+ }
+
+ srcu_read_unlock(&encl->srcu, idx);
+ } while (unlikely(encl->mm_list_version != mm_list_version));
+
+ mutex_lock(&encl->lock);
+
+ ret = __eblock(sgx_get_epc_virt_addr(epc_page));
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "EBLOCK");
+
+ mutex_unlock(&encl->lock);
+}
+
+static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
+ struct sgx_backing *backing)
+{
+ struct sgx_pageinfo pginfo;
+ int ret;
+
+ pginfo.addr = 0;
+ pginfo.secs = 0;
+
+ pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
+ pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
+ backing->pcmd_offset;
+
+ ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
+
+ kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
+ backing->pcmd_offset));
+ kunmap_atomic((void *)(unsigned long)pginfo.contents);
+
+ return ret;
+}
+
+static void sgx_ipi_cb(void *info)
+{
+}
+
+static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
+{
+ cpumask_t *cpumask = &encl->cpumask;
+ struct sgx_encl_mm *encl_mm;
+ int idx;
+
+ /*
+ * Can race with sgx_encl_mm_add(), but ETRACK has already been
+ * executed, which means that the CPUs running in the new mm will enter
+ * into the enclave with a fresh epoch.
+ */
+ cpumask_clear(cpumask);
+
+ idx = srcu_read_lock(&encl->srcu);
+
+ list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+ if (!mmget_not_zero(encl_mm->mm))
+ continue;
+
+ cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));
+
+ mmput_async(encl_mm->mm);
+ }
+
+ srcu_read_unlock(&encl->srcu, idx);
+
+ return cpumask;
+}
+
+/*
+ * Swap a page out to regular memory after it has been transformed to the
+ * blocked state with EBLOCK, which means that it can no longer be referenced
+ * (no new TLB entries).
+ *
+ * The first attempt just tries to write the page, assuming that some other
+ * thread has already reset the count of threads inside the enclave with
+ * ETRACK and the previous thread count has drained to zero. The second
+ * attempt calls ETRACK before EWB. If that also fails, we kick all the HW
+ * threads out of the enclave and then do EWB, which is guaranteed to succeed.
+ */
+static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
+ struct sgx_backing *backing)
+{
+ struct sgx_encl_page *encl_page = epc_page->owner;
+ struct sgx_encl *encl = encl_page->encl;
+ struct sgx_va_page *va_page;
+ unsigned int va_offset;
+ void *va_slot;
+ int ret;
+
+ encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;
+
+ va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+ list);
+ va_offset = sgx_alloc_va_slot(va_page);
+ va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
+ if (sgx_va_page_full(va_page))
+ list_move_tail(&va_page->list, &encl->va_pages);
+
+ ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+ if (ret == SGX_NOT_TRACKED) {
+ ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
+ if (ret) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "ETRACK");
+ }
+
+ ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+ if (ret == SGX_NOT_TRACKED) {
+ /*
+ * Slow path, send IPIs to kick cpus out of the
+ * enclave. Note, it's imperative that the cpu
+ * mask is generated *after* ETRACK, else we'll
+ * miss cpus that entered the enclave between
+ * generating the mask and incrementing epoch.
+ */
+ on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+ sgx_ipi_cb, NULL, 1);
+ ret = __sgx_encl_ewb(epc_page, va_slot, backing);
+ }
+ }
+
+ if (ret) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "EWB");
+
+ sgx_free_va_slot(va_page, va_offset);
+ } else {
+ encl_page->desc |= va_offset;
+ encl_page->va_page = va_page;
+ }
+}
+
+static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+ struct sgx_backing *backing)
+{
+ struct sgx_encl_page *encl_page = epc_page->owner;
+ struct sgx_encl *encl = encl_page->encl;
+ struct sgx_backing secs_backing;
+ int ret;
+
+ mutex_lock(&encl->lock);
+
+ sgx_encl_ewb(epc_page, backing);
+ encl_page->epc_page = NULL;
+ encl->secs_child_cnt--;
+
+ if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
+ ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+ &secs_backing);
+ if (ret)
+ goto out;
+
+ sgx_encl_ewb(encl->secs.epc_page, &secs_backing);
+
+ sgx_free_epc_page(encl->secs.epc_page);
+ encl->secs.epc_page = NULL;
+
+ sgx_encl_put_backing(&secs_backing, true);
+ }
+
+out:
+ mutex_unlock(&encl->lock);
+}
+
+/*
+ * Take a fixed number of pages from the head of the active page pool and
+ * reclaim them to the enclave's private shmem files. Skip pages that have
+ * been accessed since the last scan, and move them to the tail of the active
+ * page pool so that pages get scanned in an LRU-like fashion.
+ *
+ * Batch-process a chunk of pages (at the moment 16) in order to reduce the
+ * number of IPIs and ETRACKs potentially required. sgx_encl_ewb() spreads the
+ * cost a bit across the HW threads with its three-stage EWB pipeline (EWB,
+ * ETRACK + EWB and IPI + EWB), but not sufficiently. Reclaiming one page at a
+ * time would also be problematic, as it would increase lock contention too
+ * much and halt forward progress.
+ */
+static void sgx_reclaim_pages(void)
+{
+ struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
+ struct sgx_backing backing[SGX_NR_TO_SCAN];
+ struct sgx_epc_section *section;
+ struct sgx_encl_page *encl_page;
+ struct sgx_epc_page *epc_page;
+ pgoff_t page_index;
+ int cnt = 0;
+ int ret;
+ int i;
+
+ spin_lock(&sgx_reclaimer_lock);
+ for (i = 0; i < SGX_NR_TO_SCAN; i++) {
+ if (list_empty(&sgx_active_page_list))
+ break;
+
+ epc_page = list_first_entry(&sgx_active_page_list,
+ struct sgx_epc_page, list);
+ list_del_init(&epc_page->list);
+ encl_page = epc_page->owner;
+
+ if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
+ chunk[cnt++] = epc_page;
+ else
+ /* The owner is freeing the page. No need to add the
+ * page back to the list of reclaimable pages.
+ */
+ epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
+ }
+ spin_unlock(&sgx_reclaimer_lock);
+
+ for (i = 0; i < cnt; i++) {
+ epc_page = chunk[i];
+ encl_page = epc_page->owner;
+
+ if (!sgx_reclaimer_age(epc_page))
+ goto skip;
+
+ page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
+ ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
+ if (ret)
+ goto skip;
+
+ mutex_lock(&encl_page->encl->lock);
+ encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
+ mutex_unlock(&encl_page->encl->lock);
+ continue;
+
+skip:
+ spin_lock(&sgx_reclaimer_lock);
+ list_add_tail(&epc_page->list, &sgx_active_page_list);
+ spin_unlock(&sgx_reclaimer_lock);
+
+ kref_put(&encl_page->encl->refcount, sgx_encl_release);
+
+ chunk[i] = NULL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ epc_page = chunk[i];
+ if (epc_page)
+ sgx_reclaimer_block(epc_page);
+ }
+
+ for (i = 0; i < cnt; i++) {
+ epc_page = chunk[i];
+ if (!epc_page)
+ continue;
+
+ encl_page = epc_page->owner;
+ sgx_reclaimer_write(epc_page, &backing[i]);
+ sgx_encl_put_backing(&backing[i], true);
+
+ kref_put(&encl_page->encl->refcount, sgx_encl_release);
+ epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
+
+ section = &sgx_epc_sections[epc_page->section];
+ spin_lock(&section->lock);
+ list_add_tail(&epc_page->list, &section->page_list);
+ section->free_cnt++;
+ spin_unlock(&section->lock);
+ }
+}
+
+static unsigned long sgx_nr_free_pages(void)
+{
+ unsigned long cnt = 0;
+ int i;
+
+ for (i = 0; i < sgx_nr_epc_sections; i++)
+ cnt += sgx_epc_sections[i].free_cnt;
+
+ return cnt;
+}
+
+static bool sgx_should_reclaim(unsigned long watermark)
+{
+ return sgx_nr_free_pages() < watermark &&
+ !list_empty(&sgx_active_page_list);
+}
+
+static int ksgxd(void *p)
+{
+ int i;
+
+ set_freezable();
+
+ /*
+ * Sanitize pages in order to recover from kexec(). The 2nd pass is
+ * required for SECS pages, whose child pages blocked EREMOVE.
+ */
+ for (i = 0; i < sgx_nr_epc_sections; i++)
+ sgx_sanitize_section(&sgx_epc_sections[i]);
+
+ for (i = 0; i < sgx_nr_epc_sections; i++) {
+ sgx_sanitize_section(&sgx_epc_sections[i]);
+
+ /* Should never happen. */
+ if (!list_empty(&sgx_epc_sections[i].init_laundry_list))
+ WARN(1, "EPC section %d has unsanitized pages.\n", i);
+ }
+
+ while (!kthread_should_stop()) {
+ if (try_to_freeze())
+ continue;
+
+ wait_event_freezable(ksgxd_waitq,
+ kthread_should_stop() ||
+ sgx_should_reclaim(SGX_NR_HIGH_PAGES));
+
+ if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
+ sgx_reclaim_pages();
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static bool __init sgx_page_reclaimer_init(void)
+{
+ struct task_struct *tsk;
+
+ tsk = kthread_run(ksgxd, NULL, "ksgxd");
+ if (IS_ERR(tsk))
+ return false;
+
+ ksgxd_tsk = tsk;
+
+ return true;
+}
+
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
+{
+ struct sgx_epc_page *page;
+
+ spin_lock(&section->lock);
+
+ if (list_empty(&section->page_list)) {
+ spin_unlock(&section->lock);
+ return NULL;
+ }
+
+ page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
+ list_del_init(&page->list);
+ section->free_cnt--;
+
+ spin_unlock(&section->lock);
+ return page;
+}
+
+/**
+ * __sgx_alloc_epc_page() - Allocate an EPC page
+ *
+ * Iterate through the EPC sections and hand out a free EPC page to the
+ * caller. When a page is no longer needed it must be released with
+ * sgx_free_epc_page().
+ *
+ * Return:
+ * an EPC page,
+ * -errno on error
+ */
+struct sgx_epc_page *__sgx_alloc_epc_page(void)
+{
+ struct sgx_epc_section *section;
+ struct sgx_epc_page *page;
+ int i;
+
+ for (i = 0; i < sgx_nr_epc_sections; i++) {
+ section = &sgx_epc_sections[i];
+
+ page = __sgx_alloc_epc_page_from_section(section);
+ if (page)
+ return page;
+ }
+
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * sgx_mark_page_reclaimable() - Mark a page as reclaimable
+ * @page: EPC page
+ *
+ * Mark a page as reclaimable and add it to the active page list. Pages
+ * are automatically removed from the active list when freed.
+ */
+void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
+{
+ spin_lock(&sgx_reclaimer_lock);
+ page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
+ list_add_tail(&page->list, &sgx_active_page_list);
+ spin_unlock(&sgx_reclaimer_lock);
+}
+
+/**
+ * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
+ * @page: EPC page
+ *
+ * Clear the reclaimable flag and remove the page from the active page list.
+ *
+ * Return:
+ * 0 on success,
+ * -EBUSY if the page is in the process of being reclaimed
+ */
+int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
+{
+ spin_lock(&sgx_reclaimer_lock);
+ if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
+ /* The page is being reclaimed. */
+ if (list_empty(&page->list)) {
+ spin_unlock(&sgx_reclaimer_lock);
+ return -EBUSY;
+ }
+
+ list_del(&page->list);
+ page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
+ }
+ spin_unlock(&sgx_reclaimer_lock);
+
+ return 0;
+}
+
+/**
+ * sgx_alloc_epc_page() - Allocate an EPC page
+ * @owner: the owner of the EPC page
+ * @reclaim: reclaim pages if necessary
+ *
+ * Iterate through the EPC sections and hand out a free EPC page to the
+ * caller. When a page is no longer needed it must be released with
+ * sgx_free_epc_page(). If @reclaim is set to true, reclaim pages directly
+ * when running out of free pages. The caller must not hold any mm locks
+ * when @reclaim is set to true.
+ *
+ * Finally, before returning to the caller, wake up ksgxd if the number of
+ * free pages has gone below the low watermark.
+ *
+ * Return:
+ * an EPC page,
+ * -errno on error
+ */
+struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
+{
+ struct sgx_epc_page *page;
+
+ for ( ; ; ) {
+ page = __sgx_alloc_epc_page();
+ if (!IS_ERR(page)) {
+ page->owner = owner;
+ break;
+ }
+
+ if (list_empty(&sgx_active_page_list))
+ return ERR_PTR(-ENOMEM);
+
+ if (!reclaim) {
+ page = ERR_PTR(-EBUSY);
+ break;
+ }
+
+ if (signal_pending(current)) {
+ page = ERR_PTR(-ERESTARTSYS);
+ break;
+ }
+
+ sgx_reclaim_pages();
+ cond_resched();
+ }
+
+ if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
+ wake_up(&ksgxd_waitq);
+
+ return page;
+}
+
+/**
+ * sgx_free_epc_page() - Free an EPC page
+ * @page: an EPC page
+ *
+ * Call EREMOVE for an EPC page and insert it back to the list of free pages.
+ */
+void sgx_free_epc_page(struct sgx_epc_page *page)
+{
+ struct sgx_epc_section *section = &sgx_epc_sections[page->section];
+ int ret;
+
+ WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);
+
+ ret = __eremove(sgx_get_epc_virt_addr(page));
+ if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
+ return;
+
+ spin_lock(&section->lock);
+ list_add_tail(&page->list, &section->page_list);
+ section->free_cnt++;
+ spin_unlock(&section->lock);
+}
+
+static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
+ unsigned long index,
+ struct sgx_epc_section *section)
+{
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long i;
+
+ section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
+ if (!section->virt_addr)
+ return false;
+
+ section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
+ if (!section->pages) {
+ memunmap(section->virt_addr);
+ return false;
+ }
+
+ section->phys_addr = phys_addr;
+ spin_lock_init(&section->lock);
+ INIT_LIST_HEAD(&section->page_list);
+ INIT_LIST_HEAD(&section->init_laundry_list);
+
+ for (i = 0; i < nr_pages; i++) {
+ section->pages[i].section = index;
+ section->pages[i].flags = 0;
+ section->pages[i].owner = NULL;
+ list_add_tail(&section->pages[i].list, &section->init_laundry_list);
+ }
+
+ section->free_cnt = nr_pages;
+ return true;
+}
+
+/*
+ * A section metric is concatenated from two CPUID words: bits 12-31 of @low
+ * define bits 12-31 of the metric, and bits 0-19 of @high define bits 32-51
+ * of the metric.
+ */
+static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
+{
+ return (low & GENMASK_ULL(31, 12)) +
+ ((high & GENMASK_ULL(19, 0)) << 32);
+}
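
A worked example with hypothetical CPUID words: low = 0x70200000 and high = 0x00000001 give (0x70200000 & GENMASK_ULL(31, 12)) + (0x1ULL << 32) = 0x170200000, i.e. a section base just above 4 GiB.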
+
+static bool __init sgx_page_cache_init(void)
+{
+ u32 eax, ebx, ecx, edx, type;
+ u64 pa, size;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
+ cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
+
+ type = eax & SGX_CPUID_EPC_MASK;
+ if (type == SGX_CPUID_EPC_INVALID)
+ break;
+
+ if (type != SGX_CPUID_EPC_SECTION) {
+ pr_err_once("Unknown EPC section type: %u\n", type);
+ break;
+ }
+
+ pa = sgx_calc_section_metric(eax, ebx);
+ size = sgx_calc_section_metric(ecx, edx);
+
+ pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);
+
+ if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
+ pr_err("No free memory for an EPC section\n");
+ break;
+ }
+
+ sgx_nr_epc_sections++;
+ }
+
+ if (!sgx_nr_epc_sections) {
+ pr_err("There are zero EPC sections.\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void __init sgx_init(void)
+{
+ int ret;
+ int i;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SGX))
+ return;
+
+ if (!sgx_page_cache_init())
+ return;
+
+ if (!sgx_page_reclaimer_init())
+ goto err_page_cache;
+
+ ret = sgx_drv_init();
+ if (ret)
+ goto err_kthread;
+
+ return;
+
+err_kthread:
+ kthread_stop(ksgxd_tsk);
+
+err_page_cache:
+ for (i = 0; i < sgx_nr_epc_sections; i++) {
+ vfree(sgx_epc_sections[i].pages);
+ memunmap(sgx_epc_sections[i].virt_addr);
+ }
+}
+
+device_initcall(sgx_init);
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
new file mode 100644
index 000000000000..5fa42d143feb
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X86_SGX_H
+#define _X86_SGX_H
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <asm/asm.h>
+#include "arch.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "sgx: " fmt
+
+#define SGX_MAX_EPC_SECTIONS 8
+#define SGX_EEXTEND_BLOCK_SIZE 256
+#define SGX_NR_TO_SCAN 16
+#define SGX_NR_LOW_PAGES 32
+#define SGX_NR_HIGH_PAGES 64
+
+/* Pages that are being tracked by the page reclaimer. */
+#define SGX_EPC_PAGE_RECLAIMER_TRACKED BIT(0)
+
+struct sgx_epc_page {
+ unsigned int section;
+ unsigned int flags;
+ struct sgx_encl_page *owner;
+ struct list_head list;
+};
+
+/*
+ * The firmware can define multiple chunks of EPC in different areas of
+ * physical memory, e.g. one for each NUMA node. This structure is used to
+ * store the EPC pages of one EPC section and the virtual memory area where
+ * the pages have been mapped.
+ *
+ * 'lock' must be held before accessing 'page_list' or 'free_cnt'.
+ */
+struct sgx_epc_section {
+ unsigned long phys_addr;
+ void *virt_addr;
+ struct sgx_epc_page *pages;
+
+ spinlock_t lock;
+ struct list_head page_list;
+ unsigned long free_cnt;
+
+ /*
+ * Pages which need EREMOVE run on them before they can be
+ * used. Only safe to be accessed in ksgxd and init code.
+ * Not protected by locks.
+ */
+ struct list_head init_laundry_list;
+};
+
+extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
+
+static inline unsigned long sgx_get_epc_phys_addr(struct sgx_epc_page *page)
+{
+ struct sgx_epc_section *section = &sgx_epc_sections[page->section];
+ unsigned long index;
+
+ index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
+
+ return section->phys_addr + index * PAGE_SIZE;
+}
+
+static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
+{
+ struct sgx_epc_section *section = &sgx_epc_sections[page->section];
+ unsigned long index;
+
+ index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
+
+ return section->virt_addr + index * PAGE_SIZE;
+}
+
+struct sgx_epc_page *__sgx_alloc_epc_page(void);
+void sgx_free_epc_page(struct sgx_epc_page *page);
+
+void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
+int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
+struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim);
+
+#endif /* _X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index d3a0791bc052..1068002c8532 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
unsigned int die_select_mask, die_level_siblings;
+ bool die_level_present = false;
int leaf;
leaf = detect_extended_topology_leaf(c);
@@ -126,6 +127,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) {
+ die_level_present = true;
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
@@ -139,8 +141,12 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
ht_mask_width) & core_select_mask;
- c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
- core_plus_mask_width) & die_select_mask;
+
+ if (die_level_present) {
+ c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
+ core_plus_mask_width) & die_select_mask;
+ }
+
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
die_plus_mask_width);
/*
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 3492aa36bf09..6f7b8cc1bc9f 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -74,10 +74,9 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
init_completion(&cmd.done);
for (; count; count -= 16) {
- call_single_data_t csd = {
- .func = cpuid_smp_cpuid,
- .info = &cmd,
- };
+ call_single_data_t csd;
+
+ INIT_CSD(&csd, cpuid_smp_cpuid, &cmd);
cmd.regs.eax = pos;
cmd.regs.ecx = pos >> 32;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 33ee47670b99..5fcac46aaf6b 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -13,8 +13,6 @@
#include <linux/uaccess.h>
-static void *kdump_buf_page;
-
static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
@@ -41,15 +39,11 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn)
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- *
- * Calling copy_to_user() in atomic context is not desirable. Hence first
- * copying the data to a pre-allocated kernel page and then copying to user
- * space in non-atomic context.
+ * Copy a page from "oldmem". For this page, there might be no pte mapped
+ * in the current kernel.
*/
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
{
void *vaddr;
@@ -59,38 +53,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!is_crashed_pfn_valid(pfn))
return -EFAULT;
- vaddr = kmap_atomic_pfn(pfn);
+ vaddr = kmap_local_pfn(pfn);
if (!userbuf) {
- memcpy(buf, (vaddr + offset), csize);
- kunmap_atomic(vaddr);
+ memcpy(buf, vaddr + offset, csize);
} else {
- if (!kdump_buf_page) {
- printk(KERN_WARNING "Kdump: Kdump buffer page not"
- " allocated\n");
- kunmap_atomic(vaddr);
- return -EFAULT;
- }
- copy_page(kdump_buf_page, vaddr);
- kunmap_atomic(vaddr);
- if (copy_to_user(buf, (kdump_buf_page + offset), csize))
- return -EFAULT;
+ if (copy_to_user(buf, vaddr + offset, csize))
+ csize = -EFAULT;
}
- return csize;
-}
+ kunmap_local(vaddr);
-static int __init kdump_buf_page_init(void)
-{
- int ret = 0;
-
- kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!kdump_buf_page) {
- printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
- " page\n");
- ret = -ENOMEM;
- }
-
- return ret;
+ return csize;
}
-arch_initcall(kdump_buf_page_init);
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index ddffd80f5c52..6a4cb71c2498 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -184,31 +184,31 @@ static unsigned int ioapic_id;
struct of_ioapic_type {
u32 out_type;
- u32 trigger;
- u32 polarity;
+ u32 is_level;
+ u32 active_low;
};
static struct of_ioapic_type of_ioapic_type[] =
{
{
- .out_type = IRQ_TYPE_EDGE_RISING,
- .trigger = IOAPIC_EDGE,
- .polarity = 1,
+ .out_type = IRQ_TYPE_EDGE_FALLING,
+ .is_level = 0,
+ .active_low = 1,
},
{
- .out_type = IRQ_TYPE_LEVEL_LOW,
- .trigger = IOAPIC_LEVEL,
- .polarity = 0,
+ .out_type = IRQ_TYPE_LEVEL_HIGH,
+ .is_level = 1,
+ .active_low = 0,
},
{
- .out_type = IRQ_TYPE_LEVEL_HIGH,
- .trigger = IOAPIC_LEVEL,
- .polarity = 1,
+ .out_type = IRQ_TYPE_LEVEL_LOW,
+ .is_level = 1,
+ .active_low = 1,
},
{
- .out_type = IRQ_TYPE_EDGE_FALLING,
- .trigger = IOAPIC_EDGE,
- .polarity = 0,
+ .out_type = IRQ_TYPE_EDGE_RISING,
+ .is_level = 0,
+ .active_low = 0,
},
};
@@ -228,7 +228,7 @@ static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
return -EINVAL;
it = &of_ioapic_type[type_index];
- ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
+ ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->is_level, it->active_low);
tmp.devid = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
tmp.ioapic.pin = fwspec->param[0];
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 25c06b67e7e0..299c20f0a38b 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -78,6 +78,9 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
if (!user_mode(regs))
return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
+ /* The user space code from other tasks cannot be accessed. */
+ if (regs != task_pt_regs(current))
+ return -EPERM;
/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
@@ -85,6 +88,12 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
return -EINVAL;
+ /*
+ * Even though it is named copy_from_user_nmi(), this can be invoked from
+ * other contexts and will not try to resolve a pagefault, which is
+ * the correct thing to do here as this code can be called from any
+ * context.
+ */
return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}
@@ -115,13 +124,19 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
u8 opcodes[OPCODE_BUFSIZE];
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
- if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
- printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
- loglvl, prologue);
- } else {
+ switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
+ case 0:
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
__stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
+ break;
+ case -EPERM:
+ /* No access to the user space stack of other tasks. Ignore. */
+ break;
+ default:
+ printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
+ loglvl, prologue);
+ break;
}
}
@@ -168,7 +183,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
}
}
-void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, const char *log_lvl)
{
struct unwind_state state;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 05e117137b45..5e9beb77cafd 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -37,7 +37,6 @@
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
-#include <asm/desc.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev-es.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7eb2a1c87969..04bddaaba8e2 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -26,15 +26,6 @@
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>
-#ifdef CONFIG_PARAVIRT_XXL
-#include <asm/asm-offsets.h>
-#include <asm/paravirt.h>
-#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg
-#else
-#define INTERRUPT_RETURN iretq
-#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg
-#endif
-
/*
* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
* because we need identity-mapped pages.
@@ -161,6 +152,21 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
/* Setup early boot stage 4-/5-level pagetables. */
addq phys_base(%rip), %rax
+
+ /*
+ * For SEV guests: Verify that the C-bit is correct. A malicious
+ * hypervisor could lie about the C-bit position to perform a ROP
+ * attack on the guest by writing to the unencrypted stack and
+ * waiting for the next RET instruction.
+ * %rsi carries pointer to realmode data and is callee-clobbered. Save
+ * and restore it.
+ */
+ pushq %rsi
+ movq %rax, %rdi
+ call sev_verify_cbit
+ popq %rsi
+
+ /* Switch to new page-table */
movq %rax, %cr3
/* Ensure I am executing from virtual addresses */
@@ -279,6 +285,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
SYM_CODE_END(secondary_startup_64)
#include "verify_cpu.S"
+#include "sev_verify_cbit.S"
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -524,21 +531,19 @@ SYM_DATA_END(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
/*
- * 512 MB kernel mapping. We spend a full page on this pagetable
- * anyway.
+ * Kernel high mapping.
*
- * The kernel code+data+bss must not be bigger than that.
+ * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
+ * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
+ * 512 MiB otherwise.
*
- * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
- * If you want to increase this then increase MODULES_VADDR
- * too.)
+ * (NOTE: after that starts the module area, see MODULES_VADDR.)
*
- * This table is eventually used by the kernel during normal
- * runtime. Care must be taken to clear out undesired bits
- * later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
+ * This table is eventually used by the kernel during normal runtime.
+ * Care must be taken to clear out undesired bits later, like _PAGE_RW
+ * or _PAGE_GLOBAL in some cases.
*/
- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
- KERNEL_IMAGE_SIZE/PMD_SIZE)
+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 7a50f0b62a70..08651a4e6aa0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -7,6 +7,7 @@
#include <linux/cpu.h>
#include <linux/irq.h>
+#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
@@ -50,7 +51,7 @@ unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
bool hpet_msi_disable;
-#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_GENERIC_MSI_IRQ
static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel);
static struct irq_domain *hpet_domain;
#endif
@@ -467,9 +468,8 @@ static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
/*
* HPET MSI Support
*/
-#ifdef CONFIG_PCI_MSI
-
-void hpet_msi_unmask(struct irq_data *data)
+#ifdef CONFIG_GENERIC_MSI_IRQ
+static void hpet_msi_unmask(struct irq_data *data)
{
struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
unsigned int cfg;
@@ -479,7 +479,7 @@ void hpet_msi_unmask(struct irq_data *data)
hpet_writel(cfg, HPET_Tn_CFG(hc->num));
}
-void hpet_msi_mask(struct irq_data *data)
+static void hpet_msi_mask(struct irq_data *data)
{
struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
unsigned int cfg;
@@ -489,12 +489,122 @@ void hpet_msi_mask(struct irq_data *data)
hpet_writel(cfg, HPET_Tn_CFG(hc->num));
}
-void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
+static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
{
hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num));
hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
}
+static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
+}
+
+static struct irq_chip hpet_msi_controller __ro_after_init = {
+ .name = "HPET-MSI",
+ .irq_unmask = hpet_msi_unmask,
+ .irq_mask = hpet_msi_mask,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_affinity = msi_domain_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_write_msi_msg = hpet_msi_write_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int hpet_msi_init(struct irq_domain *domain,
+ struct msi_domain_info *info, unsigned int virq,
+ irq_hw_number_t hwirq, msi_alloc_info_t *arg)
+{
+ irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+ irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
+ handle_edge_irq, arg->data, "edge");
+
+ return 0;
+}
+
+static void hpet_msi_free(struct irq_domain *domain,
+ struct msi_domain_info *info, unsigned int virq)
+{
+ irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
+}
+
+static struct msi_domain_ops hpet_msi_domain_ops = {
+ .msi_init = hpet_msi_init,
+ .msi_free = hpet_msi_free,
+};
+
+static struct msi_domain_info hpet_msi_domain_info = {
+ .ops = &hpet_msi_domain_ops,
+ .chip = &hpet_msi_controller,
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS,
+};
+
+static struct irq_domain *hpet_create_irq_domain(int hpet_id)
+{
+ struct msi_domain_info *domain_info;
+ struct irq_domain *parent, *d;
+ struct fwnode_handle *fn;
+ struct irq_fwspec fwspec;
+
+ if (x86_vector_domain == NULL)
+ return NULL;
+
+ domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
+ if (!domain_info)
+ return NULL;
+
+ *domain_info = hpet_msi_domain_info;
+ domain_info->data = (void *)(long)hpet_id;
+
+ fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
+ hpet_id);
+ if (!fn) {
+ kfree(domain_info);
+ return NULL;
+ }
+
+ fwspec.fwnode = fn;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hpet_id;
+
+ parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
+ if (!parent) {
+ irq_domain_free_fwnode(fn);
+ kfree(domain_info);
+ return NULL;
+ }
+ if (parent != x86_vector_domain)
+ hpet_msi_controller.name = "IR-HPET-MSI";
+
+ d = msi_create_irq_domain(fn, domain_info, parent);
+ if (!d) {
+ irq_domain_free_fwnode(fn);
+ kfree(domain_info);
+ }
+ return d;
+}
+
+static inline int hpet_dev_id(struct irq_domain *domain)
+{
+ struct msi_domain_info *info = msi_get_domain_info(domain);
+
+ return (int)(long)info->data;
+}
+
+static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
+ int dev_num)
+{
+ struct irq_alloc_info info;
+
+ init_irq_alloc_info(&info, NULL);
+ info.type = X86_IRQ_ALLOC_TYPE_HPET;
+ info.data = hc;
+ info.devid = hpet_dev_id(domain);
+ info.hwirq = dev_num;
+
+ return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
+}
+
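hpet_create_irq_domain() and hpet_assign_irq() are designed to be used together: the MSI domain is created once per HPET block, then each channel allocates its Linux IRQ from it. A sketch of an intended caller (hypothetical function name; hpet_domain and hpet_blockid are the globals declared earlier in this file):

static int hpet_setup_msi_channel(struct hpet_channel *hc, int num)
{
	if (!hpet_domain) {
		hpet_domain = hpet_create_irq_domain(hpet_blockid);
		if (!hpet_domain)
			return -ENODEV;
	}
	/* returns the allocated virq (> 0) or a negative errno */
	return hpet_assign_irq(hpet_domain, hc, num);
}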
static int hpet_clkevt_msi_resume(struct clock_event_device *evt)
{
struct hpet_channel *hc = clockevent_to_channel(evt);
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 57c2ecf43134..ce831f9448e7 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -200,8 +200,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
/* Copying screen_info will do? */
- memcpy(&params->screen_info, &boot_params.screen_info,
- sizeof(struct screen_info));
+ memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info));
/* Fill in memsize later */
params->screen_info.ext_mem_k = 0;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 547c7abb39f5..a65e9e97857f 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -864,6 +864,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
p->ainsn.boostable = true;
goto no_change;
}
+ break;
default:
break;
}
@@ -937,6 +938,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* So clear it by resetting the current kprobe:
*/
regs->flags &= ~X86_EFLAGS_TF;
+ /*
+ * Since the single step (trap) has been cancelled,
+ * we need to restore BTF here.
+ */
+ restore_btf();
/*
* If the TF flag was set before the kprobe hit,
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 041f0b50bc27..08eb23074f92 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -272,6 +272,19 @@ static int insn_is_indirect_jump(struct insn *insn)
return ret;
}
+static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
+{
+ unsigned char ops;
+
+ for (; addr < eaddr; addr++) {
+ if (get_kernel_nofault(ops, (void *)addr) < 0 ||
+ ops != INT3_INSN_OPCODE)
+ return false;
+ }
+
+ return true;
+}
+
/* Decode whole function to ensure any instructions don't jump into target */
static int can_optimize(unsigned long paddr)
{
@@ -310,9 +323,14 @@ static int can_optimize(unsigned long paddr)
return 0;
kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
insn_get_length(&insn);
- /* Another subsystem puts a breakpoint */
+ /*
+ * If an unknown breakpoint is detected, it could be a padding
+ * INT3 between functions. Check that all of the remaining
+ * bytes are also INT3.
+ */
if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
- return 0;
+ return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
+
/* Recover address */
insn.kaddr = (void *)addr;
insn.next_byte = (void *)(addr + insn.length);
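
The padding check is simple enough to stand alone; the same logic as a self-contained user-space sketch (INT3_OPCODE stands in for the kernel's INT3_INSN_OPCODE, and a plain dereference replaces get_kernel_nofault()):

#include <stdbool.h>

#define INT3_OPCODE 0xcc

/* true iff every byte in [addr, eaddr) is an INT3 padding byte */
static bool is_padding_int3(const unsigned char *addr,
			    const unsigned char *eaddr)
{
	for (; addr < eaddr; addr++)
		if (*addr != INT3_OPCODE)
			return false;
	return true;
}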
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 7f57ede3cb8e..5e78e01ca3b4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -740,6 +740,11 @@ static void __init kvm_apic_init(void)
#endif
}
+static bool __init kvm_msi_ext_dest_id(void)
+{
+ return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
+}
+
static void __init kvm_init_platform(void)
{
kvmclock_init();
@@ -769,6 +774,7 @@ const __initconst struct hypervisor_x86 x86_hyper_kvm = {
.type = X86_HYPER_KVM,
.init.guest_late_init = kvm_guest_init,
.init.x2apic_available = kvm_para_available,
+ .init.msi_ext_dest_id = kvm_msi_ext_dest_id,
.init.init_platform = kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
.runtime.sev_es_hcall_prepare = kvm_sev_es_hcall_prepare,
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index c0d409810658..8a67d1fa8dc5 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -99,11 +99,9 @@ static int filter_write(u32 reg)
if (!__ratelimit(&fw_rs))
return 0;
- if (reg == MSR_IA32_ENERGY_PERF_BIAS)
- return 0;
-
- pr_err("Write to unrecognized MSR 0x%x by %s (pid: %d). Please report to x86@kernel.org.\n",
- reg, current->comm, current->pid);
+ pr_warn("Write to unrecognized MSR 0x%x by %s (pid: %d).\n",
+ reg, current->comm, current->pid);
+ pr_warn("See https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/about for details.\n");
return 0;
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 4bc77aaf1303..bf250a339655 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
DEFINE_IDTENTRY_RAW(exc_nmi)
{
- bool irq_state;
+ irqentry_state_t irq_state;
/*
* Re-enable NMIs right here when running as an SEV-ES guest. This might
@@ -502,14 +502,14 @@ nmi_restart:
this_cpu_write(nmi_dr7, local_db_save());
- irq_state = idtentry_enter_nmi(regs);
+ irq_state = irqentry_nmi_enter(regs);
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
- idtentry_exit_nmi(regs, irq_state);
+ irqentry_nmi_exit(regs, irq_state);
local_db_restore(this_cpu_read(nmi_dr7));
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index bb7e1132290b..624703af80a1 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -101,8 +101,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
@@ -123,18 +122,26 @@ int perf_reg_validate(u64 mask)
u64 perf_reg_abi(struct task_struct *task)
{
- if (test_tsk_thread_flag(task, TIF_IA32))
+ if (!user_64bit_mode(task_pt_regs(task)))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}
+static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs);
+
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
+ struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs);
struct pt_regs *user_regs = task_pt_regs(current);
+ if (!in_nmi()) {
+ regs_user->regs = user_regs;
+ regs_user->abi = perf_reg_abi(current);
+ return;
+ }
+
/*
* If we're in an NMI that interrupted task_pt_regs setup, then
* we can't sample user regs at all. This check isn't really
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ba4593a913fa..145a7ac0c19a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -685,7 +685,7 @@ void arch_cpu_idle(void)
*/
void __cpuidle default_idle(void)
{
- safe_halt();
+ raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
@@ -736,6 +736,8 @@ void stop_this_cpu(void *dummy)
/*
* AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
* states (local apic timer and TSC stop).
+ *
+ * XXX this function is completely buggered vs RCU and tracing.
*/
static void amd_e400_idle(void)
{
@@ -757,9 +759,9 @@ static void amd_e400_idle(void)
* The switch back from broadcast mode needs to be called with
* interrupts disabled.
*/
- local_irq_disable();
+ raw_local_irq_disable();
tick_broadcast_exit();
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
@@ -801,9 +803,9 @@ static __cpuidle void mwait_idle(void)
if (!need_resched())
__sti_mwait(0, 0);
else
- local_irq_enable();
+ raw_local_irq_enable();
} else {
- local_irq_enable();
+ raw_local_irq_enable();
}
__current_clr_polling();
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index df342bedea88..ad582f9ac5a6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -511,11 +511,10 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_COMPAT
-void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
+void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32)
{
start_thread_common(regs, new_ip, new_sp,
- test_thread_flag(TIF_X32)
- ? __USER_CS : __USER32_CS,
+ x32 ? __USER_CS : __USER32_CS,
__USER_DS, __USER_DS);
}
#endif
@@ -641,16 +640,12 @@ void set_personality_64bit(void)
/* inherit personality from parent */
/* Make sure to be in 64bit mode */
- clear_thread_flag(TIF_IA32);
clear_thread_flag(TIF_ADDR32);
- clear_thread_flag(TIF_X32);
/* Pretend that this comes from a 64bit execve */
task_pt_regs(current)->orig_ax = __NR_execve;
current_thread_info()->status &= ~TS_COMPAT;
-
- /* Ensure the corresponding mm is not marked. */
if (current->mm)
- current->mm->context.ia32_compat = 0;
+ current->mm->context.flags = MM_CONTEXT_HAS_VSYSCALL;
/* TBD: overwrites user setup. Should have two bits.
But 64bit processes have always behaved this way,
@@ -662,10 +657,9 @@ void set_personality_64bit(void)
static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
- clear_thread_flag(TIF_IA32);
- set_thread_flag(TIF_X32);
if (current->mm)
- current->mm->context.ia32_compat = TIF_X32;
+ current->mm->context.flags = 0;
+
current->personality &= ~READ_IMPLIES_EXEC;
/*
* in_32bit_syscall() uses the presence of the x32 syscall bit
@@ -683,10 +677,14 @@ static void __set_personality_x32(void)
static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
- set_thread_flag(TIF_IA32);
- clear_thread_flag(TIF_X32);
- if (current->mm)
- current->mm->context.ia32_compat = TIF_IA32;
+ if (current->mm) {
+ /*
+ * uprobes applied to this MM need to know this and
+ * cannot use user_64bit_mode() at that time.
+ */
+ current->mm->context.flags = MM_CONTEXT_UPROBE_IA32;
+ }
+
current->personality |= force_personality32;
/* Prepare the first "return" to user space */
task_pt_regs(current)->orig_ax = __NR_ia32_execve;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84f581c91db4..a23130c86bdd 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -119,11 +119,6 @@ EXPORT_SYMBOL(boot_cpu_data);
unsigned int def_to_bigsmp;
-/* For MCA, but anyone else can use it if they want */
-unsigned int machine_id;
-unsigned int machine_submodel_id;
-unsigned int BIOS_revision;
-
struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
index 5f83ccaab877..7d04b356d44d 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-es-shared.c
@@ -178,6 +178,32 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
goto fail;
regs->dx = val >> 32;
+ /*
+ * This is a VC handler and the #VC is only raised when SEV-ES is
+ * active, which means SEV must be active too. Do sanity checks on the
+ * CPUID results to make sure the hypervisor does not trick the kernel
+ * into the no-SEV path. This could map sensitive data unencrypted and
+ * make it accessible to the hypervisor.
+ *
+ * In particular, check for:
+ * - Hypervisor CPUID bit
+ * - Availability of CPUID leaf 0x8000001f
+ * - SEV CPUID bit.
+ *
+ * The hypervisor might still report the wrong C-bit position, but this
+ * can't be checked here.
+ */
+
+ if ((fn == 1 && !(regs->cx & BIT(31))))
+ /* Hypervisor bit */
+ goto fail;
+ else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
+ /* SEV leaf check */
+ goto fail;
+ else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
+ /* SEV bit */
+ goto fail;
+
/* Skip over the CPUID two-byte opcode */
regs->ip += 2;
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 4a96726fbaf8..0bd1a0fc587e 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -374,8 +374,8 @@ fault:
return ES_EXCEPTION;
}
-static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
- unsigned long vaddr, phys_addr_t *paddr)
+static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ unsigned long vaddr, phys_addr_t *paddr)
{
unsigned long va = (unsigned long)vaddr;
unsigned int level;
@@ -394,15 +394,19 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
if (user_mode(ctxt->regs))
ctxt->fi.error_code |= X86_PF_USER;
- return false;
+ return ES_EXCEPTION;
}
+ if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
+ /* Emulated MMIO to/from encrypted memory not supported */
+ return ES_UNSUPPORTED;
+
pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
pa |= va & ~page_level_mask(level);
*paddr = pa;
- return true;
+ return ES_OK;
}
/* Include code shared with pre-decompression boot stage */
@@ -731,6 +735,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
{
u64 exit_code, exit_info_1, exit_info_2;
unsigned long ghcb_pa = __pa(ghcb);
+ enum es_result res;
phys_addr_t paddr;
void __user *ref;
@@ -740,11 +745,12 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
- if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) {
- if (!read)
+ res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
+ if (res != ES_OK) {
+ if (res == ES_EXCEPTION && !read)
ctxt->fi.error_code |= X86_PF_WRITE;
- return ES_EXCEPTION;
+ return res;
}
exit_info_1 = paddr;
diff --git a/arch/x86/kernel/sev_verify_cbit.S b/arch/x86/kernel/sev_verify_cbit.S
new file mode 100644
index 000000000000..ee04941a6546
--- /dev/null
+++ b/arch/x86/kernel/sev_verify_cbit.S
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * sev_verify_cbit.S - Code for verification of the C-bit position reported
+ * by the Hypervisor when running with SEV enabled.
+ *
+ * Copyright (c) 2020 Joerg Roedel (jroedel@suse.de)
+ *
+ * sev_verify_cbit() is called before switching to a new long-mode page-table
+ * at boot.
+ *
+ * It verifies that the C-bit position is correct by writing a random
+ * value to an encrypted memory location while on the current page-table,
+ * then switching to the new page-table and checking that the memory
+ * content is still the same. After that it switches back to the current
+ * page-table and, if the check succeeded, returns. If the check failed,
+ * the code invalidates the stack pointer and goes into a hlt loop. The
+ * stack pointer is invalidated to make sure no interrupt or exception
+ * can get the CPU out of the hlt loop.
+ *
+ * New page-table pointer is expected in %rdi (first parameter)
+ *
+ */
+SYM_FUNC_START(sev_verify_cbit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /* First check if a C-bit was detected */
+ movq sme_me_mask(%rip), %rsi
+ testq %rsi, %rsi
+ jz 3f
+
+ /* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
+ movq sev_status(%rip), %rsi
+ testq %rsi, %rsi
+ jz 3f
+
+ /* Save CR4 in %rsi */
+ movq %cr4, %rsi
+
+ /* Disable Global Pages */
+ movq %rsi, %rdx
+ andq $(~X86_CR4_PGE), %rdx
+ movq %rdx, %cr4
+
+ /*
+ * We have verified that we are running under SEV - now get a random
+ * value using RDRAND. This instruction is mandatory for SEV guests.
+ *
+ * Don't bail out of the loop if RDRAND returns errors. It is better to
+ * prevent forward progress than to work with a non-random value here.
+ */
+1: rdrand %rdx
+ jnc 1b
+
+ /* Store value to memory and keep it in %rdx */
+ movq %rdx, sev_check_data(%rip)
+
+ /* Backup current %cr3 value to restore it later */
+ movq %cr3, %rcx
+
+ /* Switch to new %cr3 - This might unmap the stack */
+ movq %rdi, %cr3
+
+ /*
+ * Compare value in %rdx with memory location. If C-bit is incorrect
+ * this would read the encrypted data and make the check fail.
+ */
+ cmpq %rdx, sev_check_data(%rip)
+
+ /* Restore old %cr3 */
+ movq %rcx, %cr3
+
+ /* Restore previous CR4 */
+ movq %rsi, %cr4
+
+ /* Check CMPQ result */
+ je 3f
+
+ /*
+ * The check failed: block any forward progress to prevent ROP
+ * attacks - invalidate the stack and go into a hlt loop.
+ */
+ xorq %rsp, %rsp
+ subq $0x1000, %rsp
+2: hlt
+ jmp 2b
+3:
+#endif
+ /* Return page-table pointer */
+ movq %rdi, %rax
+ ret
+SYM_FUNC_END(sev_verify_cbit)
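
The same algorithm, rendered as C-like pseudocode for readability. This cannot actually be written in C (the compiler could spill to the stack, which may be unmapped while the new page-table is live), the CR4.PGE toggling that flushes global TLB entries is omitted, and the helper names are illustrative:

u64 sev_verify_cbit(u64 new_cr3)
{
	u64 val, old_cr3;
	bool ok;

	if (!sme_me_mask || !sev_status)	/* SME only, or no C-bit */
		return new_cr3;

	val = rdrand_until_success();		/* mandatory insn on SEV */
	sev_check_data = val;			/* store via trusted mapping */

	old_cr3 = read_cr3();
	write_cr3(new_cr3);			/* re-read via new mapping */
	ok = (sev_check_data == val);
	write_cr3(old_cr3);

	if (!ok)
		hlt_forever_with_invalid_stack();	/* C-bit lied: stop */
	return new_cr3;
}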
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index be0d7d4152ec..ea794a083c44 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -804,11 +804,11 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
* want to handle. Thus you cannot kill init with a SIGKILL, even by
* mistake.
*/
-void arch_do_signal(struct pt_regs *regs)
+void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
{
struct ksignal ksig;
- if (get_signal(&ksig)) {
+ if (has_signal && get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index a7f3e12cfbdb..a5330ff498f0 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -31,7 +31,7 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 5);
BUILD_BUG_ON(NSIGCHLD != 6);
- BUILD_BUG_ON(NSIGSYS != 1);
+ BUILD_BUG_ON(NSIGSYS != 2);
/* This is part of the ABI and can never change in size: */
BUILD_BUG_ON(sizeof(compat_siginfo_t) != 128);
@@ -165,16 +165,9 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
{
signal_compat_build_tests();
- /* Don't leak in-kernel non-uapi flags to user-space */
- if (oact)
- oact->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
-
if (!act)
return;
- /* Don't let flags to be set from userspace */
- act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
-
if (in_ia32_syscall())
act->sa.sa_flags |= SA_IA32_ABI;
if (in_x32_syscall())
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index de776b2e6046..8ca66af96a54 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -82,6 +82,10 @@
#include <asm/hw_irq.h>
#include <asm/stackprotector.h>
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+#endif
+
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -148,7 +152,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}
-static void init_freq_invariance(bool secondary);
+static void init_freq_invariance(bool secondary, bool cppc_ready);
/*
* Report back to the Boot Processor during boot time or to the caller processor
@@ -186,7 +190,7 @@ static void smp_callin(void)
*/
set_cpu_sibling_map(raw_smp_processor_id());
- init_freq_invariance(true);
+ init_freq_invariance(true, false);
/*
* Get our bogomips.
@@ -229,6 +233,7 @@ static void notrace start_secondary(void *unused)
#endif
cpu_init_exception_handling();
cpu_init();
+ rcu_cpu_starting(raw_smp_processor_id());
x86_cpuinit.early_percpu_clock_init();
preempt_disable();
smp_callin();
@@ -747,13 +752,14 @@ static void __init smp_quirk_init_udelay(void)
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
+ u32 dm = apic->dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
unsigned long send_status, accept_status = 0;
int maxlvt;
/* Target chip */
/* Boot on the stack */
/* Kick the second */
- apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);
+ apic_icr_write(APIC_DM_NMI | dm, apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
@@ -980,10 +986,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
if (!boot_error) {
enable_start_cpu0 = 1;
*cpu0_nmi_registered = 1;
- if (apic->dest_logical == APIC_DEST_LOGICAL)
- id = cpu0_logical_apicid;
- else
- id = apicid;
+ id = apic->dest_mode_logical ? cpu0_logical_apicid : apicid;
boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
}
@@ -1340,7 +1343,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
set_sched_topology(x86_topology);
set_cpu_sibling_map(0);
- init_freq_invariance(false);
+ init_freq_invariance(false, false);
smp_sanity_check();
switch (apic_intr_mode) {
@@ -2027,6 +2030,48 @@ out:
return true;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+static bool amd_set_max_freq_ratio(void)
+{
+ struct cppc_perf_caps perf_caps;
+ u64 highest_perf, nominal_perf;
+ u64 perf_ratio;
+ int rc;
+
+ rc = cppc_get_perf_caps(0, &perf_caps);
+ if (rc) {
+ pr_debug("Could not retrieve perf counters (%d)\n", rc);
+ return false;
+ }
+
+ highest_perf = perf_caps.highest_perf;
+ nominal_perf = perf_caps.nominal_perf;
+
+ if (!highest_perf || !nominal_perf) {
+ pr_debug("Could not retrieve highest or nominal performance\n");
+ return false;
+ }
+
+ perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
+ /* midpoint between max_boost and max_P */
+ perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
+ if (!perf_ratio) {
+ pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
+ return false;
+ }
+
+ arch_turbo_freq_ratio = perf_ratio;
+ arch_set_max_freq_ratio(false);
+
+ return true;
+}
+#else
+static bool amd_set_max_freq_ratio(void)
+{
+ return false;
+}
+#endif
+
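Plugging hypothetical numbers into the midpoint computation above makes the heuristic concrete:

/*
 * Example with hypothetical CPPC values:
 *   highest_perf = 300, nominal_perf = 200, SCHED_CAPACITY_SCALE = 1024
 *   perf_ratio = 300 * 1024 / 200     = 1536   (pure max-boost ratio)
 *   midpoint   = (1536 + 1024) >> 1   = 1280   (~1.25x of base frequency)
 */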
static void init_counter_refs(void)
{
u64 aperf, mperf;
@@ -2038,7 +2083,7 @@ static void init_counter_refs(void)
this_cpu_write(arch_prev_mperf, mperf);
}
-static void init_freq_invariance(bool secondary)
+static void init_freq_invariance(bool secondary, bool cppc_ready)
{
bool ret = false;
@@ -2054,15 +2099,38 @@ static void init_freq_invariance(bool secondary)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
ret = intel_set_max_freq_ratio();
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (!cppc_ready)
+ return;
+ ret = amd_set_max_freq_ratio();
+ }
if (ret) {
init_counter_refs();
static_branch_enable(&arch_scale_freq_key);
+ pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
} else {
pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
}
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+static DEFINE_MUTEX(freq_invariance_lock);
+
+void init_freq_invariance_cppc(void)
+{
+ static bool secondary;
+
+ mutex_lock(&freq_invariance_lock);
+
+ init_freq_invariance(secondary, true);
+ secondary = true;
+
+ mutex_unlock(&freq_invariance_lock);
+}
+#endif
+
static void disable_freq_invariance_workfn(struct work_struct *work)
{
static_branch_disable(&arch_scale_freq_key);
@@ -2112,7 +2180,7 @@ error:
schedule_work(&disable_freq_invariance_work);
}
#else
-static inline void init_freq_invariance(bool secondary)
+static inline void init_freq_invariance(bool secondary, bool cppc_ready)
{
}
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 992fb1415c0f..4c09ba110204 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -93,6 +93,7 @@ static struct mm_struct tboot_mm = {
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
+ .write_protect_seq = SEQCNT_ZERO(tboot_mm.write_protect_seq),
MMAP_LOCK_INITIALIZER(init_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
@@ -514,16 +515,10 @@ int tboot_force_iommu(void)
if (!tboot_enabled())
return 0;
- if (intel_iommu_tboot_noforce)
- return 1;
-
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || dmar_disabled)
pr_warn("Forcing Intel-IOMMU to enabled\n");
dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
- swiotlb = 0;
-#endif
no_iommu = 0;
return 1;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3c70fb34028b..fb55981f2a0d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -60,6 +60,7 @@
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
+#include <asm/vdso.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -117,6 +118,9 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
+ } else {
+ if (fixup_vdso_exception(regs, trapnr, error_code, 0))
+ return 0;
}
/*
@@ -405,7 +409,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
}
#endif
- idtentry_enter_nmi(regs);
+ irqentry_nmi_enter(regs);
instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -550,6 +554,9 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
+ if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
+ return;
+
show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
force_sig(SIGSEGV);
goto exit;
@@ -651,12 +658,13 @@ DEFINE_IDTENTRY_RAW(exc_int3)
instrumentation_end();
irqentry_exit_to_user_mode(regs);
} else {
- bool irq_state = idtentry_enter_nmi(regs);
+ irqentry_state_t irq_state = irqentry_nmi_enter(regs);
+
instrumentation_begin();
if (!do_int3(regs))
die("int3", regs, 0);
instrumentation_end();
- idtentry_exit_nmi(regs, irq_state);
+ irqentry_nmi_exit(regs, irq_state);
}
}
@@ -793,19 +801,6 @@ static __always_inline unsigned long debug_read_clear_dr6(void)
set_debugreg(DR6_RESERVED, 6);
dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
- /*
- * Clear the virtual DR6 value, ptrace routines will set bits here for
- * things we want signals for.
- */
- current->thread.virtual_dr6 = 0;
-
- /*
- * The SDM says "The processor clears the BTF flag when it
- * generates a debug exception." Clear TIF_BLOCKSTEP to keep
- * TIF_BLOCKSTEP in sync with the hardware BTF flag.
- */
- clear_thread_flag(TIF_BLOCKSTEP);
-
return dr6;
}
@@ -864,7 +859,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
* includes the entry stack is excluded for everything.
*/
unsigned long dr7 = local_db_save();
- bool irq_state = idtentry_enter_nmi(regs);
+ irqentry_state_t irq_state = irqentry_nmi_enter(regs);
instrumentation_begin();
/*
@@ -873,6 +868,20 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
*/
WARN_ON_ONCE(user_mode(regs));
+ if (test_thread_flag(TIF_BLOCKSTEP)) {
+ /*
+ * The SDM says "The processor clears the BTF flag when it
+ * generates a debug exception." PTRACE_BLOCKSTEP, however,
+ * requested it for userspace, and we just took a kernel #DB,
+ * so re-set BTF.
+ */
+ unsigned long debugctl;
+
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl |= DEBUGCTLMSR_BTF;
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ }
+
/*
* Catch SYSENTER with TF set and clear DR_STEP. If this hit a
* watchpoint at the same time then that will still be handled.
@@ -907,7 +916,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
regs->flags &= ~X86_EFLAGS_TF;
out:
instrumentation_end();
- idtentry_exit_nmi(regs, irq_state);
+ irqentry_nmi_exit(regs, irq_state);
local_db_restore(dr7);
}
@@ -925,7 +934,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
/*
* NB: We can't easily clear DR7 here because
- * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+ * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
* user memory, etc. This means that a recursive #DB is possible. If
* this happens, that #DB will hit exc_debug_kernel() and clear DR7.
* Since we're not on the IST stack right now, everything will be
@@ -936,6 +945,22 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
instrumentation_begin();
/*
+ * Start the virtual/ptrace DR6 value with just the DR_STEP mask
+ * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
+ *
+ * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
+ * even if it is not the result of PTRACE_SINGLESTEP.
+ */
+ current->thread.virtual_dr6 = (dr6 & DR_STEP);
+
+ /*
+ * The SDM says "The processor clears the BTF flag when it
+ * generates a debug exception." Clear TIF_BLOCKSTEP to keep
+ * TIF_BLOCKSTEP in sync with the hardware BTF flag.
+ */
+ clear_thread_flag(TIF_BLOCKSTEP);
+
+ /*
* If dr6 gives us no clue about the origin of this trap,
* then it's very likely the result of an icebp/int01 trap.
* User wants a sigtrap for that.
@@ -1031,6 +1056,9 @@ static void math_error(struct pt_regs *regs, int trapnr)
if (!si_code)
goto exit;
+ if (fixup_vdso_exception(regs, trapnr, 0, 0))
+ return;
+
force_sig_fault(SIGFPE, si_code,
(void __user *)uprobe_get_trap_addr(regs));
exit:
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 6a339ce328e0..73f800100066 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -321,19 +321,12 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address);
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
- struct task_struct *task = state->task;
-
if (unwind_done(state))
return NULL;
if (state->regs)
return &state->regs->ip;
- if (task != current && state->sp == task->thread.sp) {
- struct inactive_task_frame *frame = (void *)task->thread.sp;
- return &frame->ret_addr;
- }
-
if (state->sp)
return (unsigned long *)state->sp - 1;
@@ -663,7 +656,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
} else {
struct inactive_task_frame *frame = (void *)task->thread.sp;
- state->sp = task->thread.sp;
+ state->sp = task->thread.sp + sizeof(*frame);
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
state->signal = (void *)state->ip == ret_from_fork;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 3fdaa042823d..a2b413394917 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = {
static bool is_prefix_bad(struct insn *insn)
{
+ insn_byte_t p;
int i;
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, i, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_ES):
case INAT_MAKE_PREFIX(INAT_PFX_CS):
@@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = {
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
u8 opc1 = OPCODE1(insn);
+ insn_byte_t p;
int i;
switch (opc1) {
@@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
* Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
* No one uses these insns, reject any branch insns with such prefix.
*/
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- if (insn->prefixes.bytes[i] == 0x66)
+ for_each_insn_prefix(insn, i, p) {
+ if (p == 0x66)
return -ENOTSUPP;
}
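
Both hunks switch from indexing prefixes.bytes[] by nbytes to the for_each_insn_prefix() iterator, likely because the byte array and the count can disagree (e.g. when a prefix byte is repeated), so index-by-count can read the wrong slots; the iterator visits only valid prefix slots. Its general shape, as used above (sketch):

insn_byte_t p;	/* current prefix byte */
int i;		/* its slot index */

for_each_insn_prefix(insn, i, p) {
	/* body sees each valid prefix byte exactly once */
}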
@@ -1015,6 +1017,8 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
if (uprobe_post_sstep_notifier(regs))
ret = NOTIFY_STOP;
+ break;
+
default:
break;
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf9e0adb5b7e..efd9e9ea17f2 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -454,13 +454,13 @@ SECTIONS
ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}
-#ifdef CONFIG_X86_32
/*
* The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
*/
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
-#else
+
+#ifdef CONFIG_X86_64
/*
* Per-cpu symbols which need to be offset from __per_cpu_load
* for the boot processor.
@@ -470,18 +470,12 @@ INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
-/*
- * Build-time check on the image size:
- */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
- "kernel image bigger than KERNEL_IMAGE_SIZE");
-
#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
"fixed_percpu_data is not at start of per-cpu area");
#endif
-#endif /* CONFIG_X86_32 */
+#endif /* CONFIG_X86_64 */
#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a3038d8deb6a..8b395821cb8d 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -110,6 +110,7 @@ struct x86_init_ops x86_init __initdata = {
.init_platform = x86_init_noop,
.guest_late_init = x86_init_noop,
.x2apic_available = bool_x86_init_noop,
+ .msi_ext_dest_id = bool_x86_init_noop,
.init_mem_mapping = x86_init_noop,
.init_after_bootmem = x86_init_noop,
},
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 06a278b3701d..83637a2ff605 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -90,6 +90,20 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
return 0;
}
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+
+ /*
+ * save the feature bitmap to avoid cpuid lookup for every PV
+ * operation
+ */
+ if (best)
+ vcpu->arch.pv_cpuid.features = best->eax;
+}
+
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -124,13 +138,6 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
- /*
- * save the feature bitmap to avoid cpuid lookup for every PV
- * operation
- */
- if (best)
- vcpu->arch.pv_cpuid.features = best->eax;
-
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
if (best)
@@ -162,6 +169,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 =
(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+ kvm_update_pv_runtime(vcpu);
+
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
kvm_mmu_reset_context(vcpu);
@@ -169,6 +178,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.cr4_guest_rsvd_bits =
__cr4_reserved_bits(guest_cpuid_has, vcpu);
+ vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
+
/* Invoke the vendor callback only after the above state is updated. */
kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
}
@@ -672,7 +683,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
edx.split.bit_width_fixed = cap.bit_width_fixed;
- edx.split.reserved = 0;
+ edx.split.anythread_deprecated = 1;
+ edx.split.reserved1 = 0;
+ edx.split.reserved2 = 0;
entry->eax = eax.full;
entry->ebx = cap.events_mask;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index bf8577947ed2..f7a6e8f83783 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -11,6 +11,7 @@ extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0d917eb70319..56cae1ff9e3f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4046,6 +4046,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
+{
+ /* emulating clflushopt regardless of cpuid */
+ return X86EMUL_CONTINUE;
+}
+
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = (s32) ctxt->src.val;
@@ -4585,7 +4591,7 @@ static const struct opcode group11[] = {
};
static const struct gprefix pfx_0f_ae_7 = {
- I(SrcMem | ByteOp, em_clflush), N, N, N,
+ I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
};
static const struct group_dual group15 = { {
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 99d118ffc67d..814698e5b152 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
* check if there is a pending interrupt from
* a non-APIC source without intack.
*/
-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
-{
- u8 accept = kvm_apic_accept_pic_intr(v);
-
- if (accept) {
- if (irqchip_split(v->kvm))
- return pending_userspace_extint(v);
- else
- return v->kvm->arch.vpic->output;
- } else
- return 0;
-}
-
-/*
- * check if there is injectable interrupt:
- * when virtual interrupt delivery enabled,
- * interrupt from apic will handled by hardware,
- * we don't need to check it here.
- */
-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
/*
- * FIXME: interrupt.injected represents an interrupt that it's
+ * FIXME: interrupt.injected represents an interrupt whose
* side-effects have already been applied (e.g. bit from IRR
* already moved to ISR). Therefore, it is incorrect to rely
* on interrupt.injected to know if there is a pending
@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
+ if (!kvm_apic_accept_pic_intr(v))
+ return 0;
+
+ if (irqchip_split(v->kvm))
+ return pending_userspace_extint(v);
+ else
+ return v->kvm->arch.vpic->output;
+}
+
+/*
+ * check if there is an injectable interrupt:
+ * when virtual interrupt delivery is enabled,
+ * interrupts from the APIC will be handled by hardware and
+ * we don't need to check them here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
if (kvm_cpu_has_extint(v))
return 1;
@@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
- /*
- * FIXME: interrupt.injected represents an interrupt that it's
- * side-effects have already been applied (e.g. bit from IRR
- * already moved to ISR). Therefore, it is incorrect to rely
- * on interrupt.injected to know if there is a pending
- * interrupt in the user-mode LAPIC.
- * This leads to nVMX/nSVM not be able to distinguish
- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
- * pending interrupt or should re-inject an injected
- * interrupt.
- */
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.injected;
-
if (kvm_cpu_has_extint(v))
return 1;
@@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
*/
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
- if (kvm_cpu_has_extint(v)) {
- if (irqchip_split(v->kvm)) {
- int vector = v->arch.pending_external_vector;
-
- v->arch.pending_external_vector = -1;
- return vector;
- } else
- return kvm_pic_read_irq(v->kvm); /* PIC */
- } else
+ if (!kvm_cpu_has_extint(v)) {
+ WARN_ON(!lapic_in_kernel(v));
return -1;
+ }
+
+ if (!lapic_in_kernel(v))
+ return v->arch.interrupt.nr;
+
+ if (irqchip_split(v->kvm)) {
+ int vector = v->arch.pending_external_vector;
+
+ v->arch.pending_external_vector = -1;
+ return vector;
+ } else
+ return kvm_pic_read_irq(v->kvm); /* PIC */
}
/*
@@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
*/
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
- int vector;
-
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.nr;
-
- vector = kvm_cpu_get_extint(v);
-
+ int vector = kvm_cpu_get_extint(v);
if (vector != -1)
return vector; /* PIC */
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 4aa1c2e00e2a..8a4de3f12820 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -16,8 +16,6 @@
#include <trace/events/kvm.h>
-#include <asm/msidef.h>
-
#include "irq.h"
#include "ioapic.h"
@@ -104,22 +102,19 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq *irq)
{
- trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ?
- (u64)e->msi.address_hi << 32 : 0),
- e->msi.data);
-
- irq->dest_id = (e->msi.address_lo &
- MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
- if (kvm->arch.x2apic_format)
- irq->dest_id |= MSI_ADDR_EXT_DEST_ID(e->msi.address_hi);
- irq->vector = (e->msi.data &
- MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
- irq->dest_mode = kvm_lapic_irq_dest_mode(
- !!((1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo));
- irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
- irq->delivery_mode = e->msi.data & 0x700;
- irq->msi_redir_hint = ((e->msi.address_lo
- & MSI_ADDR_REDIRECTION_LOWPRI) > 0);
+ struct msi_msg msg = { .address_lo = e->msi.address_lo,
+ .address_hi = e->msi.address_hi,
+ .data = e->msi.data };
+
+ trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
+ (u64)msg.address_hi << 32 : 0), msg.data);
+
+ irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
+ irq->vector = msg.arch_data.vector;
+ irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
+ irq->trig_mode = msg.arch_data.is_level;
+ irq->delivery_mode = msg.arch_data.delivery_mode << 8;
+ irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
irq->level = 1;
irq->shorthand = APIC_DEST_NOSHORT;
}
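
One subtlety in the rewrite above: kvm_lapic_irq keeps delivery_mode in its APIC ICR position (bits 8-10), while msg.arch_data.delivery_mode is the raw 3-bit MSI field - hence the << 8. An illustrative sanity check (APIC_DM_* values from <asm/apicdef.h>):

static_assert(APIC_DM_FIXED  == (0 << 8));
static_assert(APIC_DM_LOWEST == (1 << 8));
static_assert(APIC_DM_NMI    == (4 << 8));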
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 105e7859d1f2..86c33d53c90a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2465,7 +2465,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
- if (!kvm_apic_hw_enabled(apic))
+ if (!kvm_apic_present(vcpu))
return -1;
__apic_update_ppr(apic, &ppr);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 17587f496ec7..7a6ae9e90bd7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -225,7 +225,7 @@ static gfn_t get_mmio_spte_gfn(u64 spte)
{
u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
- gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+ gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
& shadow_nonpresent_or_rsvd_mask;
return gpa >> PAGE_SHIFT;
@@ -591,15 +591,15 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
static u64 restore_acc_track_spte(u64 spte)
{
u64 new_spte = spte;
- u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
- & shadow_acc_track_saved_bits_mask;
+ u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
+ & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
WARN_ON_ONCE(spte_ad_enabled(spte));
WARN_ON_ONCE(!is_access_track_spte(spte));
new_spte &= ~shadow_acc_track_mask;
- new_spte &= ~(shadow_acc_track_saved_bits_mask <<
- shadow_acc_track_saved_bits_shift);
+ new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
+ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
new_spte |= saved_bits;
return new_spte;
@@ -856,12 +856,14 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
} else {
rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
- while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
- desc = desc->more;
+ while (desc->sptes[PTE_LIST_EXT-1]) {
count += PTE_LIST_EXT;
- }
- if (desc->sptes[PTE_LIST_EXT-1]) {
- desc->more = mmu_alloc_pte_list_desc(vcpu);
+
+ if (!desc->more) {
+ desc->more = mmu_alloc_pte_list_desc(vcpu);
+ desc = desc->more;
+ break;
+ }
desc = desc->more;
}
for (i = 0; desc->sptes[i]; ++i)
@@ -3515,7 +3517,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
u64 sptes[PT64_ROOT_MAX_LEVEL];
struct rsvd_bits_validate *rsvd_check;
- int root = vcpu->arch.mmu->root_level;
+ int root = vcpu->arch.mmu->shadow_root_level;
int leaf;
int level;
bool reserved = false;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index d9c5665a55e9..c51ad544f25b 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -40,8 +40,8 @@ static u64 generation_mmio_spte_mask(u64 gen)
WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
- mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
- mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
+ mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
+ mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
return mask;
}
@@ -55,7 +55,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
mask |= shadow_mmio_value | access;
mask |= gpa | shadow_nonpresent_or_rsvd_mask;
mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
- << shadow_nonpresent_or_rsvd_mask_len;
+ << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
return mask;
}
@@ -231,12 +231,12 @@ u64 mark_spte_for_access_track(u64 spte)
!spte_can_locklessly_be_made_writable(spte),
"kvm: Writable SPTE is not locklessly dirty-trackable\n");
- WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
- shadow_acc_track_saved_bits_shift),
+ WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
+ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
"kvm: Access Tracking saved bit locations are not zero\n");
- spte |= (spte & shadow_acc_track_saved_bits_mask) <<
- shadow_acc_track_saved_bits_shift;
+ spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
+ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
spte &= ~shadow_acc_track_mask;
return spte;
@@ -245,7 +245,7 @@ u64 mark_spte_for_access_track(u64 spte)
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
{
BUG_ON((u64)(unsigned)access_mask != access_mask);
- WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+ WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN));
WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
shadow_mmio_access_mask = access_mask;
@@ -306,9 +306,9 @@ void kvm_mmu_reset_all_pte_masks(void)
low_phys_bits = boot_cpu_data.x86_phys_bits;
if (boot_cpu_has_bug(X86_BUG_L1TF) &&
!WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
- 52 - shadow_nonpresent_or_rsvd_mask_len)) {
+ 52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
low_phys_bits = boot_cpu_data.x86_cache_bits
- - shadow_nonpresent_or_rsvd_mask_len;
+ - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
}
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 4ecf40e0b8fe..2b3a30bd38b0 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -56,11 +56,11 @@
#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
/*
- * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
+ * Due to limited space in PTEs, the MMIO generation is an 18 bit subset of
* the memslots generation and is derived as follows:
*
* Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
- * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
+ * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
*
* The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
* the MMIO generation number, as doing so would require stealing a bit from
@@ -69,18 +69,29 @@
* requires a full MMU zap). The flag is instead explicitly queried when
* checking for MMIO spte cache hits.
*/
-#define MMIO_SPTE_GEN_MASK GENMASK_ULL(17, 0)
#define MMIO_SPTE_GEN_LOW_START 3
#define MMIO_SPTE_GEN_LOW_END 11
-#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
- MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT
#define MMIO_SPTE_GEN_HIGH_END 62
+
+#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
+ MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
MMIO_SPTE_GEN_HIGH_START)
+#define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
+#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)
+
+/* remember to adjust the comment above as well if you change these */
+static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9);
+
+#define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0)
+#define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)
+
+#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
+
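A worked example of how an 18-bit generation round-trips through these masks (value hypothetical):

/*
 * gen = 0x2a5ff (18 bits): low 9 bits = 0x1ff, high 9 bits = 0x152
 *
 * encode: (gen << MMIO_SPTE_GEN_LOW_SHIFT)  & LOW_MASK  -> spte bits 3..11
 *         (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & HIGH_MASK -> spte bits 54..62
 *         with LOW_SHIFT = 3 and HIGH_SHIFT = 54 - 9 = 45
 *
 * decode: gen = ((spte & LOW_MASK)  >> 3)
 *             | ((spte & HIGH_MASK) >> 45)   -> 0x2a5ff again
 */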
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
@@ -105,19 +116,19 @@ extern u64 __read_mostly shadow_acc_track_mask;
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5
+
+/*
* The mask/shift to use for saving the original R/X bits when marking the PTE
* as not-present for access tracking purposes. We do not save the W bit as the
* PTEs being access tracked also need to be dirty tracked, so the W bit will be
* restored only when a write is attempted to the page.
*/
-static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
- PT64_EPT_EXECUTABLE_MASK;
-static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
-
-/*
- * The number of high-order 1 bits to use in the mask above.
- */
-static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
+ PT64_EPT_EXECUTABLE_MASK)
+#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT
/*
* In some cases, we need to preserve the GFN of a non-present or reserved
@@ -228,8 +239,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
{
u64 gen;
- gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
- gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
+ gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
+ gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
return gen;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 27e381c9da6c..84c8f06bec26 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -49,7 +49,14 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
struct kvm_mmu_page *sp;
+ if (!kvm->arch.tdp_mmu_enabled)
+ return false;
+ if (WARN_ON(!VALID_PAGE(hpa)))
+ return false;
+
sp = to_shadow_page(hpa);
+ if (WARN_ON(!sp))
+ return false;
return sp->tdp_mmu_page && sp->root_count;
}
@@ -59,7 +66,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
- gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
+ gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
lockdep_assert_held(&kvm->mmu_lock);
@@ -449,7 +456,7 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
- gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
+ gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
bool flush;
flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
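
The switch to shadow_phys_bits matters because it is KVM's notion of the usable physical address width, which need not match the raw boot_cpu_data value. For scale, a sketch with assumed widths:

	/* Assumed: shadow_phys_bits = 52, PAGE_SHIFT = 12. */
	gfn_t max_gfn = 1ULL << (52 - 12);	/* 2^40 GFNs, i.e. 4 PiB of guest PA */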
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c0b14106258a..566f4d18185b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -642,8 +642,8 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
* It's safe to read more than we are asked; the caller should ensure that
* the destination has enough space.
*/
- src_paddr = round_down(src_paddr, 16);
offset = src_paddr & 15;
+ src_paddr = round_down(src_paddr, 16);
sz = round_up(sz + offset, 16);
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
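
Why the swap matters: offset has to be sampled before the address is rounded, otherwise it is always zero and the rounded-up size can come out too small when the range straddles a 16-byte block. A sketch with assumed values:

	unsigned long src_paddr = 0x1009, sz = 10, offset;

	offset    = src_paddr & 15;              /* 9 */
	src_paddr = src_paddr & ~15UL;           /* round_down -> 0x1000 */
	sz        = (sz + offset + 15) & ~15UL;  /* round_up(19, 16) = 32 */
	/*
	 * 32 bytes from 0x1000 cover 0x1009..0x1012. With the old order,
	 * offset came out 0 and sz only 16, truncating the last 3 bytes.
	 */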
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2f32fd09e259..da7eb4aaf44f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -530,12 +530,12 @@ static int svm_hardware_enable(void)
static void svm_cpu_uninit(int cpu)
{
- struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
if (!sd)
return;
- per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+ per_cpu(svm_data, cpu) = NULL;
kfree(sd->sev_vmcbs);
__free_page(sd->save_area);
kfree(sd);
@@ -1309,8 +1309,10 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm->avic_is_running = true;
svm->msrpm = svm_vcpu_alloc_msrpm();
- if (!svm->msrpm)
+ if (!svm->msrpm) {
+ err = -ENOMEM;
goto error_free_vmcb_page;
+ }
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
@@ -3741,6 +3743,7 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_cpuid_entry2 *best;
vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3753,6 +3756,13 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
/* Check again if INVPCID interception is required */
svm_check_invpcid(svm);
+ /* For SEV guests, the memory encryption bit is not reserved in CR3. */
+ if (sev_guest(vcpu->kvm)) {
+ best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
+ if (best)
+ vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
+ }
+
if (!kvm_vcpu_apicv_active(vcpu))
return;
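
The CR3 change above can be illustrated with assumed numbers: CPUID 0x8000001F:EBX[5:0] reports the SEV C-bit position (47 here, a common but assumed value), and clearing it from the long-mode reserved mask keeps kvm_set_cr3() from treating an encrypted page-table root as a #GP condition:

	/* Assumed: guest MAXPHYADDR = 43, C-bit = 47. */
	u64 cr3_lm_rsvd_bits = ~0ULL << 43;	/* rsvd_bits(43, 63) */
	u64 cr3 = 0x123000ULL | (1ULL << 47);	/* root PA with the C-bit set */

	cr3_lm_rsvd_bits &= ~(1ULL << (47 & 0x3f));
	/* (cr3 & cr3_lm_rsvd_bits) == 0: the write is now accepted. */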
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index e5325bd0f304..f3199bb02f22 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -297,14 +297,13 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
};
const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
+__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
-
}
#endif
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index e5f7a7ebf27d..bd41d9462355 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -185,7 +185,7 @@ static inline void evmcs_load(u64 phys_addr)
vp_ap->enlighten_vmentry = 1;
}
-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
+__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -194,7 +194,6 @@ static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
-static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d14c94d0aff1..47b8357b9751 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2560,8 +2560,10 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
vmcs_conf->vmexit_ctrl = _vmexit_control;
vmcs_conf->vmentry_ctrl = _vmentry_control;
- if (static_branch_unlikely(&enable_evmcs))
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (enlightened_vmcs)
evmcs_sanitize_exec_ctrls(vmcs_conf);
+#endif
return 0;
}
@@ -6834,7 +6836,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx;
- unsigned long *msr_bitmap;
int i, cpu, err;
BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
@@ -6894,7 +6895,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
- msr_bitmap = vmx->vmcs01.msr_bitmap;
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 397f599b20e5..e545a8a613b1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -255,24 +255,23 @@ static struct kmem_cache *x86_emulator_cache;
/*
* When called, it means the previous get/set msr reached an invalid msr.
- * Return 0 if we want to ignore/silent this failed msr access, or 1 if we want
- * to fail the caller.
+ * Return true if we want to ignore/silence this failed msr access.
*/
-static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
- u64 data, bool write)
+static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
+ u64 data, bool write)
{
const char *op = write ? "wrmsr" : "rdmsr";
if (ignore_msrs) {
if (report_ignored_msrs)
- vcpu_unimpl(vcpu, "ignored %s: 0x%x data 0x%llx\n",
- op, msr, data);
+ kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
+ op, msr, data);
/* Mask the error */
- return 0;
+ return true;
} else {
- vcpu_debug_ratelimited(vcpu, "unhandled %s: 0x%x data 0x%llx\n",
- op, msr, data);
- return -ENOENT;
+ kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
+ op, msr, data);
+ return false;
}
}
@@ -1042,7 +1041,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}
if (is_long_mode(vcpu) &&
- (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
+ (cr3 & vcpu->arch.cr3_lm_rsvd_bits))
return 1;
else if (is_pae_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
@@ -1416,7 +1415,8 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
if (r == KVM_MSR_RET_INVALID) {
/* Unconditionally clear the output for simplicity */
*data = 0;
- r = kvm_msr_ignored_check(vcpu, index, 0, false);
+ if (kvm_msr_ignored_check(vcpu, index, 0, false))
+ r = 0;
}
if (r)
@@ -1540,7 +1540,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
struct msr_data msr;
if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
- return -EPERM;
+ return KVM_MSR_RET_FILTERED;
switch (index) {
case MSR_FS_BASE:
@@ -1581,7 +1581,8 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
if (ret == KVM_MSR_RET_INVALID)
- ret = kvm_msr_ignored_check(vcpu, index, data, true);
+ if (kvm_msr_ignored_check(vcpu, index, data, true))
+ ret = 0;
return ret;
}
@@ -1599,7 +1600,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
int ret;
if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
- return -EPERM;
+ return KVM_MSR_RET_FILTERED;
msr.index = index;
msr.host_initiated = host_initiated;
@@ -1618,7 +1619,8 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
if (ret == KVM_MSR_RET_INVALID) {
/* Unconditionally clear *data for simplicity */
*data = 0;
- ret = kvm_msr_ignored_check(vcpu, index, 0, false);
+ if (kvm_msr_ignored_check(vcpu, index, 0, false))
+ ret = 0;
}
return ret;
@@ -1662,9 +1664,9 @@ static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
static u64 kvm_msr_reason(int r)
{
switch (r) {
- case -ENOENT:
+ case KVM_MSR_RET_INVALID:
return KVM_MSR_EXIT_REASON_UNKNOWN;
- case -EPERM:
+ case KVM_MSR_RET_FILTERED:
return KVM_MSR_EXIT_REASON_FILTER;
default:
return KVM_MSR_EXIT_REASON_INVAL;
@@ -1965,7 +1967,7 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
struct kvm_arch *ka = &vcpu->kvm->arch;
if (vcpu->vcpu_id == 0 && !host_initiated) {
- if (ka->boot_vcpu_runs_old_kvmclock && old_msr)
+ if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
ka->boot_vcpu_runs_old_kvmclock = old_msr;
@@ -3063,9 +3065,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
- }
- vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
- __func__, data);
+ } else if (report_ignored_msrs)
+ vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
+ __func__, data);
break;
case 0x200 ... 0x2ff:
return kvm_mtrr_set_msr(vcpu, msr, data);
@@ -3463,29 +3465,63 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.efer;
break;
case MSR_KVM_WALL_CLOCK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+ return 1;
+
+ msr_info->data = vcpu->kvm->arch.wall_clock;
+ break;
case MSR_KVM_WALL_CLOCK_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+ return 1;
+
msr_info->data = vcpu->kvm->arch.wall_clock;
break;
case MSR_KVM_SYSTEM_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+ return 1;
+
+ msr_info->data = vcpu->arch.time;
+ break;
case MSR_KVM_SYSTEM_TIME_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+ return 1;
+
msr_info->data = vcpu->arch.time;
break;
case MSR_KVM_ASYNC_PF_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+ return 1;
+
msr_info->data = vcpu->arch.apf.msr_en_val;
break;
case MSR_KVM_ASYNC_PF_INT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+ return 1;
+
msr_info->data = vcpu->arch.apf.msr_int_val;
break;
case MSR_KVM_ASYNC_PF_ACK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+ return 1;
+
msr_info->data = 0;
break;
case MSR_KVM_STEAL_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+ return 1;
+
msr_info->data = vcpu->arch.st.msr_val;
break;
case MSR_KVM_PV_EOI_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+ return 1;
+
msr_info->data = vcpu->arch.pv_eoi.msr_val;
break;
case MSR_KVM_POLL_CONTROL:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+ return 1;
+
msr_info->data = vcpu->arch.msr_kvm_poll_control;
break;
case MSR_IA32_P5_MC_ADDR:
@@ -4015,21 +4051,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
+ /*
+ * We can accept userspace's request for interrupt injection
+ * as long as we have a place to store the interrupt number.
+ * The actual injection will happen when the CPU is able to
+ * deliver the interrupt.
+ */
+ if (kvm_cpu_has_extint(vcpu))
+ return false;
+
+ /* Acknowledging ExtINT does not happen if LINT0 is masked. */
return (!lapic_in_kernel(vcpu) ||
kvm_apic_accept_pic_intr(vcpu));
}
-/*
- * if userspace requested an interrupt window, check that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
return kvm_arch_interrupt_allowed(vcpu) &&
- !kvm_cpu_has_interrupt(vcpu) &&
- !kvm_event_needs_reinjection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
}
@@ -4575,6 +4613,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
vcpu->arch.pv_cpuid.enforce = cap->args[0];
+ if (vcpu->arch.pv_cpuid.enforce)
+ kvm_update_pv_runtime(vcpu);
return 0;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 3900ab0c6004..e7ca622a468f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -376,7 +376,13 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
-#define KVM_MSR_RET_INVALID 2
+/*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+ * an error that should result in #GP in the guest, unless userspace
+ * handles it.
+ */
+#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */
+#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */
#define __cr4_reserved_bits(__cpu_has, __c) \
({ \
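
On the userspace side, the two internal codes surface as distinct exit reasons via kvm_msr_reason(). A hedged sketch of a VMM's KVM_RUN loop consuming them (field names per the kvm_run MSR exit UAPI; the read-as-zero policy shown is an assumption):

	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		if (run->msr.reason == KVM_MSR_EXIT_REASON_FILTER) {
			run->msr.error = 1;	/* denied: KVM injects #GP */
		} else {			/* KVM_MSR_EXIT_REASON_UNKNOWN */
			run->msr.data = 0;	/* emulate as read-as-zero */
			run->msr.error = 0;
		}
		break;
	}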
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index c13e8c9ee926..80efd45a7761 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -10,10 +10,6 @@
#include <asm/mce.h>
#ifdef CONFIG_X86_MCE
-/*
- * See COPY_MC_TEST for self-test of the copy_mc_fragile()
- * implementation.
- */
static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);
void enable_copy_mc_fragile(void)
diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S
index 892d8915f609..e5f77e293034 100644
--- a/arch/x86/lib/copy_mc_64.S
+++ b/arch/x86/lib/copy_mc_64.S
@@ -2,14 +2,11 @@
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
#include <linux/linkage.h>
-#include <asm/copy_mc_test.h>
-#include <asm/export.h>
#include <asm/asm.h>
#ifndef CONFIG_UML
#ifdef CONFIG_X86_MCE
-COPY_MC_TEST_CTL
/*
* copy_mc_fragile - copy memory with indication if an exception / fault happened
@@ -38,8 +35,6 @@ SYM_FUNC_START(copy_mc_fragile)
subl %ecx, %edx
.L_read_leading_bytes:
movb (%rsi), %al
- COPY_MC_TEST_SRC %rsi 1 .E_leading_bytes
- COPY_MC_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
movb %al, (%rdi)
incq %rsi
@@ -55,8 +50,6 @@ SYM_FUNC_START(copy_mc_fragile)
.L_read_words:
movq (%rsi), %r8
- COPY_MC_TEST_SRC %rsi 8 .E_read_words
- COPY_MC_TEST_DST %rdi 8 .E_write_words
.L_write_words:
movq %r8, (%rdi)
addq $8, %rsi
@@ -73,8 +66,6 @@ SYM_FUNC_START(copy_mc_fragile)
movl %edx, %ecx
.L_read_trailing_bytes:
movb (%rsi), %al
- COPY_MC_TEST_SRC %rsi 1 .E_trailing_bytes
- COPY_MC_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
movb %al, (%rdi)
incq %rsi
@@ -88,7 +79,6 @@ SYM_FUNC_START(copy_mc_fragile)
.L_done:
ret
SYM_FUNC_END(copy_mc_fragile)
-EXPORT_SYMBOL_GPL(copy_mc_fragile)
.section .fixup, "ax"
/*
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 58f7fb95c7f4..4229950a5d78 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -63,13 +63,12 @@ static bool is_string_insn(struct insn *insn)
*/
bool insn_has_rep_prefix(struct insn *insn)
{
+ insn_byte_t p;
int i;
insn_get_prefixes(insn);
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- insn_byte_t p = insn->prefixes.bytes[i];
-
+ for_each_insn_prefix(insn, i, p) {
if (p == 0xf2 || p == 0xf3)
return true;
}
@@ -95,14 +94,15 @@ static int get_seg_reg_override_idx(struct insn *insn)
{
int idx = INAT_SEG_REG_DEFAULT;
int num_overrides = 0, i;
+ insn_byte_t p;
insn_get_prefixes(insn);
/* Look for any segment override prefixes. */
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, i, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_CS):
idx = INAT_SEG_REG_CS;
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 037faac46b0c..1e299ac73c86 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -16,8 +16,6 @@
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
-.weak memcpy
-
/*
* memcpy - Copy a memory block.
*
@@ -30,7 +28,7 @@
* rax original destination
*/
SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_LOCAL(memcpy)
+SYM_FUNC_START_WEAK(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 7ff00ea64e4f..41902fe8b859 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -24,9 +24,7 @@
* Output:
* rax: dest
*/
-.weak memmove
-
-SYM_FUNC_START_ALIAS(memmove)
+SYM_FUNC_START_WEAK(memmove)
SYM_FUNC_START(__memmove)
mov %rdi, %rax
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9ff15ee404a4..0bfd26e4ca9e 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,8 +6,6 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>
-.weak memset
-
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
@@ -19,7 +17,7 @@
*
* rax original destination
*/
-SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START_WEAK(memset)
SYM_FUNC_START(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index fee8b9c0520c..75a0915b0d01 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -169,12 +169,11 @@ static void __wrmsr_safe_on_cpu(void *info)
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
struct msr_info_completion rv;
- call_single_data_t csd = {
- .func = __rdmsr_safe_on_cpu,
- .info = &rv,
- };
+ call_single_data_t csd;
int err;
+ INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);
+
memset(&rv, 0, sizeof(rv));
init_completion(&rv.done);
rv.msr.msr_no = msr_no;
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 3f435d7fca5e..c3e8a62ca561 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -9,9 +9,23 @@
#include <asm/tlbflush.h>
-/*
- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
- * nested NMI paths are careful to preserve CR2.
+/**
+ * copy_from_user_nmi - NMI safe copy from user
+ * @to: Pointer to the destination buffer
+ * @from: Pointer to a user space address of the current task
+ * @n: Number of bytes to copy
+ *
+ * Returns: The number of bytes not copied. 0 means success, i.e. all bytes copied
+ *
+ * Contrary to other copy_from_user() variants, this function can be called
+ * from NMI context. Despite the name, it is not restricted to NMI context;
+ * it is safe to call from any other context as well. It disables page
+ * faults across the copy, which means a fault will abort the copy.
+ *
+ * For NMI context invocations this relies on the nested NMI work to allow
+ * atomic faults from the NMI path; the nested NMI paths are careful to
+ * preserve CR2.
*/
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
@@ -27,7 +41,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
/*
* Even though this function is typically called from NMI/IRQ context
* disable pagefaults so that its behaviour is consistent even when
- * called form other contexts.
+ * called from other contexts.
*/
pagefault_disable();
ret = __copy_from_user_inatomic(to, from, n);
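
A usage sketch in the spirit of the kernel-doc above, e.g. a perf-style user stack walker; the struct layout, the bp source, and the helper name are assumptions for illustration:

	struct stack_frame {
		struct stack_frame __user *next_fp;
		unsigned long ret_addr;
	} frame;
	const void __user *fp = (void __user *)regs->bp;

	/* 0 means all bytes copied; anything else: a fault aborted the copy. */
	if (copy_from_user_nmi(&frame, fp, sizeof(frame)) == 0)
		record_callchain_entry(frame.ret_addr);	/* hypothetical helper */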
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 82bf37a5c9ec..f1f1b5a0956a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -30,6 +30,7 @@
#include <asm/cpu_entry_area.h> /* exception stack */
#include <asm/pgtable_areas.h> /* VMALLOC_START, ... */
#include <asm/kvm_para.h> /* kvm_handle_async_pf */
+#include <asm/vdso.h> /* fixup_vdso_exception() */
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
@@ -602,11 +603,9 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
oops_end(flags, regs, sig);
}
-static void set_signal_archinfo(unsigned long address,
- unsigned long error_code)
+static void sanitize_error_code(unsigned long address,
+ unsigned long *error_code)
{
- struct task_struct *tsk = current;
-
/*
* To avoid leaking information about the kernel page
* table layout, pretend that user-mode accesses to
@@ -617,7 +616,13 @@ static void set_signal_archinfo(unsigned long address,
* information and does not appear to cause any problems.
*/
if (address >= TASK_SIZE_MAX)
- error_code |= X86_PF_PROT;
+ *error_code |= X86_PF_PROT;
+}
+
+static void set_signal_archinfo(unsigned long address,
+ unsigned long error_code)
+{
+ struct task_struct *tsk = current;
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | X86_PF_USER;
@@ -658,6 +663,8 @@ no_context(struct pt_regs *regs, unsigned long error_code,
* faulting through the emulate_vsyscall() logic.
*/
if (current->thread.sig_on_uaccess_err && signal) {
+ sanitize_error_code(address, &error_code);
+
set_signal_archinfo(address, error_code);
/* XXX: hwpoison faults will set the wrong code. */
@@ -806,13 +813,10 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (is_errata100(regs, address))
return;
- /*
- * To avoid leaking information about the kernel page table
- * layout, pretend that user-mode accesses to kernel addresses
- * are always protection faults.
- */
- if (address >= TASK_SIZE_MAX)
- error_code |= X86_PF_PROT;
+ sanitize_error_code(address, &error_code);
+
+ if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
+ return;
if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
@@ -931,6 +935,11 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
if (is_prefetch(regs, error_code, address))
return;
+ sanitize_error_code(address, &error_code);
+
+ if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
+ return;
+
set_signal_archinfo(address, error_code);
#ifdef CONFIG_MEMORY_FAILURE
@@ -1102,6 +1111,18 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
return 1;
/*
+ * SGX hardware blocked the access. This usually happens
+ * when the enclave memory contents have been destroyed, like
+ * after a suspend/resume cycle. In any case, the kernel can't
+ * fix the cause of the fault. Handle the fault as an access
+ * error even in cases where no actual access violation
+ * occurred. This allows userspace to rebuild the enclave in
+ * response to the signal.
+ */
+ if (unlikely(error_code & X86_PF_SGX))
+ return 1;
+
+ /*
* Make sure to check the VMA so that we do not perform
* faults just to hit a X86_PF_PK as soon as we fill in a
* page.
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 075fe51317b0..2c54b76d8f84 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -4,65 +4,6 @@
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
- set_pte(kmap_pte-idx, mk_pte(page, prot));
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
- int idx, type;
-
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
- /*
- * Force other mappings to Oops if they'll try to access this
- * pte without first remap it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- arch_flush_lazy_mmu_mode();
- }
-#ifdef CONFIG_DEBUG_HIGHMEM
- else {
- BUG_ON(vaddr < PAGE_OFFSET);
- BUG_ON(vaddr >= (unsigned long)high_memory);
- }
-#endif
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
void __init set_highmem_pages_init(void)
{
struct zone *zone;
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index fe7a12599d8e..968d7005f4a7 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -62,6 +62,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
unsigned long addr, unsigned long end)
{
unsigned long next;
+ int result;
for (; addr < end; addr = next) {
p4d_t *p4d = p4d_page + p4d_index(addr);
@@ -73,13 +74,20 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
if (p4d_present(*p4d)) {
pud = pud_offset(p4d, 0);
- ident_pud_init(info, pud, addr, next);
+ result = ident_pud_init(info, pud, addr, next);
+ if (result)
+ return result;
+
continue;
}
pud = (pud_t *)info->alloc_pgt_page(info->context);
if (!pud)
return -ENOMEM;
- ident_pud_init(info, pud, addr, next);
+
+ result = ident_pud_init(info, pud, addr, next);
+ if (result)
+ return result;
+
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index c7a47603537f..e26f5c5c6565 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -596,7 +596,7 @@ static unsigned long __init get_new_step_size(unsigned long step_size)
static void __init memory_map_top_down(unsigned long map_start,
unsigned long map_end)
{
- unsigned long real_end, start, last_start;
+ unsigned long real_end, last_start;
unsigned long step_size;
unsigned long addr;
unsigned long mapped_ram_size = 0;
@@ -609,7 +609,7 @@ static void __init memory_map_top_down(unsigned long map_start,
step_size = PMD_SIZE;
max_pfn_mapped = 0; /* will get exact value next */
min_pfn_mapped = real_end >> PAGE_SHIFT;
- last_start = start = real_end;
+ last_start = real_end;
/*
* We start from the top (end of memory) and go to the bottom.
@@ -618,6 +618,8 @@ static void __init memory_map_top_down(unsigned long map_start,
* for page table.
*/
while (last_start > map_start) {
+ unsigned long start;
+
if (last_start > step_size) {
start = round_down(last_start - 1, step_size);
if (start < map_start)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 7c055259de3a..da31c2635ee4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -394,19 +394,6 @@ repeat:
return last_map_addr;
}
-pte_t *kmap_pte;
-
-static void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /*
- * Cache the first kmap pte:
- */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = virt_to_kpte(kmap_vstart);
-}
-
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
@@ -712,8 +699,6 @@ void __init paging_init(void)
__flush_tlb_all();
- kmap_init();
-
/*
* NOTE: at this point the bootmem allocator is fully available.
*/
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index f60398aeb644..9aaa756ddf21 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
}
EXPORT_SYMBOL_GPL(iomap_free);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
/*
* For non-PAT systems, translate non-WB request to UC- just in
@@ -81,36 +60,6 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
/* Filter out unsupported __PAGE_KERNEL* bits: */
pgprot_val(prot) &= __default_kernel_pte_mask;
- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
-}
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
- int idx, type;
-
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
- /*
- * Force other mappings to Oops if they'll try to access this
- * pte without first remap it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
- preempt_enable();
+ return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
}
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index efbb3de472df..bc0833713be9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -39,6 +39,7 @@
*/
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
+u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 733b983f3a89..6c5eb6f3f14f 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -45,8 +45,8 @@
#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
-#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
- (_PAGE_PAT | _PAGE_PWT))
+#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
+ (_PAGE_PAT_LARGE | _PAGE_PWT))
#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
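
The underlying reason for the fix: in a 2M/1G entry the PAT bit moves, because bit 7 is the page-size bit there. The architectural positions (these match the x86 definitions) are:

	#define _PAGE_BIT_PWT		3
	#define _PAGE_BIT_PCD		4
	#define _PAGE_BIT_PAT		7	/* 4K PTEs only; PSE in PMD/PUD */
	#define _PAGE_BIT_PAT_LARGE	12	/* PAT for 2M/1G entries */

With the old masks, bit 7 doubled as PSE and the large-page PAT bit was never actually set, so the entry selected a different PAT index than the intended write-protect slot.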
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 44148691d78b..5eb4dc2b97da 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -938,6 +938,7 @@ int phys_to_target_node(phys_addr_t start)
return meminfo_to_nid(&numa_reserved_meminfo, start);
}
+EXPORT_SYMBOL_GPL(phys_to_target_node);
int memory_add_physaddr_to_nid(u64 start)
{
@@ -947,4 +948,5 @@ int memory_add_physaddr_to_nid(u64 start)
nid = numa_meminfo.blk[0].nid;
return nid;
}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 40baa90e74f4..16f878c26667 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2194,6 +2194,7 @@ int set_direct_map_default_noflush(struct page *page)
return __set_pages_p(page, 1);
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
@@ -2225,8 +2226,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
arch_flush_lazy_mmu_mode();
}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
-#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
unsigned int level;
@@ -2238,7 +2239,6 @@ bool kernel_page_present(struct page *page)
pte = lookup_address((unsigned long)page_address(page), &level);
return (pte_val(*pte) & _PAGE_PRESENT);
}
-#endif /* CONFIG_HIBERNATION */
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 11666ba19b62..569ac1d57f55 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -474,8 +474,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/*
* The membarrier system call requires a full memory barrier and
* core serialization before returning to user-space, after
- * storing to rq->curr. Writing to CR3 provides that full
- * memory barrier and core serializing instruction.
+ * storing to rq->curr, when changing mm. This is because
+ * membarrier() sends IPIs to all CPUs that are in the target mm
+ * to make them issue memory barriers. However, if another CPU
+ * switches to/from the target mm concurrently with
+ * membarrier(), it can cause that CPU not to receive an IPI
+ * when it really should issue a memory barrier. Writing to CR3
+ * provides that full memory barrier and core serializing
+ * instruction.
*/
if (real_prev == next) {
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index a2488b6e27d6..1d8391fcca68 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -49,7 +49,7 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
struct stack_frame_ia32 *head;
/* User process is IA32 */
- if (!current || !test_thread_flag(TIF_IA32))
+ if (!current || user_64bit_mode(regs))
return 0;
head = (struct stack_frame_ia32 *) regs->bp;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index fa855bbaebaf..f2f4a5d50b27 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -366,9 +366,9 @@ static int __init pcibios_assign_resources(void)
return 0;
}
-/**
- * called in fs_initcall (one below subsys_initcall),
- * give a chance for motherboard reserve resources
+/*
+ * This is an fs_initcall (one below subsys_initcall) in order to reserve
+ * resources properly.
*/
fs_initcall(pcibios_assign_resources);
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 24ca4ee2802f..95e2e6bd8d8c 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,7 +215,7 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
struct irq_alloc_info info;
- int polarity;
+ bool polarity_low;
int ret;
u8 gsi;
@@ -230,7 +230,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
switch (intel_mid_identify_cpu()) {
case INTEL_MID_CPU_CHIP_TANGIER:
- polarity = IOAPIC_POL_HIGH;
+ polarity_low = false;
/* Special treatment for IRQ0 */
if (gsi == 0) {
@@ -252,11 +252,11 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
}
break;
default:
- polarity = IOAPIC_POL_LOW;
+ polarity_low = true;
break;
}
- ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);
+ ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity_low);
/*
* MRST only has an IOAPIC; the PCI irq lines are 1:1 mapped to
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 6fa42e9c4e6f..234998f196d4 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -425,7 +425,7 @@ static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
return AE_OK;
}
-static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used)
+static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
{
struct resource mcfg_res;
@@ -442,7 +442,7 @@ static bool is_acpi_reserved(u64 start, u64 end, unsigned not_used)
return mcfg_res.flags;
}
-typedef bool (*check_reserved_t)(u64 start, u64 end, unsigned type);
+typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);
static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
struct pci_mmcfg_region *cfg,
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c552cd2d0632..3d41a09c2c14 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -152,7 +152,6 @@ static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
-#include <asm/msidef.h>
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
@@ -210,23 +209,20 @@ free:
return ret;
}
-#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
- MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
-
static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
struct msi_msg *msg)
{
- /* We set vector == 0 to tell the hypervisor we don't care about it,
- * but we want a pirq setup instead.
- * We use the dest_id field to pass the pirq that we want. */
- msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
- msg->address_lo =
- MSI_ADDR_BASE_LO |
- MSI_ADDR_DEST_MODE_PHYSICAL |
- MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DEST_ID(pirq);
-
- msg->data = XEN_PIRQ_MSI_DATA;
+ /*
+ * We set vector == 0 to tell the hypervisor we don't care about
+ * it, but we want a pirq setup instead. We use the dest_id fields
+ * to pass the pirq that we want.
+ */
+ memset(msg, 0, sizeof(*msg));
+ msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
+ msg->arch_addr_hi.destid_8_31 = pirq >> 8;
+ msg->arch_addr_lo.destid_0_7 = pirq & 0xFF;
+ msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+ msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
}
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 8f5759df7776..e1e8d4e3a213 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -78,28 +78,30 @@ int __init efi_alloc_page_tables(void)
gfp_mask = GFP_KERNEL | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
if (!efi_pgd)
- return -ENOMEM;
+ goto fail;
pgd = efi_pgd + pgd_index(EFI_VA_END);
p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
- if (!p4d) {
- free_page((unsigned long)efi_pgd);
- return -ENOMEM;
- }
+ if (!p4d)
+ goto free_pgd;
pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
- if (!pud) {
- if (pgtable_l5_enabled())
- free_page((unsigned long) pgd_page_vaddr(*pgd));
- free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
- return -ENOMEM;
- }
+ if (!pud)
+ goto free_p4d;
efi_mm.pgd = efi_pgd;
mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
return 0;
+
+free_p4d:
+ if (pgtable_l5_enabled())
+ free_page((unsigned long)pgd_page_vaddr(*pgd));
+free_pgd:
+ free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
+fail:
+ return -ENOMEM;
}
/*
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
index 224ff0504890..1441dda8edf7 100644
--- a/arch/x86/platform/uv/Makefile
+++ b/arch/x86/platform/uv/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
+obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_time.o uv_nmi.o
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 54511eaccf4d..bf31af3d32d6 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -72,6 +72,7 @@ static s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
long sn_partition_id;
EXPORT_SYMBOL_GPL(sn_partition_id);
long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
long system_serial_number;
@@ -171,6 +172,60 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
(u64)decode, (u64)domain, (u64)bus, 0, 0);
}
+extern s64 uv_bios_get_master_nasid(u64 size, u64 *master_nasid)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, 0, UV_BIOS_EXTRA_MASTER_NASID, 0,
+ size, (u64)master_nasid);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_master_nasid);
+
+extern s64 uv_bios_get_heapsize(u64 nasid, u64 size, u64 *heap_size)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_GET_HEAPSIZE,
+ 0, size, (u64)heap_size);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_heapsize);
+
+extern s64 uv_bios_install_heap(u64 nasid, u64 heap_size, u64 *bios_heap)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_INSTALL_HEAP,
+ 0, heap_size, (u64)bios_heap);
+}
+EXPORT_SYMBOL_GPL(uv_bios_install_heap);
+
+extern s64 uv_bios_obj_count(u64 nasid, u64 size, u64 *objcnt)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_OBJECT_COUNT,
+ 0, size, (u64)objcnt);
+}
+EXPORT_SYMBOL_GPL(uv_bios_obj_count);
+
+extern s64 uv_bios_enum_objs(u64 nasid, u64 size, u64 *objbuf)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_ENUM_OBJECTS,
+ 0, size, (u64)objbuf);
+}
+EXPORT_SYMBOL_GPL(uv_bios_enum_objs);
+
+extern s64 uv_bios_enum_ports(u64 nasid, u64 obj_id, u64 size, u64 *portbuf)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_ENUM_PORTS,
+ obj_id, size, (u64)portbuf);
+}
+EXPORT_SYMBOL_GPL(uv_bios_enum_ports);
+
+extern s64 uv_bios_get_geoinfo(u64 nasid, u64 size, u64 *buf)
+{
+ return uv_bios_call(UV_BIOS_GET_GEOINFO, nasid, (u64)buf, size, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_geoinfo);
+
+extern s64 uv_bios_get_pci_topology(u64 size, u64 *buf)
+{
+ return uv_bios_call(UV_BIOS_GET_PCI_TOPOLOGY, (u64)buf, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_pci_topology);
+
unsigned long get_uv_systab_phys(bool msg)
{
if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
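
A hedged sketch of how a consumer (such as the uv_sysfs driver this plumbing serves) might chain the new wrappers; the nasid value, buffer sizing, and error policy are assumptions:

	u64 nasid = 0, heap_size, objcnt;
	s64 ret;

	ret = uv_bios_get_heapsize(nasid, sizeof(u64), &heap_size);
	if (ret != BIOS_STATUS_SUCCESS)
		return -EIO;

	ret = uv_bios_obj_count(nasid, sizeof(u64), &objcnt);
	if (ret != BIOS_STATUS_SUCCESS)
		return -EIO;
	/* then size a buffer and walk it via uv_bios_enum_objs(nasid, size, buf) */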
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 18ca2261cc9a..1a536a187d74 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -35,8 +35,8 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->vector = cfg->vector;
- entry->delivery_mode = apic->irq_delivery_mode;
- entry->dest_mode = apic->irq_dest_mode;
+ entry->delivery_mode = apic->delivery_mode;
+ entry->dest_mode = apic->dest_mode_logical;
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
diff --git a/arch/x86/platform/uv/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
deleted file mode 100644
index 266773e2fb37..000000000000
--- a/arch/x86/platform/uv/uv_sysfs.c
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
- *
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson
- */
-
-#include <linux/device.h>
-#include <asm/uv/bios.h>
-#include <asm/uv/uv.h>
-
-struct kobject *sgi_uv_kobj;
-
-static ssize_t partition_id_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
-}
-
-static ssize_t coherence_id_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", sn_coherency_id);
-}
-
-static struct kobj_attribute partition_id_attr =
- __ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
-
-static struct kobj_attribute coherence_id_attr =
- __ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
-
-
-static int __init sgi_uv_sysfs_init(void)
-{
- unsigned long ret;
-
- if (!is_uv_system())
- return -ENODEV;
-
- if (!sgi_uv_kobj)
- sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
- if (!sgi_uv_kobj) {
- printk(KERN_WARNING "kobject_create_and_add sgi_uv failed\n");
- return -EINVAL;
- }
-
- ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
- if (ret) {
- printk(KERN_WARNING "sysfs_create_file partition_id failed\n");
- return ret;
- }
-
- ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
- if (ret) {
- printk(KERN_WARNING "sysfs_create_file coherence_id failed\n");
- return ret;
- }
-
- return 0;
-}
-
-device_initcall(sgi_uv_sysfs_init);
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 7b37a412f829..f03b64d9cb51 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -9,7 +9,7 @@
*/
#include <linux/bug.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/purgatory.h>
#include "../boot/string.h"
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index fdcd58af707a..27361cbb7ca9 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -8,7 +8,7 @@
#include <sysdep/mcontext.h>
#include <sys/ucontext.h>
-void __section(".__syscall_stub")
+void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
ucontext_t *uc = p;
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index e82fd1910dae..0d46cc283cf5 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -148,15 +148,12 @@ static struct apic xen_pv_apic = {
.apic_id_valid = xen_id_always_valid,
.apic_id_registered = xen_id_always_registered,
- /* .irq_delivery_mode - used in native_compose_msi_msg only */
- /* .irq_dest_mode - used in native_compose_msi_msg only */
+ /* .delivery_mode and .dest_mode_logical not used by XENPV */
.disable_esr = 0,
- /* .dest_logical - default_send_IPI_ use it but we use our own. */
- .check_apicid_used = default_check_apicid_used, /* Used on 32-bit */
+ .check_apicid_used = default_check_apicid_used, /* Used on 32-bit */
.init_apic_ldr = xen_noop, /* setup_local_APIC calls it */
-
.ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
.setup_apic_routing = NULL,
.cpu_present_to_apicid = xen_cpu_present_to_apicid,
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 799f4eba0a62..043c73dfd2c9 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
+ int irq;
+
if (!xen_pvspin)
return;
- unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+ /*
+ * When booting the kernel with 'mitigations=auto,nosmt', the secondary
+ * CPUs are not activated, and lock_kicker_irq is not initialized.
+ */
+ irq = per_cpu(lock_kicker_irq, cpu);
+ if (irq == -1)
+ return;
+
+ unbind_from_irqhandler(irq, NULL);
per_cpu(lock_kicker_irq, cpu) = -1;
kfree(per_cpu(irq_name, cpu));
per_cpu(irq_name, cpu) = NULL;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index d0dfa50bd0bb..37ce1489364e 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -16,7 +16,6 @@ config XTENSA
select COMMON_CLK
select DMA_REMAP if MMU
select GENERIC_ATOMIC64
- select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
@@ -666,6 +665,7 @@ endchoice
config HIGHMEM
bool "High Memory Support"
depends on MMU
+ select KMAP_LOCAL
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index c59c42a1221a..9718e9593564 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -7,5 +7,4 @@ generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += seccomp.h
generic-y += user.h
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index a06ffb0c61c7..1c65dc1d3397 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -16,64 +16,23 @@
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <linux/pgtable.h>
-#include <asm/kmap_types.h>
-#endif
+#include <asm/kmap_size.h>
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the start of the consistent memory region upwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- */
+/* The map slots for temporary mappings via kmap_atomic/local(). */
enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- /* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN +
- (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
-#endif
+ (KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
__end_of_fixed_addresses
};
-#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+/* Enforce that FIXADDR_START is PMD aligned to handle cache aliasing */
+#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
+#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
-
-#ifndef __ASSEMBLY__
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /* Check if this memory layout is broken because fixmap overlaps page
- * table.
- */
- BUILD_BUG_ON(FIXADDR_START <
- TLBTEMP_BASE_1 + TLBTEMP_SIZE);
- BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-#endif
+#include <asm-generic/fixmap.h>
+#endif /* CONFIG_HIGHMEM */
#endif
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index eac503215f17..34b8b620e7f1 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -12,13 +12,13 @@
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
+#ifdef CONFIG_HIGHMEM
#include <linux/wait.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
-#include <asm/kmap_types.h>
-#define PKMAP_BASE ((FIXADDR_START - \
+#define PKMAP_BASE ((FIXADDR_START - \
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
@@ -59,6 +59,13 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
return pkmap_map_wait_arr + color;
}
+
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
+#define arch_kmap_local_map_idx kmap_local_map_idx
+
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
+#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
+
#endif
extern pte_t *pkmap_page_table;
@@ -68,6 +75,10 @@ static inline void flush_cache_kmaps(void)
flush_cache_all();
}
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
void kmap_init(void);
+#endif /* CONFIG_HIGHMEM */
#endif
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index 74923ef3b228..e337ba9686e9 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -111,6 +111,7 @@ static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
* to -1 says the process has never run on any core.
*/
+#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -136,24 +137,18 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
activate_context(next, cpu);
}
-#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
-#define deactivate_mm(tsk, mm) do { } while (0)
-
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
+#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
invalidate_page_directory();
}
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
- /* Nothing to do. */
-
-}
+#include <asm-generic/mmu_context.h>
#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 37251b2ef871..7c9d1918dc41 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -7,28 +7,4 @@ static inline void init_kio(void)
{
}
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
-{
- return 0;
-}
-
-static inline void destroy_context(struct mm_struct *mm)
-{
-}
-
-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
-}
-
-static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
-{
-}
+#include <asm-generic/nommu_context.h>
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index fa054a1772e1..4dc04e6c01d7 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -69,7 +69,7 @@
*/
#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
-#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
+#define TLBTEMP_BASE_1 (VMALLOC_START + 0x08000000)
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
diff --git a/arch/xtensa/include/asm/seccomp.h b/arch/xtensa/include/asm/seccomp.h
new file mode 100644
index 000000000000..f1cb6b0a9e1f
--- /dev/null
+++ b/arch/xtensa/include/asm/seccomp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm-generic/seccomp.h>
+
+#define SECCOMP_ARCH_NATIVE AUDIT_ARCH_XTENSA
+#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+#define SECCOMP_ARCH_NATIVE_NAME "xtensa"
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index b9758119feca..5c9fb8005aa8 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -302,7 +302,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
return -EFAULT;
}
#else
-long strncpy_from_user(char *dst, const char *src, long count);
+long strncpy_from_user(char *dst, const char __user *src, long count);
#endif
/*
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
index 005dec5bfde4..79ddabaa4e5d 100644
--- a/arch/xtensa/include/uapi/asm/signal.h
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -72,30 +72,6 @@ typedef struct {
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
#define MINSIGSTKSZ 2048
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 5835406b3cec..085b8c77b9d9 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -70,8 +70,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
kvaddr = TLBTEMP_BASE_1 +
(page_to_phys(page) & DCACHE_ALIAS_MASK);
+ preempt_disable();
__invalidate_dcache_page_alias(kvaddr,
page_to_phys(page));
+ preempt_enable();
}
}
}
@@ -156,6 +158,7 @@ void flush_dcache_page(struct page *page)
if (!alias && !mapping)
return;
+ preempt_disable();
virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(virt, phys);
@@ -166,6 +169,7 @@ void flush_dcache_page(struct page *page)
if (mapping)
__invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
}
/* There shouldn't be an entry in the cache for this page anymore. */
@@ -199,8 +203,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
unsigned long phys = page_to_phys(pfn_to_page(pfn));
unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(virt, phys);
__invalidate_icache_page_alias(virt, phys);
+ preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);
@@ -227,11 +233,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
unsigned long phys = page_to_phys(page);
unsigned long tmp;
+ preempt_disable();
tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
__invalidate_icache_page_alias(tmp, phys);
+ preempt_enable();
clear_bit(PG_arch_1, &page->flags);
}
@@ -265,7 +273,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
}
/* Copy data */
@@ -280,9 +290,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_range((unsigned long) dst, len);
if ((vma->vm_flags & VM_EXEC) != 0)
__invalidate_icache_page_alias(t, phys);
+ preempt_enable();
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst, len);
@@ -304,7 +316,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
if (alias) {
unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ preempt_disable();
__flush_invalidate_dcache_page_alias(t, phys);
+ preempt_enable();
}
memcpy(dst, src, len);
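Every hunk in this file adds the same bracketing, because the TLBTEMP alias window is a shared kernel-virtual resource backed by a temporary TLB entry: the task must not be preempted or migrated between computing the alias address and completing the flush. The recurring pattern as a standalone hedged sketch (the wrapper name is hypothetical):

	static void flush_page_alias_sketch(unsigned long phys)
	{
		unsigned long virt;

		/* No preemption while the temporary alias mapping
		 * for this cache color is in use. */
		preempt_disable();
		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);
		preempt_enable();
	}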
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 673196fe862e..35c4f7d4a333 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -12,8 +12,6 @@
#include <linux/highmem.h>
#include <asm/tlbflush.h>
-static pte_t *kmap_pte;
-
#if DCACHE_WAY_SIZE > PAGE_SIZE
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
@@ -25,67 +23,37 @@ static void __init kmap_waitqueues_init(void)
for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
init_waitqueue_head(pkmap_map_wait_arr + i);
}
-#else
-static inline void kmap_waitqueues_init(void)
-{
-}
-#endif
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
- return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
- color;
+ int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;
+
+ /*
+ * The fixmap operates top-down, so the color offset needs to be
+ * reversed as well.
+ */
+ return idx + DCACHE_N_COLORS - 1 - color;
}
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
{
- enum fixed_addresses idx;
- unsigned long vaddr;
-
- idx = kmap_idx(kmap_atomic_idx_push(),
- DCACHE_ALIAS(page_to_phys(page)));
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte + idx)));
-#endif
- set_pte(kmap_pte + idx, mk_pte(page, prot));
-
- return (void *)vaddr;
+ return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-void kunmap_atomic_high(void *kvaddr)
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
{
- if (kvaddr >= (void *)FIXADDR_START &&
- kvaddr < (void *)FIXADDR_TOP) {
- int idx = kmap_idx(kmap_atomic_idx(),
- DCACHE_ALIAS((unsigned long)kvaddr));
-
- /*
- * Force other mappings to Oops if they'll try to access this
- * pte without first remap it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- pte_clear(&init_mm, kvaddr, kmap_pte + idx);
- local_flush_tlb_kernel_range((unsigned long)kvaddr,
- (unsigned long)kvaddr + PAGE_SIZE);
-
- kmap_atomic_idx_pop();
- }
+ return kmap_idx(type, DCACHE_ALIAS(addr));
}
-EXPORT_SYMBOL(kunmap_atomic_high);
+
+#else
+static inline void kmap_waitqueues_init(void) { }
+#endif
void __init kmap_init(void)
{
- unsigned long kmap_vstart;
-
/* Check if this memory layout is broken because PKMAP overlaps
* the page table.
*/
BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = virt_to_kpte(kmap_vstart);
kmap_waitqueues_init();
}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index c6fc83efee0c..2daeba9e454e 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -89,8 +89,8 @@ static void __init free_highpages(void)
/* set highmem page free */
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
&range_start, &range_end, NULL) {
- unsigned long start = PHYS_PFN(range_start);
- unsigned long end = PHYS_PFN(range_end);
+ unsigned long start = PFN_UP(range_start);
+ unsigned long end = PFN_DOWN(range_end);
/* Ignore complete lowmem entries */
if (end <= max_low)
@@ -147,8 +147,8 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
- FIXADDR_START, FIXADDR_TOP,
- (FIXADDR_TOP - FIXADDR_START) >> 10,
+ FIXADDR_START, FIXADDR_END,
+ (FIXADDR_END - FIXADDR_START) >> 10,
#endif
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
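The free_highpages() hunk replaces plain truncation with inward rounding: PHYS_PFN() rounds both bounds down, so an unaligned range start could mark a page free that the range only partially covers, while PFN_UP()/PFN_DOWN() shrink the range to fully contained pages. A worked example with 4 KiB pages (values chosen for illustration):

	/* memblock range [0x1100, 0x3f00), PAGE_SHIFT == 12:
	 *   PHYS_PFN(0x1100) == 1  but page 1 starts at 0x1000 and is
	 *                          only partly inside the range, so
	 *                          freeing it would hand out memory
	 *                          the range does not own
	 *   PFN_UP(0x1100)   == 2  first fully covered page
	 *   PFN_DOWN(0x3f00) == 3  end bound rounded down
	 * so only pfn 2 is released as free highmem. */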
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index fd2193df8a14..7e4d97dc8bd8 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -52,7 +52,8 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
static void __init fixedrange_init(void)
{
- init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
+ BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+ init_pmd(FIXADDR_START, __end_of_fixed_addresses);
}
#endif
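Two fixes here: fixedrange_init() now maps the region upward from FIXADDR_START rather than from __fix_to_virt(0), which, given the top-down fixmap described in the highmem.c comment above, is the top of the region rather than its bottom; and a compile-time assertion keeps the fixmap from colliding with the TLBTEMP window, mirroring the PKMAP check in kmap_init(). For reference, BUILD_BUG_ON() fails the build when its condition is true; conceptually it works like this simplified sketch, not the kernel's exact definition:

	/* A negative array size is a compile error, so the build
	 * breaks as soon as the layout constraint is violated. */
	#define BUILD_BUG_ON_SKETCH(cond) \
		((void)sizeof(char[1 - 2 * !!(cond)]))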