-rw-r--r--Documentation/ABI/stable/sysfs-driver-dma-idxd9
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs19
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci17
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint-cfs.rst12
-rw-r--r--Documentation/admin-guide/README.rst2
-rw-r--r--Documentation/admin-guide/acpi/ssdt-overlays.rst49
-rw-r--r--Documentation/admin-guide/bootconfig.rst39
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt5
-rw-r--r--Documentation/admin-guide/mm/damon/index.rst15
-rw-r--r--Documentation/admin-guide/mm/damon/start.rst114
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst112
-rw-r--r--Documentation/admin-guide/mm/index.rst1
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst800
-rw-r--r--Documentation/arm/marvell.rst1
-rw-r--r--Documentation/block/blk-mq.rst2
-rw-r--r--Documentation/conf.py4
-rw-r--r--Documentation/core-api/cpu_hotplug.rst579
-rw-r--r--Documentation/core-api/kernel-api.rst3
-rw-r--r--Documentation/cpu-freq/cpu-drivers.rst3
-rw-r--r--Documentation/dev-tools/kfence.rst98
-rw-r--r--Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml31
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt2
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml70
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt2
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt6
-rw-r--r--Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt2
-rw-r--r--Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt2
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml8
-rw-r--r--Documentation/devicetree/bindings/dma/altr,msgdma.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml130
-rw-r--r--Documentation/devicetree/bindings/dma/st,stm32-dma.yaml7
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-virtio.yaml59
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-virtio.yaml51
-rw-r--r--Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt55
-rw-r--r--Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.yaml51
-rw-r--r--Documentation/devicetree/bindings/input/regulator-haptic.txt21
-rw-r--r--Documentation/devicetree/bindings/input/regulator-haptic.yaml43
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml62
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt44
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml68
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt31
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml128
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt64
-rw-r--r--Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml4
-rw-r--r--Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml4
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v1.yaml51
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-base.yaml214
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2.yaml475
-rw-r--r--Documentation/devicetree/bindings/opp/opp.txt622
-rw-r--r--Documentation/devicetree/bindings/opp/qcom-opp.txt2
-rw-r--r--Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml69
-rw-r--r--Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml97
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie-cfg.yaml39
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie.txt206
-rw-r--r--Documentation/devicetree/bindings/pci/pci-ep.yaml7
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt1
-rw-r--r--Documentation/devicetree/bindings/power/power-domain.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pon.txt49
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pon.yaml80
-rw-r--r--Documentation/devicetree/bindings/power/reset/reboot-mode.yaml2
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml40
-rw-r--r--Documentation/devicetree/bindings/spi/omap-spi.yaml6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-xilinx.yaml2
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-lmh.yaml82
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-zones.yaml2
-rw-r--r--Documentation/devicetree/bindings/virtio/mmio.yaml3
-rw-r--r--Documentation/devicetree/bindings/virtio/virtio-device.yaml41
-rw-r--r--Documentation/devicetree/bindings/watchdog/maxim,max63xx.yaml14
-rw-r--r--Documentation/driver-api/cxl/memory-devices.rst8
-rw-r--r--Documentation/features/vm/ELF-ASLR/arch-support.txt2
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt2
-rw-r--r--Documentation/filesystems/api-summary.rst3
-rw-r--r--Documentation/gpu/drm-mm.rst2
-rw-r--r--Documentation/kbuild/llvm.rst5
-rw-r--r--Documentation/kernel-hacking/hacking.rst4
-rw-r--r--Documentation/kernel-hacking/locking.rst12
-rw-r--r--Documentation/locking/futex-requeue-pi.rst2
-rw-r--r--Documentation/locking/ww-mutex-design.rst2
-rw-r--r--Documentation/power/energy-model.rst15
-rw-r--r--Documentation/process/applying-patches.rst2
-rw-r--r--Documentation/process/changes.rst2
-rw-r--r--Documentation/process/kernel-docs.rst14
-rw-r--r--Documentation/process/maintainer-pgp-guide.rst14
-rw-r--r--Documentation/translations/it_IT/kernel-hacking/hacking.rst2
-rw-r--r--Documentation/translations/it_IT/kernel-hacking/locking.rst4
-rw-r--r--Documentation/translations/zh_CN/admin-guide/README.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/cachetlb.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/index.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/irq/concepts.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/irq/index.rst7
-rw-r--r--Documentation/translations/zh_CN/core-api/irq/irq-affinity.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/irq/irq-domain.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/irq/irqflags-tracing.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/kernel-api.rst6
-rw-r--r--Documentation/translations/zh_CN/core-api/kobject.rst5
-rw-r--r--Documentation/translations/zh_CN/core-api/local_ops.rst6
-rw-r--r--Documentation/translations/zh_CN/core-api/padata.rst5
-rw-r--r--Documentation/translations/zh_CN/core-api/printk-basics.rst6
-rw-r--r--Documentation/translations/zh_CN/core-api/printk-formats.rst6
-rw-r--r--Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst6
-rw-r--r--Documentation/translations/zh_CN/core-api/symbol-namespaces.rst6
-rw-r--r--Documentation/translations/zh_CN/core-api/workqueue.rst6
-rw-r--r--Documentation/translations/zh_CN/cpu-freq/core.rst8
-rw-r--r--Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst10
-rw-r--r--Documentation/translations/zh_CN/cpu-freq/cpufreq-stats.rst8
-rw-r--r--Documentation/translations/zh_CN/cpu-freq/index.rst8
-rw-r--r--Documentation/translations/zh_CN/filesystems/debugfs.rst2
-rw-r--r--Documentation/translations/zh_CN/iio/ep93xx_adc.rst8
-rw-r--r--Documentation/translations/zh_CN/iio/iio_configfs.rst8
-rw-r--r--Documentation/translations/zh_CN/iio/index.rst8
-rw-r--r--Documentation/translations/zh_CN/kernel-hacking/hacking.rst2
-rw-r--r--Documentation/translations/zh_CN/mips/booting.rst7
-rw-r--r--Documentation/translations/zh_CN/mips/features.rst7
-rw-r--r--Documentation/translations/zh_CN/mips/index.rst7
-rw-r--r--Documentation/translations/zh_CN/mips/ingenic-tcu.rst7
-rw-r--r--Documentation/translations/zh_CN/openrisc/index.rst8
-rw-r--r--Documentation/translations/zh_CN/openrisc/openrisc_port.rst7
-rw-r--r--Documentation/translations/zh_CN/openrisc/todo.rst7
-rw-r--r--Documentation/translations/zh_CN/parisc/debugging.rst5
-rw-r--r--Documentation/translations/zh_CN/parisc/index.rst5
-rw-r--r--Documentation/translations/zh_CN/parisc/registers.rst5
-rw-r--r--Documentation/translations/zh_CN/riscv/boot-image-header.rst8
-rw-r--r--Documentation/translations/zh_CN/riscv/index.rst8
-rw-r--r--Documentation/translations/zh_CN/riscv/patch-acceptance.rst8
-rw-r--r--Documentation/translations/zh_CN/riscv/pmu.rst8
-rw-r--r--Documentation/translations/zh_TW/admin-guide/README.rst2
-rw-r--r--Documentation/translations/zh_TW/arm64/amu.rst104
-rw-r--r--Documentation/translations/zh_TW/arm64/booting.txt251
-rw-r--r--Documentation/translations/zh_TW/arm64/elf_hwcaps.rst244
-rw-r--r--Documentation/translations/zh_TW/arm64/hugetlbpage.rst49
-rw-r--r--Documentation/translations/zh_TW/arm64/index.rst23
-rw-r--r--Documentation/translations/zh_TW/arm64/legacy_instructions.txt77
-rw-r--r--Documentation/translations/zh_TW/arm64/memory.txt119
-rw-r--r--Documentation/translations/zh_TW/arm64/perf.rst88
-rw-r--r--Documentation/translations/zh_TW/arm64/silicon-errata.txt79
-rw-r--r--Documentation/translations/zh_TW/arm64/tagged-pointers.txt57
-rw-r--r--Documentation/translations/zh_TW/cpu-freq/core.rst108
-rw-r--r--Documentation/translations/zh_TW/cpu-freq/cpu-drivers.rst256
-rw-r--r--Documentation/translations/zh_TW/cpu-freq/cpufreq-stats.rst132
-rw-r--r--Documentation/translations/zh_TW/cpu-freq/index.rst47
-rw-r--r--Documentation/translations/zh_TW/filesystems/debugfs.rst224
-rw-r--r--Documentation/translations/zh_TW/filesystems/index.rst31
-rw-r--r--Documentation/translations/zh_TW/filesystems/sysfs.txt377
-rw-r--r--Documentation/translations/zh_TW/filesystems/tmpfs.rst148
-rw-r--r--Documentation/translations/zh_TW/filesystems/virtiofs.rst61
-rw-r--r--Documentation/translations/zh_TW/index.rst13
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst1
-rw-r--r--Documentation/userspace-api/vduse.rst233
-rw-r--r--Documentation/vm/damon/api.rst20
-rw-r--r--Documentation/vm/damon/design.rst166
-rw-r--r--Documentation/vm/damon/faq.rst51
-rw-r--r--Documentation/vm/damon/index.rst30
-rw-r--r--Documentation/vm/index.rst1
-rw-r--r--Documentation/x86/x86_64/mm.rst4
-rw-r--r--MAINTAINERS117
-rw-r--r--Makefile12
-rw-r--r--arch/Kconfig2
-rw-r--r--arch/alpha/include/asm/agp.h4
-rw-r--r--arch/alpha/include/asm/io.h6
-rw-r--r--arch/alpha/include/asm/setup.h43
-rw-r--r--arch/alpha/include/uapi/asm/setup.h42
-rw-r--r--arch/alpha/kernel/pci-sysfs.c12
-rw-r--r--arch/arc/kernel/traps.c5
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/Makefile14
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/dts/omap34xx.dtsi1
-rw-r--r--arch/arm/boot/dts/omap36xx.dtsi1
-rw-r--r--arch/arm/configs/dove_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/include/asm/div64.h11
-rw-r--r--arch/arm/include/asm/gpio.h4
-rw-r--r--arch/arm/include/asm/ptrace.h1
-rw-r--r--arch/arm/include/asm/syscall.h16
-rw-r--r--arch/arm/include/asm/thread_info.h6
-rw-r--r--arch/arm/include/asm/uaccess-asm.h6
-rw-r--r--arch/arm/include/asm/uaccess.h169
-rw-r--r--arch/arm/include/asm/unified.h4
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/asm-offsets.c3
-rw-r--r--arch/arm/kernel/entry-common.S20
-rw-r--r--arch/arm/kernel/process.c7
-rw-r--r--arch/arm/kernel/ptrace.c14
-rw-r--r--arch/arm/kernel/signal.c8
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c216
-rw-r--r--arch/arm/kernel/traps.c52
-rw-r--r--arch/arm/lib/copy_from_user.S3
-rw-r--r--arch/arm/lib/copy_to_user.S3
-rw-r--r--arch/arm/tools/syscall.tbl2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/compat.h5
-rw-r--r--arch/arm64/include/asm/uaccess.h11
-rw-r--r--arch/arm64/include/asm/unistd32.h10
-rw-r--r--arch/arm64/kernel/cacheinfo.c7
-rw-r--r--arch/arm64/kernel/pci.c29
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/lib/copy_in_user.S77
-rw-r--r--arch/arm64/mm/init.c22
-rw-r--r--arch/arm64/mm/mmu.c3
-rw-r--r--arch/h8300/kernel/traps.c4
-rw-r--r--arch/hexagon/kernel/traps.c4
-rw-r--r--arch/ia64/mm/init.c3
-rw-r--r--arch/m68k/include/asm/raw_io.h20
-rw-r--r--arch/m68k/mvme147/config.c4
-rw-r--r--arch/m68k/mvme16x/config.c4
-rw-r--r--arch/microblaze/Kbuild4
-rw-r--r--arch/microblaze/Makefile5
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S2
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/configs/pic32mzda_defconfig1
-rw-r--r--arch/mips/configs/rt305x_defconfig1
-rw-r--r--arch/mips/configs/xway_defconfig1
-rw-r--r--arch/mips/include/asm/compat.h8
-rw-r--r--arch/mips/include/asm/uaccess.h26
-rw-r--r--arch/mips/kernel/cacheinfo.c7
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl10
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl10
-rw-r--r--arch/mips/lib/memcpy.S11
-rw-r--r--arch/nds32/kernel/setup.c1
-rw-r--r--arch/nds32/kernel/traps.c5
-rw-r--r--arch/nios2/kernel/traps.c5
-rw-r--r--arch/openrisc/kernel/traps.c5
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/boot/compressed/Makefile18
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig1
-rw-r--r--arch/parisc/include/asm/compat.h6
-rw-r--r--arch/parisc/include/asm/page.h2
-rw-r--r--arch/parisc/include/asm/processor.h4
-rw-r--r--arch/parisc/include/asm/rt_sigframe.h2
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h127
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c1
-rw-r--r--arch/parisc/kernel/setup.c2
-rw-r--r--arch/parisc/kernel/signal.c45
-rw-r--r--arch/parisc/kernel/signal32.h2
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl8
-rw-r--r--arch/parisc/kernel/time.c7
-rw-r--r--arch/parisc/kernel/traps.c4
-rw-r--r--arch/parisc/lib/lusercopy.S52
-rw-r--r--arch/parisc/lib/memcpy.c9
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/include/asm/asm-const.h10
-rw-r--r--arch/powerpc/include/asm/compat.h16
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl10
-rw-r--r--arch/powerpc/kernel/traps.c5
-rw-r--r--arch/powerpc/mm/mem.c3
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c9
-rw-r--r--arch/riscv/Kconfig5
-rw-r--r--arch/riscv/Makefile7
-rw-r--r--arch/riscv/boot/Makefile8
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts6
-rw-r--r--arch/riscv/configs/defconfig4
-rw-r--r--arch/riscv/include/asm/elf.h3
-rw-r--r--arch/riscv/kernel/cacheinfo.c7
-rw-r--r--arch/riscv/kernel/traps.c5
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S1
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S4
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/defconfig1
-rw-r--r--arch/s390/configs/zfcpdump_defconfig1
-rw-r--r--arch/s390/include/asm/compat.h10
-rw-r--r--arch/s390/include/asm/cpu_mcf.h7
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/stacktrace.h20
-rw-r--r--arch/s390/include/asm/uaccess.h3
-rw-r--r--arch/s390/include/asm/unwind.h8
-rw-r--r--arch/s390/kernel/entry.S4
-rw-r--r--arch/s390/kernel/ftrace.c4
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c8
-rw-r--r--arch/s390/kernel/setup.c10
-rw-r--r--arch/s390/kernel/smp.c9
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl10
-rw-r--r--arch/s390/kernel/topology.c13
-rw-r--r--arch/s390/lib/uaccess.c63
-rw-r--r--arch/s390/mm/gmap.c11
-rw-r--r--arch/s390/mm/init.c3
-rw-r--r--arch/s390/mm/pgtable.c4
-rw-r--r--arch/s390/pci/pci_clp.c9
-rw-r--r--arch/sh/mm/init.c3
-rw-r--r--arch/sparc/include/asm/compat.h19
-rw-r--r--arch/sparc/kernel/mdesc.c3
-rw-r--r--arch/sparc/kernel/process_64.c2
-rw-r--r--arch/sparc/kernel/signal32.c12
-rw-r--r--arch/sparc/kernel/signal_64.c8
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl10
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/drivers/virt-pci.c108
-rw-r--r--arch/um/drivers/virtio_uml.c5
-rw-r--r--arch/um/kernel/skas/clone.c3
-rw-r--r--arch/um/kernel/trap.c4
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl4
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/hyperv/hv_apic.c43
-rw-r--r--arch/x86/include/asm/compat.h13
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/uaccess_64.h7
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c7
-rw-r--r--arch/x86/kernel/setup_percpu.c2
-rw-r--r--arch/x86/mm/init_32.c3
-rw-r--r--arch/x86/mm/init_64.c3
-rw-r--r--arch/x86/mm/kasan_init_64.c6
-rw-r--r--arch/x86/mm/numa.c2
-rw-r--r--arch/x86/mm/numa_emulation.c3
-rw-r--r--arch/x86/pci/numachip.c1
-rw-r--r--arch/x86/pci/sta2x11-fixup.c3
-rw-r--r--arch/x86/um/shared/sysdep/stub_32.h12
-rw-r--r--arch/x86/um/shared/sysdep/stub_64.h12
-rw-r--r--arch/x86/um/stub_segv.c3
-rw-r--r--block/Makefile2
-rw-r--r--block/bdev.c (renamed from fs/block_dev.c)641
-rw-r--r--block/blk-mq.c14
-rw-r--r--block/blk-throttle.c1
-rw-r--r--block/blk.h2
-rw-r--r--block/fops.c640
-rw-r--r--block/genhd.c9
-rw-r--r--drivers/acpi/acpi_memhotplug.c46
-rw-r--r--drivers/acpi/cppc_acpi.c47
-rw-r--r--drivers/acpi/prmt.c10
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/x86/s2idle.c67
-rw-r--r--drivers/auxdisplay/cfag12864b.c2
-rw-r--r--drivers/auxdisplay/charlcd.c4
-rw-r--r--drivers/auxdisplay/hd44780.c2
-rw-r--r--drivers/auxdisplay/ks0108.c18
-rw-r--r--drivers/base/arch_numa.c2
-rw-r--r--drivers/base/arch_topology.c2
-rw-r--r--drivers/base/memory.c225
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/power/wakeirq.c11
-rw-r--r--drivers/block/n64cart.c4
-rw-r--r--drivers/block/virtio_blk.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c23
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm12
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c14
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c4
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/cpufreq.c17
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c224
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c308
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c3
-rw-r--r--drivers/cpufreq/omap-cpufreq.c2
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c151
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c65
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/sh-cpufreq.c11
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c26
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c4
-rw-r--r--drivers/cxl/Makefile4
-rw-r--r--drivers/cxl/acpi.c12
-rw-r--r--drivers/cxl/core/Makefile8
-rw-r--r--drivers/cxl/core/bus.c (renamed from drivers/cxl/core.c)464
-rw-r--r--drivers/cxl/core/core.h20
-rw-r--r--drivers/cxl/core/memdev.c246
-rw-r--r--drivers/cxl/core/pmem.c230
-rw-r--r--drivers/cxl/core/regs.c249
-rw-r--r--drivers/cxl/cxl.h1
-rw-r--r--drivers/cxl/cxlmem.h (renamed from drivers/cxl/mem.h)35
-rw-r--r--drivers/cxl/pci.c439
-rw-r--r--drivers/cxl/pci.h1
-rw-r--r--drivers/cxl/pmem.c2
-rw-r--r--drivers/dax/kmem.c43
-rw-r--r--drivers/dax/super.c191
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/dma-buf/Kconfig4
-rw-r--r--drivers/dma/Kconfig28
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/acpi-dma.c18
-rw-r--r--drivers/dma/altera-msgdma.c37
-rw-r--r--drivers/dma/at_xdmac.c8
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c56
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h4
-rw-r--r--drivers/dma/dw/idma32.c138
-rw-r--r--drivers/dma/dw/internal.h16
-rw-r--r--drivers/dma/dw/of.c49
-rw-r--r--drivers/dma/dw/pci.c6
-rw-r--r--drivers/dma/dw/platform.c6
-rw-r--r--drivers/dma/ep93xx_dma.c6
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c10
-rw-r--r--drivers/dma/hisi_dma.c10
-rw-r--r--drivers/dma/idxd/Makefile8
-rw-r--r--drivers/dma/idxd/bus.c91
-rw-r--r--drivers/dma/idxd/cdev.c73
-rw-r--r--drivers/dma/idxd/compat.c107
-rw-r--r--drivers/dma/idxd/device.c382
-rw-r--r--drivers/dma/idxd/dma.c96
-rw-r--r--drivers/dma/idxd/idxd.h167
-rw-r--r--drivers/dma/idxd/init.c148
-rw-r--r--drivers/dma/idxd/irq.c190
-rw-r--r--drivers/dma/idxd/registers.h6
-rw-r--r--drivers/dma/idxd/submit.c43
-rw-r--r--drivers/dma/idxd/sysfs.c601
-rw-r--r--drivers/dma/ppc4xx/adma.c12
-rw-r--r--drivers/dma/ptdma/Kconfig13
-rw-r--r--drivers/dma/ptdma/Makefile10
-rw-r--r--drivers/dma/ptdma/ptdma-debugfs.c106
-rw-r--r--drivers/dma/ptdma/ptdma-dev.c305
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c389
-rw-r--r--drivers/dma/ptdma/ptdma-pci.c243
-rw-r--r--drivers/dma/ptdma/ptdma.h324
-rw-r--r--drivers/dma/sh/Kconfig9
-rw-r--r--drivers/dma/sh/Makefile1
-rw-r--r--drivers/dma/sh/rz-dmac.c969
-rw-r--r--drivers/dma/sh/usb-dmac.c2
-rw-r--r--drivers/dma/sprd-dma.c1
-rw-r--r--drivers/dma/stm32-dma.c8
-rw-r--r--drivers/dma/tegra210-adma.c7
-rw-r--r--drivers/dma/ti/k3-psil-j721e.c73
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c17
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c3
-rw-r--r--drivers/firewire/net.c4
-rw-r--r--drivers/firmware/qcom_scm.c58
-rw-r--r--drivers/firmware/qcom_scm.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c8
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h24
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_pll.c1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c31
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hwmon/mr75203.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c3
-rw-r--r--drivers/iio/light/as73211.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c4
-rw-r--r--drivers/input/joystick/analog.c107
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/adc-keys.c2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c2
-rw-r--r--drivers/input/keyboard/adp5589-keys.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c4
-rw-r--r--drivers/input/misc/Kconfig22
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c183
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c2
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c207
-rw-r--r--drivers/input/mouse/elan_i2c.h3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/serio/parkbd.c14
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c1
-rw-r--r--drivers/input/touchscreen/mms114.c15
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/amd/init.c48
-rw-r--r--drivers/iommu/intel/svm.c15
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/md/dm-table.c9
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/media/i2c/ov02a10.c2
-rw-r--r--drivers/misc/habanalabs/common/Makefile3
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c4
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c1299
-rw-r--r--drivers/misc/habanalabs/common/context.c146
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c184
-rw-r--r--drivers/misc/habanalabs/common/device.c163
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c56
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h421
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c13
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c2
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c198
-rw-r--r--drivers/misc/habanalabs/common/memory.c169
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c12
-rw-r--r--drivers/misc/habanalabs/common/pci/pci.c2
-rw-r--r--drivers/misc/habanalabs/common/state_dump.c718
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c20
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c716
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h19
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c5
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c8
-rw-r--r--drivers/misc/habanalabs/goya/goya.c102
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h115
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h62
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h3
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h17
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h2
-rw-r--r--drivers/misc/lkdtm/core.c10
-rw-r--r--drivers/misc/lkdtm/lkdtm.h28
-rw-r--r--drivers/misc/pci_endpoint_test.c9
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c2
-rw-r--r--drivers/net/dsa/lantiq_gswip.c6
-rw-r--r--drivers/net/dsa/qca8k.c30
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/8390/ne.c22
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c91
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c88
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c115
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c85
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c6
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c9
-rw-r--r--drivers/net/ethernet/sfc/efx.c78
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c106
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c79
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h8
-rw-r--r--drivers/net/ethernet/sfc/tx.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c44
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/ipa/ipa_table.c3
-rw-r--r--drivers/net/phy/dp83640_reg.h2
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/phy/phylink.c30
-rw-r--r--drivers/net/wan/Makefile2
-rw-r--r--drivers/nvdimm/label.c256
-rw-r--r--drivers/nvdimm/label.h1
-rw-r--r--drivers/nvdimm/namespace_devs.c113
-rw-r--r--drivers/nvdimm/nd.h150
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvme/host/core.c68
-rw-r--r--drivers/nvme/host/multipath.c19
-rw-r--r--drivers/nvme/host/nvme.h10
-rw-r--r--drivers/nvme/host/tcp.c22
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/configfs.c5
-rw-r--r--drivers/nvme/target/core.c10
-rw-r--r--drivers/nvme/target/nvmet.h11
-rw-r--r--drivers/nvme/target/passthru.c14
-rw-r--r--drivers/of/property.c3
-rw-r--r--drivers/parisc/dino.c18
-rw-r--r--drivers/pci/ats.c2
-rw-r--r--drivers/pci/controller/Kconfig1
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c61
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c200
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c3
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c16
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h29
-rw-r--r--drivers/pci/controller/dwc/Kconfig48
-rw-r--r--drivers/pci/controller/dwc/Makefile3
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c16
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c14
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c36
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c279
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c460
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c54
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c332
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c15
-rw-r--r--drivers/pci/controller/pci-aardvark.c334
-rw-r--r--drivers/pci/controller/pci-ftpci100.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c153
-rw-r--r--drivers/pci/controller/pci-tegra.c38
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c10
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c10
-rw-r--r--drivers/pci/controller/pcie-altera.c10
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c9
-rw-r--r--drivers/pci/controller/pcie-iproc-bcma.c16
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c13
-rw-r--r--drivers/pci/controller/pcie-mediatek.c64
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c18
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c23
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c94
-rw-r--r--drivers/pci/controller/pcie-rcar.h7
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c18
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c8
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c4
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c25
-rw-r--r--drivers/pci/controller/pcie-xilinx.c9
-rw-r--r--drivers/pci/controller/vmd.c55
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c89
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c74
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c24
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c134
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c146
-rw-r--r--drivers/pci/host-bridge.c1
-rw-r--r--drivers/pci/hotplug/TODO3
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c5
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/hotplug/pnv_php.c2
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci-acpi.c159
-rw-r--r--drivers/pci/pci-bridge-emul.h2
-rw-r--r--drivers/pci/pci-sysfs.c3
-rw-r--r--drivers/pci/pci.c331
-rw-r--r--drivers/pci/pci.h47
-rw-r--r--drivers/pci/pcie/aer.c12
-rw-r--r--drivers/pci/pcie/portdrv_core.c9
-rw-r--r--drivers/pci/pcie/ptm.c4
-rw-r--r--drivers/pci/probe.c29
-rw-r--r--drivers/pci/proc.c1
-rw-r--r--drivers/pci/quirks.c128
-rw-r--r--drivers/pci/remove.c1
-rw-r--r--drivers/pci/syscall.c7
-rw-r--r--drivers/pci/vpd.c490
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/platform/chrome/Makefile2
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c9
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c14
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h94
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c27
-rw-r--r--drivers/pwm/Kconfig5
-rw-r--r--drivers/pwm/core.c4
-rw-r--r--drivers/pwm/pwm-ab8500.c35
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c5
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c5
-rw-r--r--drivers/pwm/pwm-atmel.c102
-rw-r--r--drivers/pwm/pwm-bcm-kona.c12
-rw-r--r--drivers/pwm/pwm-brcmstb.c5
-rw-r--r--drivers/pwm/pwm-cros-ec.c4
-rw-r--r--drivers/pwm/pwm-ep93xx.c11
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c10
-rw-r--r--drivers/pwm/pwm-hibvt.c4
-rw-r--r--drivers/pwm/pwm-img.c20
-rw-r--r--drivers/pwm/pwm-imx-tpm.c5
-rw-r--r--drivers/pwm/pwm-imx27.c14
-rw-r--r--drivers/pwm/pwm-intel-lgm.c12
-rw-r--r--drivers/pwm/pwm-iqs620a.c16
-rw-r--r--drivers/pwm/pwm-jz4740.c12
-rw-r--r--drivers/pwm/pwm-keembay.c12
-rw-r--r--drivers/pwm/pwm-lp3943.c12
-rw-r--r--drivers/pwm/pwm-lpc32xx.c22
-rw-r--r--drivers/pwm/pwm-mediatek.c12
-rw-r--r--drivers/pwm/pwm-mtk-disp.c174
-rw-r--r--drivers/pwm/pwm-mxs.c25
-rw-r--r--drivers/pwm/pwm-ntxec.c14
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c5
-rw-r--r--drivers/pwm/pwm-pca9685.c5
-rw-r--r--drivers/pwm/pwm-pxa.c13
-rw-r--r--drivers/pwm/pwm-raspberrypi-poe.c12
-rw-r--r--drivers/pwm/pwm-rcar.c5
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c5
-rw-r--r--drivers/pwm/pwm-rockchip.c16
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/pwm/pwm-sifive.c6
-rw-r--r--drivers/pwm/pwm-sl28cpld.c12
-rw-r--r--drivers/pwm/pwm-stm32-lp.c12
-rw-r--r--drivers/pwm/pwm-sun4i.c5
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c4
-rw-r--r--drivers/pwm/pwm-twl-led.c17
-rw-r--r--drivers/pwm/pwm-twl.c17
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/lib.c107
-rw-r--r--drivers/rtc/lib_test.c81
-rw-r--r--drivers/rtc/rtc-cmos.c10
-rw-r--r--drivers/rtc/rtc-rx8025.c46
-rw-r--r--drivers/rtc/rtc-s5m.c48
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/s390/block/Kconfig11
-rw-r--r--drivers/s390/block/Makefile1
-rw-r--r--drivers/s390/block/xpram.c416
-rw-r--r--drivers/s390/char/con3270.c7
-rw-r--r--drivers/s390/char/ctrlchar.c11
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c2
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/s390/cio/blacklist.c5
-rw-r--r--drivers/s390/cio/device.c21
-rw-r--r--drivers/s390/cio/device_id.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c14
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c8
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c10
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c8
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c18
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c38
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/s390/scsi/zfcp_unit.c4
-rw-r--r--drivers/scsi/cxlflash/main.c34
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c14
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c2
-rw-r--r--drivers/thermal/qcom/Kconfig10
-rw-r--r--drivers/thermal/qcom/Makefile1
-rw-r--r--drivers/thermal/qcom/lmh.c232
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c6
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c110
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c1
-rw-r--r--drivers/thermal/tegra/Kconfig9
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c673
-rw-r--r--drivers/vdpa/Kconfig11
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c8
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h25
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c249
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h26
-rw-r--r--drivers/vdpa/mlx5/core/mr.c81
-rw-r--r--drivers/vdpa/mlx5/core/resources.c35
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c555
-rw-r--r--drivers/vdpa/vdpa.c9
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c29
-rw-r--r--drivers/vdpa/vdpa_user/Makefile5
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c545
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h73
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c1641
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c17
-rw-r--r--drivers/vhost/iotlb.c20
-rw-r--r--drivers/vhost/net.c11
-rw-r--r--drivers/vhost/scsi.c14
-rw-r--r--drivers/vhost/vdpa.c188
-rw-r--r--drivers/vhost/vsock.c28
-rw-r--r--drivers/video/fbdev/core/fbmem.c6
-rw-r--r--drivers/virtio/virtio.c56
-rw-r--r--drivers/virtio/virtio_balloon.c4
-rw-r--r--drivers/virtio/virtio_mem.c26
-rw-r--r--fs/Kconfig26
-rw-r--r--fs/Makefile4
-rw-r--r--fs/attr.c50
-rw-r--r--fs/btrfs/disk-io.c48
-rw-r--r--fs/btrfs/ioctl.c15
-rw-r--r--fs/btrfs/misc.h2
-rw-r--r--fs/btrfs/ordered-data.c8
-rw-r--r--fs/btrfs/volumes.c48
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/cache.h6
-rw-r--r--fs/ceph/caps.c266
-rw-r--r--fs/ceph/file.c32
-rw-r--r--fs/ceph/inode.c11
-rw-r--r--fs/ceph/mds_client.c218
-rw-r--r--fs/ceph/mds_client.h5
-rw-r--r--fs/ceph/mdsmap.c12
-rw-r--r--fs/ceph/metric.c4
-rw-r--r--fs/ceph/snap.c59
-rw-r--r--fs/ceph/strings.c1
-rw-r--r--fs/ceph/super.h9
-rw-r--r--fs/ceph/xattr.c19
-rw-r--r--fs/cifs/cifsencrypt.c2
-rw-r--r--fs/cifs/cifspdu.h2
-rw-r--r--fs/cifs/smb2ops.c20
-rw-r--r--fs/cifs/smbencrypt.c2
-rw-r--r--fs/coredump.c15
-rw-r--r--fs/erofs/super.c2
-rw-r--r--fs/eventpoll.c23
-rw-r--r--fs/ext2/super.c3
-rw-r--r--fs/ext4/super.c3
-rw-r--r--fs/file.c6
-rw-r--r--fs/filesystems.c27
-rw-r--r--fs/fs_parser.c1
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/hostfs/hostfs_kern.c1
-rw-r--r--fs/internal.h2
-rw-r--r--fs/io-wq.c41
-rw-r--r--fs/io_uring.c18
-rw-r--r--fs/ksmbd/ndr.c383
-rw-r--r--fs/ksmbd/oplock.c6
-rw-r--r--fs/ksmbd/smb2pdu.c69
-rw-r--r--fs/ksmbd/smb_common.c4
-rw-r--r--fs/ksmbd/smb_common.h1
-rw-r--r--fs/ksmbd/smbacl.c79
-rw-r--r--fs/ksmbd/smbacl.h25
-rw-r--r--fs/ksmbd/transport_rdma.c2
-rw-r--r--fs/ksmbd/vfs.c47
-rw-r--r--fs/ksmbd/vfs.h3
-rw-r--r--fs/ksmbd/vfs_cache.c16
-rw-r--r--fs/ksmbd/vfs_cache.h1
-rw-r--r--fs/namei.c116
-rw-r--r--fs/nilfs2/sysfs.c26
-rw-r--r--fs/nilfs2/the_nilfs.c9
-rw-r--r--fs/notify/mark.c1
-rw-r--r--fs/proc/array.c18
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/qnx4/dir.c51
-rw-r--r--fs/smbfs_common/Makefile (renamed from fs/cifs_common/Makefile)4
-rw-r--r--fs/smbfs_common/arc4.h (renamed from fs/cifs_common/arc4.h)0
-rw-r--r--fs/smbfs_common/cifs_arc4.c (renamed from fs/cifs_common/cifs_arc4.c)8
-rw-r--r--fs/smbfs_common/cifs_md4.c (renamed from fs/cifs_common/cifs_md4.c)0
-rw-r--r--fs/smbfs_common/md4.h (renamed from fs/cifs_common/md4.h)0
-rw-r--r--fs/smbfs_common/smbfsctl.h (renamed from fs/cifs/smbfsctl.h)16
-rw-r--r--fs/xfs/xfs_super.c16
-rw-r--r--include/acpi/cppc_acpi.h5
-rw-r--r--include/asm-generic/div64.h14
-rw-r--r--include/asm-generic/early_ioremap.h6
-rw-r--r--include/asm-generic/mshyperv.h21
-rw-r--r--include/asm-generic/pci_iomap.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/drm/ttm/ttm_tt.h3
-rw-r--r--include/linux/bootconfig.h4
-rw-r--r--include/linux/cacheinfo.h18
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/cgroup-defs.h107
-rw-r--r--include/linux/cgroup.h22
-rw-r--r--include/linux/compat.h39
-rw-r--r--include/linux/compiler-clang.h13
-rw-r--r--include/linux/compiler-gcc.h11
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_attributes.h43
-rw-r--r--include/linux/compiler_types.h6
-rw-r--r--include/linux/cpu.h6
-rw-r--r--include/linux/cpufreq.h75
-rw-r--r--include/linux/cpuhotplug.h132
-rw-r--r--include/linux/damon.h268
-rw-r--r--include/linux/dax.h41
-rw-r--r--include/linux/dmaengine.h3
-rw-r--r--include/linux/energy_model.h8
-rw-r--r--include/linux/eventpoll.h18
-rw-r--r--include/linux/file.h7
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/highmem-internal.h27
-rw-r--r--include/linux/hugetlb.h9
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memory.h55
-rw-r--r--include/linux/memory_hotplug.h34
-rw-r--r--include/linux/mmap_lock.h17
-rw-r--r--include/linux/mmzone.h19
-rw-r--r--include/linux/once.h2
-rw-r--r--include/linux/overflow.h138
-rw-r--r--include/linux/page-flags.h17
-rw-r--r--include/linux/page_ext.h2
-rw-r--r--include/linux/page_idle.h6
-rw-r--r--include/linux/pagemap.h7
-rw-r--r--include/linux/pci-acpi.h3
-rw-r--r--include/linux/pci-epc.h57
-rw-r--r--include/linux/pci-epf.h16
-rw-r--r--include/linux/pci.h159
-rw-r--r--include/linux/pci_hotplug.h2
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/platform_data/dma-dw.h21
-rw-r--r--include/linux/pwm.h2
-rw-r--r--include/linux/qcom_scm.h14
-rw-r--r--include/linux/rwsem.h12
-rw-r--r--include/linux/sched/user.h3
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slub_def.h6
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/thermal.h7
-rw-r--r--include/linux/threads.h2
-rw-r--r--include/linux/time64.h9
-rw-r--r--include/linux/uaccess.h10
-rw-r--r--include/linux/uio.h6
-rw-r--r--include/linux/units.h10
-rw-r--r--include/linux/vdpa.h62
-rw-r--r--include/linux/vhost_iotlb.h3
-rw-r--r--include/linux/vmalloc.h3
-rw-r--r--include/net/dsa.h5
-rw-r--r--include/trace/events/damon.h43
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/trace/events/page_ref.h4
-rw-r--r--include/uapi/asm-generic/unistd.h10
-rw-r--r--include/uapi/linux/cxl_mem.h2
-rw-r--r--include/uapi/linux/idxd.h24
-rw-r--r--include/uapi/linux/vduse.h306
-rw-r--r--include/uapi/linux/virtio_ids.h9
-rw-r--r--include/uapi/linux/virtio_pcidev.h5
-rw-r--r--include/uapi/linux/virtio_vsock.h3
-rw-r--r--include/uapi/misc/habanalabs.h186
-rw-r--r--init/Kconfig2
-rw-r--r--init/do_mounts.c90
-rw-r--r--init/initramfs.c2
-rw-r--r--init/main.c42
-rw-r--r--init/noinitramfs.c2
-rw-r--r--ipc/sem.c85
-rw-r--r--ipc/util.c16
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/bpf/disasm.c2
-rw-r--r--kernel/bpf/disasm.h2
-rw-r--r--kernel/bpf/stackmap.c10
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cgroup/cgroup.c50
-rw-r--r--kernel/compat.c21
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/futex.c190
-rw-r--r--kernel/kexec.c103
-rw-r--r--kernel/locking/rtmutex.c2
-rw-r--r--kernel/locking/rwsem.c10
-rw-r--r--kernel/printk/printk.c4
-rw-r--r--kernel/profile.c21
-rw-r--r--kernel/sched/core.c6
-rw-r--r--kernel/sched/idle.c4
-rw-r--r--kernel/sys.c7
-rw-r--r--kernel/sys_ni.c5
-rw-r--r--kernel/trace/trace.c26
-rw-r--r--kernel/trace/trace_boot.c43
-rw-r--r--kernel/trace/trace_eprobe.c5
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_events_hist.c14
-rw-r--r--kernel/trace/trace_osnoise.c6
-rw-r--r--kernel/trace/trace_output.c11
-rw-r--r--kernel/trace/trace_synth.h2
-rw-r--r--kernel/user.c25
-rw-r--r--lib/Kconfig.debug11
-rw-r--r--lib/bootconfig.c10
-rw-r--r--lib/dump_stack.c3
-rw-r--r--lib/iov_iter.c8
-rw-r--r--lib/logic_iomem.c16
-rw-r--r--lib/math/Kconfig2
-rw-r--r--lib/math/rational.c3
-rw-r--r--lib/test_printf.c2
-rw-r--r--lib/test_sort.c40
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/Kconfig15
-rw-r--r--mm/Makefile4
-rw-r--r--mm/compaction.c20
-rw-r--r--mm/damon/Kconfig68
-rw-r--r--mm/damon/Makefile5
-rw-r--r--mm/damon/core-test.h253
-rw-r--r--mm/damon/core.c720
-rw-r--r--mm/damon/dbgfs-test.h126
-rw-r--r--mm/damon/dbgfs.c623
-rw-r--r--mm/damon/vaddr-test.h329
-rw-r--r--mm/damon/vaddr.c672
-rw-r--r--mm/early_ioremap.c5
-rw-r--r--mm/highmem.c2
-rw-r--r--mm/hmm.c5
-rw-r--r--mm/ioremap.c25
-rw-r--r--mm/kfence/core.c3
-rw-r--r--mm/kfence/kfence.h2
-rw-r--r--mm/kfence/kfence_test.c3
-rw-r--r--mm/kfence/report.c19
-rw-r--r--mm/kmemleak.c5
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/maccess.c28
-rw-r--r--mm/memblock.c16
-rw-r--r--mm/memory_hotplug.c374
-rw-r--r--mm/mempolicy.c214
-rw-r--r--mm/memremap.c5
-rw-r--r--mm/migrate.c61
-rw-r--r--mm/page_alloc.c31
-rw-r--r--mm/page_ext.c12
-rw-r--r--mm/page_idle.c10
-rw-r--r--mm/page_isolation.c7
-rw-r--r--mm/page_owner.c14
-rw-r--r--mm/percpu.c1
-rw-r--r--mm/rmap.c6
-rw-r--r--mm/secretmem.c9
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/slub.c797
-rw-r--r--mm/vmalloc.c22
-rw-r--r--mm/vmscan.c2
-rw-r--r--mm/vmstat.c48
-rw-r--r--mm/workingset.c2
-rw-r--r--net/9p/client.c6
-rw-r--r--net/9p/trans_fd.c2
-rw-r--r--net/9p/trans_virtio.c4
-rw-r--r--net/9p/trans_xen.c4
-rw-r--r--net/caif/chnl_net.c19
-rw-r--r--net/core/netclassid_cgroup.c7
-rw-r--r--net/core/netprio_cgroup.c10
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/dsa/dsa.c5
-rw-r--r--net/dsa/dsa2.c46
-rw-r--r--net/dsa/dsa_priv.h1
-rw-r--r--net/dsa/slave.c12
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/udp_tunnel_nic.c2
-rw-r--r--net/ipv6/ip6_fib.c3
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/mctp/route.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/svc_xprt.c13
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/vmw_vsock/af_vsock.c10
-rw-r--r--net/vmw_vsock/virtio_transport_common.c23
-rwxr-xr-xscripts/check_extable.sh2
-rwxr-xr-xscripts/checkpatch.pl93
-rw-r--r--scripts/coccinelle/api/kvmalloc.cocci2
-rw-r--r--scripts/coccinelle/iterators/use_after_iter.cocci2
-rwxr-xr-xscripts/min-tool-version.sh8
-rw-r--r--scripts/module.lds.S1
-rw-r--r--scripts/sorttable.c1
-rw-r--r--sound/isa/gus/gus_main.c44
-rw-r--r--sound/isa/gus/interwave.c61
-rw-r--r--sound/pci/vx222/vx222.c1
-rw-r--r--sound/soc/codecs/rt5682.c3
-rw-r--r--sound/soc/generic/audio-graph-card.c6
-rw-r--r--sound/soc/intel/boards/Kconfig2
-rw-r--r--sound/soc/mediatek/Kconfig3
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c22
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c7
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c2
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/arch/x86/include/asm/amd-ibs.h132
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--tools/bootconfig/include/linux/memblock.h3
-rw-r--r--tools/bootconfig/main.c4
-rwxr-xr-xtools/bootconfig/scripts/ftrace2bconf.sh4
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh4
-rw-r--r--tools/include/linux/bitmap.h4
-rw-r--r--tools/include/linux/compiler-gcc.h8
-rw-r--r--tools/include/linux/overflow.h140
-rw-r--r--tools/include/uapi/asm-generic/unistd.h14
-rw-r--r--tools/include/uapi/drm/drm.h14
-rw-r--r--tools/include/uapi/drm/i915_drm.h498
-rw-r--r--tools/include/uapi/linux/fs.h1
-rw-r--r--tools/include/uapi/linux/in.h42
-rw-r--r--tools/include/uapi/linux/kvm.h11
-rw-r--r--tools/include/uapi/linux/mount.h3
-rw-r--r--tools/include/uapi/linux/prctl.h12
-rw-r--r--tools/include/uapi/sound/asound.h1
-rw-r--r--tools/pci/pcitest.c2
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Makefile.config47
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl12
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl14
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl3
-rw-r--r--tools/perf/bench/find-bit-bench.c2
-rw-r--r--tools/perf/builtin-c2c.c6
-rw-r--r--tools/perf/builtin-record.c2
-rwxr-xr-xtools/perf/check-headers.sh1
-rwxr-xr-xtools/perf/scripts/python/bin/stackcollapse-report2
-rw-r--r--tools/perf/tests/bitmap.c2
-rw-r--r--tools/perf/tests/bpf.c2
-rw-r--r--tools/perf/tests/mem2node.c2
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h9
-rwxr-xr-xtools/perf/trace/beauty/move_mount_flags.sh2
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/affinity.c4
-rw-r--r--tools/perf/util/amd-sample-raw.c289
-rw-r--r--tools/perf/util/bpf-event.c8
-rw-r--r--tools/perf/util/dso.c10
-rw-r--r--tools/perf/util/env.c78
-rw-r--r--tools/perf/util/env.h5
-rw-r--r--tools/perf/util/evsel.c20
-rw-r--r--tools/perf/util/evsel.h3
-rw-r--r--tools/perf/util/header.c4
-rw-r--r--tools/perf/util/metricgroup.c2
-rw-r--r--tools/perf/util/mmap.c4
-rw-r--r--tools/perf/util/parse-events-hybrid.c18
-rw-r--r--tools/perf/util/parse-events.c27
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c5
-rw-r--r--tools/perf/util/sample-raw.c8
-rw-r--r--tools/perf/util/sample-raw.h6
-rw-r--r--tools/perf/util/symbol.c20
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c137
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.h16
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c79
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_pt_regs.c1
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_dropper.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_pt_regs.c19
-rw-r--r--tools/testing/selftests/damon/Makefile7
-rw-r--r--tools/testing/selftests/damon/_chk_dependency.sh28
-rw-r--r--tools/testing/selftests/damon/debugfs_attrs.sh75
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc2
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c2
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c2
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c2
-rw-r--r--tools/testing/selftests/nci/nci_dev.c2
-rwxr-xr-xtools/testing/selftests/net/altnames.sh2
-rw-r--r--tools/testing/vsock/vsock_test.c8
-rw-r--r--tools/thermal/tmon/Makefile10
1129 files changed, 36722 insertions, 13147 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index d431e2d00472..df4afbccf037 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -128,6 +128,8 @@ Date: Aug 28, 2020
KernelVersion: 5.10.0
Contact: dmaengine@vger.kernel.org
Description: The last executed device administrative command's status/error.
+ The attribute is also overloaded to report the last configuration error.
+ Writing to it will clear the status.
What: /sys/bus/dsa/devices/wq<m>.<n>/block_on_fault
Date: Oct 27, 2020
@@ -211,6 +213,13 @@ Contact: dmaengine@vger.kernel.org
Description: Indicate whether ATS disable is turned on for the workqueue.
0 indicates ATS is on, and 1 indicates ATS is off for the workqueue.
+What: /sys/bus/dsa/devices/wq<m>.<n>/occupancy
+Date: May 25, 2021
+KernelVersion: 5.14.0
+Contact: dmaengine@vger.kernel.org
+Description: Show the current number of entries in this WQ if the WQ
+ Occupancy Support bit in WQ capabilities is 1.
+
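A hedged sketch of exercising the two attributes above; the instance names dsa0 and wq0.0, and the value written to clear the status, are assumptions rather than something this document specifies::

    # Read the current number of entries in the workqueue (requires the
    # WQ Occupancy Support capability bit to be 1).
    cat /sys/bus/dsa/devices/wq0.0/occupancy

    # Writing to cmd_status clears the last command/configuration status;
    # the value written here is an assumption.
    echo 1 > /sys/bus/dsa/devices/dsa0/cmd_status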
What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index a5c28f606865..284e2dfa61cd 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -215,6 +215,17 @@ Description: Sets the skip reset on timeout option for the device. Value of
"0" means device will be reset in case some CS has timed out,
otherwise it will not be reset.
+What: /sys/kernel/debug/habanalabs/hl<n>/state_dump
+Date: Oct 2021
+KernelVersion: 5.15
+Contact: ynudelman@habana.ai
+Description: Gets the state dump occurring on a CS timeout or failure.
+ The state dump is used for debugging and is created whenever a
+ problem occurs during CS execution, before reset.
+ Reading from the node returns the newest state dump available.
+ Writing an integer X discards the X newest state dumps, so that
+ the next read returns the (X+1)-th newest state dump.
+
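A minimal sketch of the read/discard protocol described above, assuming a device instance named hl0::

    # Read the newest state dump available.
    cat /sys/kernel/debug/habanalabs/hl0/state_dump

    # Discard the two newest state dumps; the next read then returns
    # the third-newest dump.
    echo 2 > /sys/kernel/debug/habanalabs/hl0/state_dump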
What: /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
Date: Mar 2020
KernelVersion: 5.6
@@ -230,6 +241,14 @@ Description: Displays a list with information about the currently user
pointers (user virtual addresses) that are pinned and mapped
to DMA addresses
+What: /sys/kernel/debug/habanalabs/hl<n>/userptr_lookup
+Date: Aug 2021
+KernelVersion: 5.15
+Contact: ogabbay@kernel.org
+Description: Allows searching for specific user pointers (user virtual
+ addresses) that are pinned and mapped to DMA addresses, and seeing
+ their resolution to the specific DMA address.
+
What: /sys/kernel/debug/habanalabs/hl<n>/vm
Date: Jan 2019
KernelVersion: 5.1
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 793cbb76cd25..d4ae03296861 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -121,6 +121,23 @@ Description:
child buses, and re-discover devices removed earlier
from this part of the device tree.
+What: /sys/bus/pci/devices/.../reset_method
+Date: August 2021
+Contact: Amey Narkhede <ameynarkhede03@gmail.com>
+Description:
+ Some devices allow an individual function to be reset
+ without affecting other functions in the same slot.
+
+ For devices that have this support, a file named
+ reset_method is present in sysfs. Reading this file
+ gives names of the supported and enabled reset methods and
+ their ordering. Writing a space-separated list of names of
+ reset methods sets the reset methods and ordering to be
+ used when resetting the device. Writing an empty string
+ disables the ability to reset the device. Writing
+ "default" enables all supported reset methods in the
+ default ordering.
+
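For illustration, with an assumed device address (method names such as flr, pm and bus are the kernel's standard PCI reset methods)::

    # Show the supported and enabled reset methods in their ordering.
    cat /sys/bus/pci/devices/0000:01:00.0/reset_method

    # Allow only function-level reset, then restore the default set.
    echo flr > /sys/bus/pci/devices/0000:01:00.0/reset_method
    echo default > /sys/bus/pci/devices/0000:01:00.0/reset_method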
What: /sys/bus/pci/devices/.../reset
Date: July 2009
Contact: Michael S. Tsirkin <mst@redhat.com>
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
index db609b97ad58..fb73345cfb8a 100644
--- a/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
+++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.rst
@@ -43,6 +43,7 @@ entries corresponding to EPF driver will be created by the EPF core.
.. <EPF Driver1>/
... <EPF Device 11>/
... <EPF Device 21>/
+ ... <EPF Device 31>/
.. <EPF Driver2>/
... <EPF Device 12>/
... <EPF Device 22>/
@@ -68,6 +69,7 @@ created)
... subsys_vendor_id
... subsys_id
... interrupt_pin
+ ... <Symlink EPF Device 31>/
... primary/
... <Symlink EPC Device1>/
... secondary/
@@ -79,6 +81,13 @@ interface should be added in 'primary' directory and symlink of endpoint
controller connected to secondary interface should be added in 'secondary'
directory.
+The <EPF Device> directory can have a list of symbolic links
+(<Symlink EPF Device 31>) to other <EPF Device>. These symbolic links should
+be created by the user to represent the virtual functions that are bound to
+the physical function. In the above directory structure, <EPF Device 11> is a
+physical function and <EPF Device 31> is a virtual function. Once an EPF
+device is linked to another EPF device, it cannot be linked to an EPC device.
+
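A sketch of creating such a link, with hypothetical names under the pci_ep configfs root (func1 standing in for <EPF Device 11>, func3 for <EPF Device 31>)::

    cd /sys/kernel/config/pci_ep
    # Link the virtual function into the physical function's directory.
    ln -s functions/pci_epf_test/func3 functions/pci_epf_test/func1/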
EPC Device
==========
@@ -98,7 +107,8 @@ entries corresponding to EPC device will be created by the EPC core.
The <EPC Device> directory will have a list of symbolic links to
<EPF Device>. These symbolic links should be created by the user to
-represent the functions present in the endpoint device.
+represent the functions present in the endpoint device. Only an <EPF Device>
+that represents a physical function can be linked to an EPC device.
The <EPC Device> directory will also have a *start* field. Once
"1" is written to this field, the endpoint device will be ready to
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 35314b63008c..caa3c09a5c3f 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -259,7 +259,7 @@ Configuring the kernel
Compiling the kernel
--------------------
- - Make sure you have at least gcc 4.9 available.
+ - Make sure you have at least gcc 5.1 available.
For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
Please note that you can still run a.out user programs with this kernel.
diff --git a/Documentation/admin-guide/acpi/ssdt-overlays.rst b/Documentation/admin-guide/acpi/ssdt-overlays.rst
index 5d7e25988085..b5fbf54dca19 100644
--- a/Documentation/admin-guide/acpi/ssdt-overlays.rst
+++ b/Documentation/admin-guide/acpi/ssdt-overlays.rst
@@ -30,22 +30,21 @@ following ASL code can be used::
{
Device (STAC)
{
- Name (_ADR, Zero)
Name (_HID, "BMA222E")
+ Name (RBUF, ResourceTemplate ()
+ {
+ I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
+ AddressingMode7Bit, "\\_SB.I2C6", 0x00,
+ ResourceConsumer, ,)
+ GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
+ "\\_SB.GPO2", 0x00, ResourceConsumer, , )
+ { // Pin list
+ 0
+ }
+ })
Method (_CRS, 0, Serialized)
{
- Name (RBUF, ResourceTemplate ()
- {
- I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
- AddressingMode7Bit, "\\_SB.I2C6", 0x00,
- ResourceConsumer, ,)
- GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
- "\\_SB.GPO2", 0x00, ResourceConsumer, , )
- { // Pin list
- 0
- }
- })
Return (RBUF)
}
}
@@ -75,7 +74,7 @@ This option allows loading of user defined SSDTs from initrd and it is useful
when the system does not support EFI or when there is not enough EFI storage.
It works in a similar way with initrd based ACPI tables override/upgrade: SSDT
-aml code must be placed in the first, uncompressed, initrd under the
+AML code must be placed in the first, uncompressed, initrd under the
"kernel/firmware/acpi" path. Multiple files can be used and this will translate
in loading multiple tables. Only SSDT and OEM tables are allowed. See
initrd_table_override.txt for more details.
@@ -103,12 +102,14 @@ This is the preferred method, when EFI is supported on the platform, because it
allows a persistent, OS independent way of storing the user defined SSDTs. There
is also work underway to implement EFI support for loading user defined SSDTs
and using this method will make it easier to convert to the EFI loading
-mechanism when that will arrive.
+mechanism when it arrives. To enable it, the
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS option should be set to y.
-In order to load SSDTs from an EFI variable the efivar_ssdt kernel command line
-parameter can be used. The argument for the option is the variable name to
-use. If there are multiple variables with the same name but with different
-vendor GUIDs, all of them will be loaded.
+In order to load SSDTs from an EFI variable the ``"efivar_ssdt=..."`` kernel
+command line parameter can be used (the variable name is limited to 16 characters).
+The argument for the option is the variable name to use. If there are multiple
+variables with the same name but with different vendor GUIDs, all of them will
+be loaded.
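+
+For example, a hypothetical variable named "SSDT" would be selected by adding
+the following to the kernel command line::
+
+ efivar_ssdt=SSDT
+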
In order to store the AML code in an EFI variable the efivarfs filesystem can be
used. It is enabled and mounted by default in /sys/firmware/efi/efivars in all
@@ -127,7 +128,7 @@ variable with the content from a given file::
#!/bin/sh -e
- while ! [ -z "$1" ]; do
+ while [ -n "$1" ]; do
case "$1" in
"-f") filename="$2"; shift;;
"-g") guid="$2"; shift;;
@@ -167,14 +168,14 @@ variable with the content from a given file::
Loading ACPI SSDTs from configfs
================================
-This option allows loading of user defined SSDTs from userspace via the configfs
+This option allows loading of user defined SSDTs from user space via the configfs
interface. The CONFIG_ACPI_CONFIGFS option must be select and configfs must be
mounted. In the following examples, we assume that configfs has been mounted in
-/config.
+/sys/kernel/config.
-New tables can be loading by creating new directories in /config/acpi/table/ and
-writing the SSDT aml code in the aml attribute::
+New tables can be loaded by creating new directories in /sys/kernel/config/acpi/table
+and writing the SSDT AML code in the aml attribute::
- cd /config/acpi/table
+ cd /sys/kernel/config/acpi/table
mkdir my_ssdt
cat ~/ssdt.aml > my_ssdt/aml
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index 6a79f2e59396..a1860fc0ca88 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -178,7 +178,7 @@ update the boot loader and the kernel image itself as long as the boot
loader passes the correct initrd file size. If by any chance, the boot
loader passes a longer size, the kernel fails to find the bootconfig data.
-To do this operation, Linux kernel provides "bootconfig" command under
+To do this operation, the Linux kernel provides the ``bootconfig`` command under
tools/bootconfig, which allows admin to apply or delete the config file
to/from initrd image. You can build it by the following command::
@@ -196,6 +196,43 @@ To remove the config from the image, you can use -d option as below::
Then add "bootconfig" on the normal kernel command line to tell the
kernel to look for the bootconfig at the end of the initrd file.
+
+Kernel parameters via Boot Config
+=================================
+
+In addition to the kernel command line, the boot config can be used for
+passing kernel parameters. All the key-value pairs under the ``kernel``
+key will be passed to the kernel cmdline directly. Moreover, the key-value
+pairs under the ``init`` key will be passed to the init process via the
+cmdline. The parameters are concatenated with the user-given kernel cmdline
+string in the following order, so that command line parameters can override
+bootconfig parameters (this depends on how the subsystem handles parameters,
+but in general, an earlier parameter will be overwritten by a later one)::
+
+ [bootconfig params][cmdline params] -- [bootconfig init params][cmdline init params]
+
+Here is an example of a bootconfig file for kernel/init parameters::
+
+ kernel {
+ root = 01234567-89ab-cdef-0123-456789abcd
+ }
+ init {
+ splash
+ }
+
+This will be copied into the kernel cmdline string as the following::
+
+ root="01234567-89ab-cdef-0123-456789abcd" -- splash
+
+If the user gives some other command line like::
+
+ ro bootconfig -- quiet
+
+The final kernel cmdline will be the following::
+
+ root="01234567-89ab-cdef-0123-456789abcd" ro bootconfig -- splash quiet
+
+
Config File Limitation
======================
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 828d11441ebf..91ba391f9b32 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1758,6 +1758,11 @@
support for the idxd driver. By default it is set to
true (1).
+ idxd.tc_override= [HW]
+ Format: <bool>
+ Allow override of default traffic class configuration
+ for the device. By default it is set to false (0).
+
ieee754= [MIPS] Select IEEE Std 754 conformance mode
Format: { strict | legacy | 2008 | relaxed }
Default: strict
diff --git a/Documentation/admin-guide/mm/damon/index.rst b/Documentation/admin-guide/mm/damon/index.rst
new file mode 100644
index 000000000000..8c5dde3a5754
--- /dev/null
+++ b/Documentation/admin-guide/mm/damon/index.rst
@@ -0,0 +1,15 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+Monitoring Data Accesses
+========================
+
+:doc:`DAMON </vm/damon/index>` allows light-weight data access monitoring.
+Using DAMON, users can analyze the memory access patterns of their systems and
+optimize them.
+
+.. toctree::
+ :maxdepth: 2
+
+ start
+ usage
diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst
new file mode 100644
index 000000000000..d5eb89a8fc38
--- /dev/null
+++ b/Documentation/admin-guide/mm/damon/start.rst
@@ -0,0 +1,114 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Getting Started
+===============
+
+This document briefly describes how you can use DAMON by demonstrating its
+default user space tool. Please note that this document describes only a part
+of its features for brevity. Please refer to :doc:`usage` for more details.
+
+
+TL; DR
+======
+
+Follow the commands below to monitor and visualize the memory access pattern of
+your workload. ::
+
+ # # build the kernel with CONFIG_DAMON_*=y, install it, and reboot
+ # mount -t debugfs none /sys/kernel/debug/
+ # git clone https://github.com/awslabs/damo
+ # ./damo/damo record $(pidof <your workload>)
+ # ./damo/damo report heat --plot_ascii
+
+The final command draws the access heatmap of ``<your workload>``. The heatmap
+shows which memory region (x-axis) is accessed when (y-axis) and how frequently
+(number; the higher the more accesses have been observed). ::
+
+ 111111111111111111111111111111111111111111111111111111110000
+ 111121111111111111111111111111211111111111111111111111110000
+ 000000000000000000000000000000000000000000000000001555552000
+ 000000000000000000000000000000000000000000000222223555552000
+ 000000000000000000000000000000000000000011111677775000000000
+ 000000000000000000000000000000000000000488888000000000000000
+ 000000000000000000000000000000000177888400000000000000000000
+ 000000000000000000000000000046666522222100000000000000000000
+ 000000000000000000000014444344444300000000000000000000000000
+ 000000000000000002222245555510000000000000000000000000000000
+ # access_frequency: 0 1 2 3 4 5 6 7 8 9
+ # x-axis: space (140286319947776-140286426374096: 101.496 MiB)
+ # y-axis: time (605442256436361-605479951866441: 37.695430s)
+ # resolution: 60x10 (1.692 MiB and 3.770s for each character)
+
+
+Prerequisites
+=============
+
+Kernel
+------
+
+You should first ensure your system is running on a kernel built with
+``CONFIG_DAMON_*=y``.
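+
+For example, a minimal configuration might look as below (a sketch; the exact
+set of options depends on your kernel version)::
+
+ CONFIG_DAMON=y
+ CONFIG_DAMON_VADDR=y
+ CONFIG_DAMON_DBGFS=y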
+
+
+User Space Tool
+---------------
+
+For the demonstration, we will use the default user space tool for DAMON,
+called DAMON Operator (DAMO). It is available at
+https://github.com/awslabs/damo. The examples below assume that ``damo`` is on
+your ``$PATH``. It's not mandatory, though.
+
+Because DAMO uses the debugfs interface of DAMON (refer to :doc:`usage` for
+details), you should ensure debugfs is mounted. Mount it manually as
+below::
+
+ # mount -t debugfs none /sys/kernel/debug/
+
+or append the following line to your ``/etc/fstab`` file so that your system
+can automatically mount debugfs upon booting::
+
+ debugfs /sys/kernel/debug debugfs defaults 0 0
+
+
+Recording Data Access Patterns
+==============================
+
+The commands below record the memory access patterns of a program and save the
+monitoring results to a file. ::
+
+ $ git clone https://github.com/sjp38/masim
+ $ cd masim; make; ./masim ./configs/zigzag.cfg &
+ $ sudo damo record -o damon.data $(pidof masim)
+
+The first two lines of the commands download an artificial memory access
+generator program and run it in the background. The generator will repeatedly
+access two 100 MiB sized memory regions one by one. You can substitute this
+with your real workload. The last line asks ``damo`` to record the access
+pattern in the ``damon.data`` file.
+
+
+Visualizing Recorded Patterns
+=============================
+
+The following three commands visualize the recorded access patterns and save
+the results as separate image files. ::
+
+ $ damo report heats --heatmap access_pattern_heatmap.png
+ $ damo report wss --range 0 101 1 --plot wss_dist.png
+ $ damo report wss --range 0 101 1 --sortby time --plot wss_chron_change.png
+
+- ``access_pattern_heatmap.png`` will visualize the data access pattern in a
+ heatmap, showing which memory region (y-axis) got accessed when (x-axis)
+ and how frequently (color).
+- ``wss_dist.png`` will show the distribution of the working set size.
+- ``wss_chron_change.png`` will show how the working set size has
+ chronologically changed.
+
+You can view the visualizations of this example workload at [1]_.
+Visualizations of other realistic workloads are available at [2]_ [3]_ [4]_.
+
+.. [1] https://damonitor.github.io/doc/html/v17/admin-guide/mm/damon/start.html#visualizing-recorded-patterns
+.. [2] https://damonitor.github.io/test/result/visual/latest/rec.heatmap.1.png.html
+.. [3] https://damonitor.github.io/test/result/visual/latest/rec.wss_sz.png.html
+.. [4] https://damonitor.github.io/test/result/visual/latest/rec.wss_time.png.html
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
new file mode 100644
index 000000000000..a72cda374aba
--- /dev/null
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -0,0 +1,112 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Detailed Usages
+===============
+
+DAMON provides the three interfaces below for different types of users.
+
+- *DAMON user space tool.*
+ This is for privileged people such as system administrators who want a
+ just-working human-friendly interface. Using this, users can use the DAMON’s
+ major features in a human-friendly way. It may not be highly tuned for
+ special cases, though. It supports only virtual address spaces monitoring.
+- *debugfs interface.*
+ This is for privileged user space programmers who want more optimized use of
+ DAMON. Using this, users can use DAMON's major features by reading
+ from and writing to special debugfs files. Therefore, you can write and use
+ personalized DAMON debugfs wrapper programs that read and write the
+ debugfs files on your behalf. The DAMON user space tool is also a reference
+ implementation of such programs. It supports only virtual address spaces
+ monitoring.
+- *Kernel Space Programming Interface.*
+ This is for kernel space programmers. Using this, users can utilize every
+ feature of DAMON most flexibly and efficiently by writing kernel space
+ DAMON application programs. You can even extend DAMON for various
+ address spaces.
+
+Nevertheless, you could write your own user space tool using the debugfs
+interface. A reference implementation is available at
+https://github.com/awslabs/damo. If you are a kernel programmer, you could
+refer to :doc:`/vm/damon/api` for the kernel space programming interface. For
+this reason, this document describes only the debugfs interface.
+
+debugfs Interface
+=================
+
+DAMON exports three files, ``attrs``, ``target_ids``, and ``monitor_on`` under
+its debugfs directory, ``<debugfs>/damon/``.
+
+
+Attributes
+----------
+
+Users can get and set the ``sampling interval``, ``aggregation interval``,
+``regions update interval``, and min/max number of monitoring target regions by
+reading from and writing to the ``attrs`` file. To know about the monitoring
+attributes in detail, please refer to :doc:`/vm/damon/design`. For
+example, the commands below set those values to 5 ms, 100 ms, 1,000 ms, 10 and
+1000, and then check it again::
+
+ # cd <debugfs>/damon
+ # echo 5000 100000 1000000 10 1000 > attrs
+ # cat attrs
+ 5000 100000 1000000 10 1000
+
+
+Target IDs
+----------
+
+Some types of address spaces support multiple monitoring targets. For example,
+the virtual memory address spaces monitoring can have multiple processes as the
+monitoring targets. Users can set the targets by writing relevant id values of
+the targets to, and get the ids of the current targets by reading from the
+``target_ids`` file. In case of the virtual address spaces monitoring, the
+values should be pids of the monitoring target processes. For example, the
+commands below set processes with pids 42 and 4242 as the monitoring targets and
+check it again::
+
+ # cd <debugfs>/damon
+ # echo 42 4242 > target_ids
+ # cat target_ids
+ 42 4242
+
+Note that setting the target ids doesn't start the monitoring.
+
+
+Turning On/Off
+--------------
+
+Setting the files as described above doesn't take effect unless you explicitly
+start the monitoring. You can start, stop, and check the current status of the
+monitoring by writing to and reading from the ``monitor_on`` file. Writing
+``on`` to the file starts the monitoring of the targets with the attributes.
+Writing ``off`` to the file stops them. DAMON also stops if every target
+process is terminated. The example commands below turn the monitoring on and
+off, and check the status of DAMON::
+
+ # cd <debugfs>/damon
+ # echo on > monitor_on
+ # echo off > monitor_on
+ # cat monitor_on
+ off
+
+Please note that you cannot write to the above-mentioned debugfs files while
+the monitoring is turned on. If you write to the files while DAMON is running,
+an error code such as ``-EBUSY`` will be returned.
+
+
+Tracepoint for Monitoring Results
+=================================
+
+DAMON provides the monitoring results via a tracepoint,
+``damon:damon_aggregated``. While the monitoring is turned on, you could
+record the tracepoint events and show results using tracepoint supporting tools
+like ``perf``. For example::
+
+ # echo on > monitor_on
+ # perf record -e damon:damon_aggregated &
+ # sleep 5
+ # kill -9 $(pidof perf)
+ # echo off > monitor_on
+ # perf script
diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst
index 4b14d8b50e9e..cbd19d5e625f 100644
--- a/Documentation/admin-guide/mm/index.rst
+++ b/Documentation/admin-guide/mm/index.rst
@@ -27,6 +27,7 @@ the Linux memory management.
concepts
cma_debugfs
+ damon/index
hugetlbpage
idle_page_tracking
ksm
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index c6bae2d77160..03dfbc925252 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -1,466 +1,576 @@
.. _admin_guide_memory_hotplug:
-==============
-Memory Hotplug
-==============
+==================
+Memory Hot(Un)Plug
+==================
-:Created: Jul 28 2007
-:Updated: Add some details about locking internals: Aug 20 2018
-
-This document is about memory hotplug including how-to-use and current status.
-Because Memory Hotplug is still under development, contents of this text will
-be changed often.
+This document describes generic Linux support for memory hot(un)plug with
+a focus on System RAM, including ZONE_MOVABLE support.
.. contents:: :local:
-.. note::
+Introduction
+============
- (1) x86_64's has special implementation for memory hotplug.
- This text does not describe it.
- (2) This text assumes that sysfs is mounted at ``/sys``.
+Memory hot(un)plug allows for increasing and decreasing the size of physical
+memory available to a machine at runtime. In the simplest case, it consists of
+physically plugging or unplugging a DIMM at runtime, coordinated with the
+operating system.
+Memory hot(un)plug is used for various purposes:
-Introduction
-============
+- The physical memory available to a machine can be adjusted at runtime, up- or
+ downgrading the memory capacity. This dynamic memory resizing, sometimes
+ referred to as "capacity on demand", is frequently used with virtual machines
+ and logical partitions.
+
+- Replacing hardware, such as DIMMs or whole NUMA nodes, without downtime. One
+ example is replacing failing memory modules.
-Purpose of memory hotplug
--------------------------
+- Reducing energy consumption either by physically unplugging memory modules or
+ by logically unplugging (parts of) memory modules from Linux.
-Memory Hotplug allows users to increase/decrease the amount of memory.
-Generally, there are two purposes.
+Further, the basic memory hot(un)plug infrastructure in Linux is nowadays also
+used to expose persistent memory, other performance-differentiated memory and
+reserved memory regions as ordinary system RAM to Linux.
-(A) For changing the amount of memory.
- This is to allow a feature like capacity on demand.
-(B) For installing/removing DIMMs or NUMA-nodes physically.
- This is to exchange DIMMs/NUMA-nodes, reduce power consumption, etc.
+Linux only supports memory hot(un)plug on selected 64 bit architectures, such as
+x86_64, arm64, ppc64, s390x and ia64.
-(A) is required by highly virtualized environments and (B) is required by
-hardware which supports memory power management.
+Memory Hot(Un)Plug Granularity
+------------------------------
-Linux memory hotplug is designed for both purpose.
+Memory hot(un)plug in Linux uses the SPARSEMEM memory model, which divides the
+physical memory address space into chunks of the same size: memory sections. The
+size of a memory section is architecture dependent. For example, x86_64 uses
+128 MiB and ppc64 uses 16 MiB.
-Phases of memory hotplug
+Memory sections are combined into chunks referred to as "memory blocks". The
+size of a memory block is architecture dependent and corresponds to the smallest
+granularity that can be hot(un)plugged. The default size of a memory block is
+the same as memory section size, unless an architecture specifies otherwise.
+
+All memory blocks have the same size.
+
+Phases of Memory Hotplug
------------------------
-There are 2 phases in Memory Hotplug:
+Memory hotplug consists of two phases:
- 1) Physical Memory Hotplug phase
- 2) Logical Memory Hotplug phase.
+(1) Adding the memory to Linux
+(2) Onlining memory blocks
-The First phase is to communicate hardware/firmware and make/erase
-environment for hotplugged memory. Basically, this phase is necessary
-for the purpose (B), but this is good phase for communication between
-highly virtualized environments too.
+In the first phase, metadata, such as the memory map ("memmap") and page tables
+for the direct mapping, is allocated and initialized, and memory blocks are
+created; the latter also creates sysfs files for managing newly created memory
+blocks.
-When memory is hotplugged, the kernel recognizes new memory, makes new memory
-management tables, and makes sysfs files for new memory's operation.
+In the second phase, added memory is exposed to the page allocator. After this
+phase, the memory is visible in memory statistics, such as free and total
+memory, of the system.
-If firmware supports notification of connection of new memory to OS,
-this phase is triggered automatically. ACPI can notify this event. If not,
-"probe" operation by system administration is used instead.
-(see :ref:`memory_hotplug_physical_mem`).
+Phases of Memory Hotunplug
+--------------------------
-Logical Memory Hotplug phase is to change memory state into
-available/unavailable for users. Amount of memory from user's view is
-changed by this phase. The kernel makes all memory in it as free pages
-when a memory range is available.
+Memory hotunplug consists of two phases:
-In this document, this phase is described as online/offline.
+(1) Offlining memory blocks
+(2) Removing the memory from Linux
-Logical Memory Hotplug phase is triggered by write of sysfs file by system
-administrator. For the hot-add case, it must be executed after Physical Hotplug
-phase by hand.
-(However, if you writes udev's hotplug scripts for memory hotplug, these
-phases can be execute in seamless way.)
+In the first phase, memory is "hidden" from the page allocator again, for
+example, by migrating busy memory to other memory locations and removing all
+relevant free pages from the page allocator. After this phase, the memory is no
+longer visible in the memory statistics of the system.
-Unit of Memory online/offline operation
----------------------------------------
+In the second phase, the memory blocks are removed and metadata is freed.
-Memory hotplug uses SPARSEMEM memory model which allows memory to be divided
-into chunks of the same size. These chunks are called "sections". The size of
-a memory section is architecture dependent. For example, power uses 16MiB, ia64
-uses 1GiB.
+Memory Hotplug Notifications
+============================
-Memory sections are combined into chunks referred to as "memory blocks". The
-size of a memory block is architecture dependent and represents the logical
-unit upon which memory online/offline operations are to be performed. The
-default size of a memory block is the same as memory section size unless an
-architecture specifies otherwise. (see :ref:`memory_hotplug_sysfs_files`.)
+There are various ways in which Linux can be notified about memory hotplug
+events so that it can start adding hotplugged memory. This description is
+limited to systems that support ACPI; mechanisms specific to other firmware
+interfaces or virtual machines are not described.
-To determine the size (in bytes) of a memory block please read this file::
+ACPI Notifications
+------------------
- /sys/devices/system/memory/block_size_bytes
+Platforms that support ACPI, such as x86_64, can support memory hotplug
+notifications via ACPI.
-Kernel Configuration
-====================
+In general, firmware supporting memory hotplug defines a memory class object
+with _HID "PNP0C80". When notified about hotplug of a new memory device, the ACPI
+driver will hotplug the memory to Linux.
-To use memory hotplug feature, kernel must be compiled with following
-config options.
+If the firmware supports hotplug of NUMA nodes, it defines an object _HID
+"ACPI0004", "PNP0A05", or "PNP0A06". When notified about an hotplug event, all
+assigned memory devices are added to Linux by the ACPI driver.
-- For all memory hotplug:
- - Memory model -> Sparse Memory (``CONFIG_SPARSEMEM``)
- - Allow for memory hot-add (``CONFIG_MEMORY_HOTPLUG``)
+Similarly, Linux can be notified about requests to hotunplug a memory device or
+a NUMA node via ACPI. The ACPI driver will try offlining all relevant memory
+blocks, and, if successful, hotunplug the memory from Linux.
-- To enable memory removal, the following are also necessary:
- - Allow for memory hot remove (``CONFIG_MEMORY_HOTREMOVE``)
- - Page Migration (``CONFIG_MIGRATION``)
+Manual Probing
+--------------
-- For ACPI memory hotplug, the following are also necessary:
- - Memory hotplug (under ACPI Support menu) (``CONFIG_ACPI_HOTPLUG_MEMORY``)
- - This option can be kernel module.
+On some architectures, the firmware may not be able to notify the operating
+system about a memory hotplug event. Instead, the memory has to be manually
+probed from user space.
-- As a related configuration, if your box has a feature of NUMA-node hotplug
- via ACPI, then this option is necessary too.
+The probe interface is located at::
- - ACPI0004,PNP0A05 and PNP0A06 Container Driver (under ACPI Support menu)
- (``CONFIG_ACPI_CONTAINER``).
+ /sys/devices/system/memory/probe
- This option can be kernel module too.
+Only complete memory blocks can be probed. Individual memory blocks are probed
+by providing the physical start address of the memory block::
+ % echo addr > /sys/devices/system/memory/probe
-.. _memory_hotplug_sysfs_files:
+This results in a memory block for the range [addr, addr + memory_block_size)
+being created.
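+
+For example, assuming a memory block size of 128 MiB, a memory block starting
+at address 0x140000000 (an illustrative address) could be probed via::
+
+ % echo 0x140000000 > /sys/devices/system/memory/probe
+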
-sysfs files for memory hotplug
-==============================
+.. note::
-All memory blocks have their device information in sysfs. Each memory block
-is described under ``/sys/devices/system/memory`` as::
+ Using the probe interface is discouraged as it is easy to crash the kernel,
+ because Linux cannot validate user input; this interface might be removed in
+ the future.
- /sys/devices/system/memory/memoryXXX
+Onlining and Offlining Memory Blocks
+====================================
-where XXX is the memory block id.
+After a memory block has been created, Linux has to be instructed to actually
+make use of that memory: the memory block has to be "online".
-For the memory block covered by the sysfs directory. It is expected that all
-memory sections in this range are present and no memory holes exist in the
-range. Currently there is no way to determine if there is a memory hole, but
-the existence of one should not affect the hotplug capabilities of the memory
-block.
+Before a memory block can be removed, Linux has to stop using any memory part of
+the memory block: the memory block has to be "offlined".
-For example, assume 1GiB memory block size. A device for a memory starting at
-0x100000000 is ``/sys/device/system/memory/memory4``::
+The Linux kernel can be configured to automatically online added memory blocks.
+Memory blocks can only be removed once offlining has succeeded, and drivers may
+trigger offlining of memory blocks when attempting hotunplug of memory.
- (0x100000000 / 1Gib = 4)
+Onlining Memory Blocks Manually
+-------------------------------
-This device covers address range [0x100000000 ... 0x140000000)
+If auto-onlining of memory blocks isn't enabled, user-space has to manually
+trigger onlining of memory blocks. Often, udev rules are used to automate this
+task in user space.
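+
+A minimal udev rule for this purpose might look as follows (a sketch; adapt it
+to your distribution's conventions)::
+
+ SUBSYSTEM=="memory", ACTION=="add", ATTR{state}=="offline", ATTR{state}="online"
+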
-Under each memory block, you can see 5 files:
+Onlining of a memory block can be triggered via::
-- ``/sys/devices/system/memory/memoryXXX/phys_index``
-- ``/sys/devices/system/memory/memoryXXX/phys_device``
-- ``/sys/devices/system/memory/memoryXXX/state``
-- ``/sys/devices/system/memory/memoryXXX/removable``
-- ``/sys/devices/system/memory/memoryXXX/valid_zones``
+ % echo online > /sys/devices/system/memory/memoryXXX/state
-=================== ============================================================
-``phys_index`` read-only and contains memory block id, same as XXX.
-``state`` read-write
+Or alternatively::
- - at read: contains online/offline state of memory.
- - at write: user can specify "online_kernel",
+ % echo 1 > /sys/devices/system/memory/memoryXXX/online
- "online_movable", "online", "offline" command
- which will be performed on all sections in the block.
-``phys_device`` read-only: legacy interface only ever used on s390x to
- expose the covered storage increment.
-``removable`` read-only: legacy interface that indicated whether a memory
- block was likely to be offlineable or not. Newer kernel
- versions return "1" if and only if the kernel supports
- memory offlining.
-``valid_zones`` read-only: designed to show by which zone memory provided by
- a memory block is managed, and to show by which zone memory
- provided by an offline memory block could be managed when
- onlining.
-
- The first column shows it`s default zone.
-
- "memory6/valid_zones: Normal Movable" shows this memoryblock
- can be onlined to ZONE_NORMAL by default and to ZONE_MOVABLE
- by online_movable.
-
- "memory7/valid_zones: Movable Normal" shows this memoryblock
- can be onlined to ZONE_MOVABLE by default and to ZONE_NORMAL
- by online_kernel.
-=================== ============================================================
+The kernel will select the target zone automatically, usually defaulting to
+``ZONE_NORMAL`` unless ``movablecore=1`` has been specified on the kernel
+command line or if the memory block would already intersect ZONE_MOVABLE.
-.. note::
+One can explicitly request to associate an offline memory block with
+ZONE_MOVABLE by::
- These directories/files appear after physical memory hotplug phase.
+ % echo online_movable > /sys/devices/system/memory/memoryXXX/state
-If CONFIG_NUMA is enabled the memoryXXX/ directories can also be accessed
-via symbolic links located in the ``/sys/devices/system/node/node*`` directories.
+Or one can explicitly request a kernel zone (usually ZONE_NORMAL) by::
-For example::
+ % echo online_kernel > /sys/devices/system/memory/memoryXXX/state
- /sys/devices/system/node/node0/memory9 -> ../../memory/memory9
+In any case, if onlining succeeds, the state of the memory block is changed to
+be "online". If it fails, the state of the memory block will remain unchanged
+and the above commands will fail.
-A backlink will also be created::
+Onlining Memory Blocks Automatically
+------------------------------------
- /sys/devices/system/memory/memory9/node0 -> ../../node/node0
+The kernel can be configured to try auto-onlining of newly added memory blocks.
+If this feature is disabled, the memory blocks will stay offline until
+explicitly onlined from user space.
-.. _memory_hotplug_physical_mem:
+The configured auto-online behavior can be observed via::
-Physical memory hot-add phase
-=============================
+ % cat /sys/devices/system/memory/auto_online_blocks
-Hardware(Firmware) Support
---------------------------
+Auto-onlining can be enabled by writing ``online``, ``online_kernel`` or
+``online_movable`` to that file, like::
-On x86_64/ia64 platform, memory hotplug by ACPI is supported.
+ % echo online > /sys/devices/system/memory/auto_online_blocks
-In general, the firmware (ACPI) which supports memory hotplug defines
-memory class object of _HID "PNP0C80". When a notify is asserted to PNP0C80,
-Linux's ACPI handler does hot-add memory to the system and calls a hotplug udev
-script. This will be done automatically.
+Modifying the auto-online behavior will only affect subsequently added
+memory blocks.
-But scripts for memory hotplug are not contained in generic udev package(now).
-You may have to write it by yourself or online/offline memory by hand.
-Please see :ref:`memory_hotplug_how_to_online_memory` and
-:ref:`memory_hotplug_how_to_offline_memory`.
+.. note::
-If firmware supports NUMA-node hotplug, and defines an object _HID "ACPI0004",
-"PNP0A05", or "PNP0A06", notification is asserted to it, and ACPI handler
-calls hotplug code for all of objects which are defined in it.
-If memory device is found, memory hotplug code will be called.
+ In corner cases, auto-onlining can fail. The kernel won't retry. Note that
+ auto-onlining is not expected to fail in default configurations.
-Notify memory hot-add event by hand
------------------------------------
+.. note::
-On some architectures, the firmware may not notify the kernel of a memory
-hotplug event. Therefore, the memory "probe" interface is supported to
-explicitly notify the kernel. This interface depends on
-CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh, and x86
-if hotplug is supported, although for x86 this should be handled by ACPI
-notification.
+ DLPAR on ppc64 ignores the ``offline`` setting and will still online added
+ memory blocks; if onlining fails, memory blocks are removed again.
-Probe interface is located at::
+Offlining Memory Blocks
+-----------------------
- /sys/devices/system/memory/probe
+In the current implementation, Linux's memory offlining will try migrating all
+movable pages off the affected memory block. As most kernel allocations, such as
+page tables, are unmovable, page migration can fail and, therefore, prevent
+memory offlining from succeeding.
-You can tell the physical address of new memory to the kernel by::
+Having the memory provided by a memory block managed by ZONE_MOVABLE significantly
+increases memory offlining reliability; still, memory offlining can fail in
+some corner cases.
- % echo start_address_of_new_memory > /sys/devices/system/memory/probe
+Further, memory offlining might retry for a long time (or even forever), until
+aborted by the user.
-Then, [start_address_of_new_memory, start_address_of_new_memory +
-memory_block_size] memory range is hot-added. In this case, hotplug script is
-not called (in current implementation). You'll have to online memory by
-yourself. Please see :ref:`memory_hotplug_how_to_online_memory`.
+Offlining of a memory block can be triggered via::
-Logical Memory hot-add phase
-============================
+ % echo offline > /sys/devices/system/memory/memoryXXX/state
-State of memory
----------------
+Or alternatively::
-To see (online/offline) state of a memory block, read 'state' file::
+ % echo 0 > /sys/devices/system/memory/memoryXXX/online
- % cat /sys/device/system/memory/memoryXXX/state
+If offlining succeeds, the state of the memory block is changed to be "offline".
+If it fails, the state of the memory block will remain unchanged and the above
+commands will fail, for example, via::
+ bash: echo: write error: Device or resource busy
-- If the memory block is online, you'll read "online".
-- If the memory block is offline, you'll read "offline".
+or via::
+ bash: echo: write error: Invalid argument
-.. _memory_hotplug_how_to_online_memory:
+Observing the State of Memory Blocks
+------------------------------------
-How to online memory
---------------------
+The state (online/offline/going-offline) of a memory block can be observed
+either via::
-When the memory is hot-added, the kernel decides whether or not to "online"
-it according to the policy which can be read from "auto_online_blocks" file::
+ % cat /sys/devices/system/memory/memoryXXX/state
- % cat /sys/devices/system/memory/auto_online_blocks
+Or alternatively (1/0) via::
-The default depends on the CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config
-option. If it is disabled the default is "offline" which means the newly added
-memory is not in a ready-to-use state and you have to "online" the newly added
-memory blocks manually. Automatic onlining can be requested by writing "online"
-to "auto_online_blocks" file::
+ % cat /sys/devices/system/memory/memoryXXX/online
- % echo online > /sys/devices/system/memory/auto_online_blocks
+For an online memory block, the managing zone can be observed via::
-This sets a global policy and impacts all memory blocks that will subsequently
-be hotplugged. Currently offline blocks keep their state. It is possible, under
-certain circumstances, that some memory blocks will be added but will fail to
-online. User space tools can check their "state" files
-(``/sys/devices/system/memory/memoryXXX/state``) and try to online them manually.
+ % cat /sys/devices/system/memory/memoryXXX/valid_zones
-If the automatic onlining wasn't requested, failed, or some memory block was
-offlined it is possible to change the individual block's state by writing to the
-"state" file::
+Configuring Memory Hot(Un)Plug
+==============================
- % echo online > /sys/devices/system/memory/memoryXXX/state
+There are various ways in which system administrators can configure memory
+hot(un)plug and interact with memory blocks, especially to online them.
-This onlining will not change the ZONE type of the target memory block,
-If the memory block doesn't belong to any zone an appropriate kernel zone
-(usually ZONE_NORMAL) will be used unless movable_node kernel command line
-option is specified when ZONE_MOVABLE will be used.
+Memory Hot(Un)Plug Configuration via Sysfs
+------------------------------------------
-You can explicitly request to associate it with ZONE_MOVABLE by::
+Some memory hot(un)plug properties can be configured or inspected via sysfs in::
- % echo online_movable > /sys/devices/system/memory/memoryXXX/state
+ /sys/devices/system/memory/
-.. note:: current limit: this memory block must be adjacent to ZONE_MOVABLE
+The following files are currently defined:
-Or you can explicitly request a kernel zone (usually ZONE_NORMAL) by::
+====================== =========================================================
+``auto_online_blocks`` read-write: set or get the default state of new memory
+ blocks; configure auto-onlining.
- % echo online_kernel > /sys/devices/system/memory/memoryXXX/state
+ The default value depends on the
+ CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel configuration
+ option.
-.. note:: current limit: this memory block must be adjacent to ZONE_NORMAL
+ See the ``state`` property of memory blocks for details.
+``block_size_bytes`` read-only: the size in bytes of a memory block.
+``probe`` write-only: add (probe) selected memory blocks manually
+ from user space by supplying the physical start address.
-An explicit zone onlining can fail (e.g. when the range is already within
-and existing and incompatible zone already).
+ Availability depends on the CONFIG_ARCH_MEMORY_PROBE
+ kernel configuration option.
+``uevent`` read-write: generic udev file for device subsystems.
+====================== =========================================================
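+
+For example, the memory block size can be queried as below; the value shown
+(0x8000000, i.e. 128 MiB) is typical for x86_64 but architecture dependent::
+
+ % cat /sys/devices/system/memory/block_size_bytes
+ 8000000
+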
-After this, memory block XXX's state will be 'online' and the amount of
-available memory will be increased.
+.. note::
-This may be changed in future.
+ When the CONFIG_MEMORY_FAILURE kernel configuration option is enabled, two
+ additional files ``hard_offline_page`` and ``soft_offline_page`` are available
+ to trigger hwpoisoning of pages, for example, for testing purposes. Note that
+ this functionality is not really related to memory hot(un)plug or actual
+ offlining of memory blocks.
-Logical memory remove
-=====================
+Memory Block Configuration via Sysfs
+------------------------------------
-Memory offline and ZONE_MOVABLE
--------------------------------
+Each memory block is represented as a memory block device that can be
+onlined or offlined. All memory blocks have their device information located in
+sysfs. Each present memory block is listed under
+``/sys/devices/system/memory`` as::
-Memory offlining is more complicated than memory online. Because memory offline
-has to make the whole memory block be unused, memory offline can fail if
-the memory block includes memory which cannot be freed.
+ /sys/devices/system/memory/memoryXXX
-In general, memory offline can use 2 techniques.
+where XXX is the memory block id; the number of digits is variable.
-(1) reclaim and free all memory in the memory block.
-(2) migrate all pages in the memory block.
+A present memory block indicates that some memory in the range is present;
+however, a memory block might span memory holes. A memory block spanning memory
+holes cannot be offlined.
-In the current implementation, Linux's memory offline uses method (2), freeing
-all pages in the memory block by page migration. But not all pages are
-migratable. Under current Linux, migratable pages are anonymous pages and
-page caches. For offlining a memory block by migration, the kernel has to
-guarantee that the memory block contains only migratable pages.
+For example, assuming a 1 GiB memory block size, a device for the memory
+starting at 0x100000000 is ``/sys/devices/system/memory/memory4``::
-Now, a boot option for making a memory block which consists of migratable pages
-is supported. By specifying "kernelcore=" or "movablecore=" boot option, you can
-create ZONE_MOVABLE...a zone which is just used for movable pages.
-(See also Documentation/admin-guide/kernel-parameters.rst)
+ (0x100000000 / 1 GiB = 4)
-Assume the system has "TOTAL" amount of memory at boot time, this boot option
-creates ZONE_MOVABLE as following.
+This device covers address range [0x100000000 ... 0x140000000)
-1) When kernelcore=YYYY boot option is used,
- Size of memory not for movable pages (not for offline) is YYYY.
- Size of memory for movable pages (for offline) is TOTAL-YYYY.
+The following files are currently defined:
-2) When movablecore=ZZZZ boot option is used,
- Size of memory not for movable pages (not for offline) is TOTAL - ZZZZ.
- Size of memory for movable pages (for offline) is ZZZZ.
+=================== ============================================================
+``online`` read-write: simplified interface to trigger onlining /
+ offlining and to observe the state of a memory block.
+ When onlining, the zone is selected automatically.
+``phys_device`` read-only: legacy interface only ever used on s390x to
+ expose the covered storage increment.
+``phys_index`` read-only: the memory block id (XXX).
+``removable`` read-only: legacy interface that indicated whether a memory
+ block was likely to be offlineable or not. Nowadays, the
+ kernel return ``1`` if and only if it supports memory
+ offlining.
+``state`` read-write: advanced interface to trigger onlining /
+ offlining and to observe the state of a memory block.
+
+ When writing, ``online``, ``offline``, ``online_kernel`` and
+ ``online_movable`` are supported.
+
+ ``online_movable`` specifies onlining to ZONE_MOVABLE.
+ ``online_kernel`` specifies onlining to the default kernel
+ zone for the memory block, such as ZONE_NORMAL.
+ ``online`` lets the kernel select the zone automatically.
+
+ When reading, ``online``, ``offline`` and ``going-offline``
+ may be returned.
+``uevent`` read-write: generic uevent file for devices.
+``valid_zones`` read-only: when a block is online, shows the zone it
+ belongs to; when a block is offline, shows what zone will
+ manage it when the block will be onlined.
+
+ For online memory blocks, ``DMA``, ``DMA32``, ``Normal``,
+ ``Movable`` and ``none`` may be returned. ``none`` indicates
+ that memory provided by a memory block is managed by
+ multiple zones or spans multiple nodes; such memory blocks
+ cannot be offlined. ``Movable`` indicates ZONE_MOVABLE.
+ Other values indicate a kernel zone.
+
+ For offline memory blocks, the first column shows the
+ zone the kernel would select when onlining the memory block
+ right now without further specifying a zone.
+
+ Availability depends on the CONFIG_MEMORY_HOTREMOVE
+ kernel configuration option.
+=================== ============================================================
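+
+For example, reading the properties of a hypothetical online memory block
+might look as below::
+
+ % cat /sys/devices/system/memory/memory9/state
+ online
+ % cat /sys/devices/system/memory/memory9/valid_zones
+ Movable
+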
.. note::
- Unfortunately, there is no information to show which memory block belongs
- to ZONE_MOVABLE. This is TBD.
+ If the CONFIG_NUMA kernel configuration option is enabled, the memoryXXX/
+ directories can also be accessed via symbolic links located in the
+ ``/sys/devices/system/node/node*`` directories.
+
+ For example::
+
+ /sys/devices/system/node/node0/memory9 -> ../../memory/memory9
+
+ A backlink will also be created::
+
+ /sys/devices/system/memory/memory9/node0 -> ../../node/node0
+
+Command Line Parameters
+-----------------------
+
+Some command line parameters affect memory hot(un)plug handling. The following
+command line parameters are relevant:
+
+======================== =======================================================
+``memhp_default_state`` configures auto-onlining by essentially setting
+ ``/sys/devices/system/memory/auto_online_blocks``.
+``movablecore`` configures automatic zone selection of the kernel. When
+ set, the kernel will default to ZONE_MOVABLE, unless
+ other zones can be kept contiguous.
+======================== =======================================================
+
+Module Parameters
+------------------
- Memory offlining can fail when dissolving a free huge page on ZONE_MOVABLE
- and the feature of freeing unused vmemmap pages associated with each hugetlb
- page is enabled.
+Instead of additional command line parameters or sysfs files, the
+``memory_hotplug`` subsystem now provides a dedicated namespace for module
+parameters. Module parameters can be set via the command line by prefixing
+them with ``memory_hotplug.`` such as::
+
+ memory_hotplug.memmap_on_memory=1
+
+and they can be observed (and some even modified at runtime) via::
+
+ /sys/module/memory_hotplug/parameters/
+
+The following module parameters are currently defined:
+
+======================== =======================================================
+``memmap_on_memory`` read-write: Allocate memory for the memmap from the
+ added memory block itself. Even if enabled, actual
+ support depends on various other system properties and
+ should only be regarded as a hint whether the behavior
+ would be desired.
+
+ While allocating the memmap from the memory block
+ itself makes memory hotplug less likely to fail and
+ keeps the memmap on the same NUMA node in any case, it
+ can fragment physical memory in a way that huge pages
+ in bigger granularity cannot be formed on hotplugged
+ memory.
+======================== =======================================================
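+
+For example, the current value of ``memmap_on_memory`` can be queried via::
+
+ % cat /sys/module/memory_hotplug/parameters/memmap_on_memory
+ N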
+
+ZONE_MOVABLE
+============
+
+ZONE_MOVABLE is an important mechanism for more reliable memory offlining.
+Further, having system RAM managed by ZONE_MOVABLE instead of one of the
+kernel zones can increase the number of possible transparent huge pages and
+dynamically allocated huge pages.
+
+Most kernel allocations are unmovable. Important examples include the memory
+map (usually 1/64th of memory), page tables, and kmalloc(). Such allocations
+can only be served from the kernel zones.
+
+Most user space pages, such as anonymous memory, and page cache pages are
+movable. Such allocations can be served from ZONE_MOVABLE and the kernel zones.
+
+Only movable allocations are served from ZONE_MOVABLE, resulting in unmovable
+allocations being limited to the kernel zones. Without ZONE_MOVABLE, there is
+absolutely no guarantee whether a memory block can be offlined successfully.
+
+Zone Imbalances
+---------------
- This can happen when we have plenty of ZONE_MOVABLE memory, but not enough
- kernel memory to allocate vmemmmap pages. We may even be able to migrate
- huge page contents, but will not be able to dissolve the source huge page.
- This will prevent an offline operation and is unfortunate as memory offlining
- is expected to succeed on movable zones. Users that depend on memory hotplug
- to succeed for movable zones should carefully consider whether the memory
- savings gained from this feature are worth the risk of possibly not being
- able to offline memory in certain situations.
+Having too much system RAM managed by ZONE_MOVABLE is called a zone imbalance,
+which can harm the system or degrade performance. As one example, the kernel
+might crash because it runs out of free memory for unmovable allocations,
+although there is still plenty of free memory left in ZONE_MOVABLE.
+
+Usually, MOVABLE:KERNEL ratios of up to 3:1 or even 4:1 are fine. Ratios of 63:1
+are definitely impossible due to the overhead for the memory map.
+
+Actual safe zone ratios depend on the workload. Extreme cases, like excessive
+long-term pinning of pages, might not be able to deal with ZONE_MOVABLE at all.
.. note::
- Techniques that rely on long-term pinnings of memory (especially, RDMA and
- vfio) are fundamentally problematic with ZONE_MOVABLE and, therefore, memory
- hot remove. Pinned pages cannot reside on ZONE_MOVABLE, to guarantee that
- memory can still get hot removed - be aware that pinning can fail even if
- there is plenty of free memory in ZONE_MOVABLE. In addition, using
- ZONE_MOVABLE might make page pinning more expensive, because pages have to be
- migrated off that zone first.
-.. _memory_hotplug_how_to_offline_memory:
+ CMA memory part of a kernel zone essentially behaves like memory in
+ ZONE_MOVABLE and similar considerations apply, especially when combining
+ CMA with ZONE_MOVABLE.
-How to offline memory
----------------------
+ZONE_MOVABLE Sizing Considerations
+----------------------------------
-You can offline a memory block by using the same sysfs interface that was used
-in memory onlining::
+We usually expect that a large portion of available system RAM will actually
+be consumed by user space, either directly or indirectly via the page cache. In
+the normal case, ZONE_MOVABLE can be used when allocating such pages just fine.
- % echo offline > /sys/devices/system/memory/memoryXXX/state
+With that in mind, it makes sense that we can have a big portion of system RAM
+managed by ZONE_MOVABLE. However, there are some things to consider when using
+ZONE_MOVABLE, especially when fine-tuning zone ratios:
+
+- Having a lot of offline memory blocks. Even offline memory blocks consume
+ memory for metadata and page tables in the direct map; having a lot of offline
+ memory blocks is not a typical case, though.
+
+- Memory ballooning without balloon compaction is incompatible with
+ ZONE_MOVABLE. Only some implementations, such as virtio-balloon and
+ pseries CMM, fully support balloon compaction.
+
+ Further, the CONFIG_BALLOON_COMPACTION kernel configuration option might be
+ disabled. In that case, balloon inflation will only perform unmovable
+ allocations and silently create a zone imbalance, usually triggered by
+ inflation requests from the hypervisor.
+
+- Gigantic pages are unmovable, resulting in user space consuming a
+ lot of unmovable memory.
+
+- Huge pages are unmovable when an architecture does not support huge
+ page migration, resulting in a similar issue as with gigantic pages.
+
+- Page tables are unmovable. Excessive swapping, mapping extremely large
+ files or ZONE_DEVICE memory can be problematic, although only really relevant
+ in corner cases. When we manage a lot of user space memory that has been
+ swapped out or is served from a file/persistent memory/... we still need a lot
+ of page tables to manage that memory once user space accesses it.
+
+- In certain DAX configurations the memory map for the device memory will be
+ allocated from the kernel zones.
+
+- KASAN can have a significant memory overhead, for example, consuming 1/8th of
+ the total system memory size as (unmovable) tracking metadata.
+
+- Long-term pinning of pages. Techniques that rely on long-term pinnings
+ (especially, RDMA and vfio/mdev) are fundamentally problematic with
+ ZONE_MOVABLE, and therefore, memory offlining. Pinned pages cannot reside
+ on ZONE_MOVABLE as that would turn these pages unmovable. Therefore, they
+ have to be migrated off that zone while pinning. Pinning a page can fail
+ even if there is plenty of free memory in ZONE_MOVABLE.
+
+ In addition, using ZONE_MOVABLE might make page pinning more expensive,
+ because of the page migration overhead.
+
+By default, all the memory configured at boot time is managed by the kernel
+zones and ZONE_MOVABLE is not used.
+
+To enable ZONE_MOVABLE to include the memory present at boot and to control the
+ratio between movable and kernel zones there are two command line options:
+``kernelcore=`` and ``movablecore=``. See
+Documentation/admin-guide/kernel-parameters.rst for their description.
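+
+For example, to keep roughly 4 GiB of boot memory in the kernel zones and have
+the remainder managed by ZONE_MOVABLE, one could boot with (an illustrative
+value)::
+
+ kernelcore=4G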
+
+Memory Offlining and ZONE_MOVABLE
+---------------------------------
+
+Even with ZONE_MOVABLE, there are some corner cases where offlining a memory
+block might fail:
+
+- Memory blocks with memory holes; this applies to memory blocks present during
+ boot and can apply to memory blocks hotplugged via the XEN balloon and the
+ Hyper-V balloon.
+
+- Mixed NUMA nodes and mixed zones within a single memory block prevent memory
+ offlining; this applies to memory blocks present during boot only.
+
+- Special memory blocks prevented by the system from getting offlined. Examples
+ include any memory available during boot on arm64 or memory blocks spanning
+ the crashkernel area on s390x; this usually applies to memory blocks present
+ during boot only.
+
+- Memory blocks overlapping with CMA areas cannot be offlined; this applies to
+ memory blocks present during boot only.
+
+- Concurrent activity that operates on the same physical memory area, such as
+ allocating gigantic pages, can result in temporary offlining failures.
+
+- Out of memory when dissolving huge pages, especially when freeing unused
+ vmemmap pages associated with each hugetlb page is enabled.
+
+ Offlining code may be able to migrate huge page contents, but may not be able
+ to dissolve the source huge page because it fails allocating (unmovable) pages
+ for the vmemmap, because the system might not have free memory in the kernel
+ zones left.
+
+ Users that depend on memory offlining to succeed for movable zones should
+ carefully consider whether the memory savings gained from this feature are
+ worth the risk of possibly not being able to offline memory in certain
+ situations.
+
+Further, when running into out of memory situations while migrating pages, or
+when still encountering permanently unmovable pages within ZONE_MOVABLE
+(-> BUG), memory offlining will keep retrying until it eventually succeeds.
+
+When offlining is triggered from user space, the offlining context can be
+terminated by sending a fatal signal. A timeout based offlining can easily be
+implemented via::
-If offline succeeds, the state of the memory block is changed to be "offline".
-If it fails, some error core (like -EBUSY) will be returned by the kernel.
-Even if a memory block does not belong to ZONE_MOVABLE, you can try to offline
-it. If it doesn't contain 'unmovable' memory, you'll get success.
-
-A memory block under ZONE_MOVABLE is considered to be able to be offlined
-easily. But under some busy state, it may return -EBUSY. Even if a memory
-block cannot be offlined due to -EBUSY, you can retry offlining it and may be
-able to offline it (or not). (For example, a page is referred to by some kernel
-internal call and released soon.)
-
-Consideration:
- Memory hotplug's design direction is to make the possibility of memory
- offlining higher and to guarantee unplugging memory under any situation. But
- it needs more work. Returning -EBUSY under some situation may be good because
- the user can decide to retry more or not by himself. Currently, memory
- offlining code does some amount of retry with 120 seconds timeout.
-
-Physical memory remove
-======================
-
-Need more implementation yet....
- - Notification completion of remove works by OS to firmware.
- - Guard from remove if not yet.
-
-
-Locking Internals
-=================
-
-When adding/removing memory that uses memory block devices (i.e. ordinary RAM),
-the device_hotplug_lock should be held to:
-
-- synchronize against online/offline requests (e.g. via sysfs). This way, memory
- block devices can only be accessed (.online/.state attributes) by user
- space once memory has been fully added. And when removing memory, we
- know nobody is in critical sections.
-- synchronize against CPU hotplug and similar (e.g. relevant for ACPI and PPC)
-
-Especially, there is a possible lock inversion that is avoided using
-device_hotplug_lock when adding memory and user space tries to online that
-memory faster than expected:
-
-- device_online() will first take the device_lock(), followed by
- mem_hotplug_lock
-- add_memory_resource() will first take the mem_hotplug_lock, followed by
- the device_lock() (while creating the devices, during bus_add_device()).
-
-As the device is visible to user space before taking the device_lock(), this
-can result in a lock inversion.
-
-onlining/offlining of memory should be done via device_online()/
-device_offline() - to make sure it is properly synchronized to actions
-via sysfs. Holding device_hotplug_lock is advised (to e.g. protect online_type)
-
-When adding/removing/onlining/offlining memory or adding/removing
-heterogeneous/device memory, we should always hold the mem_hotplug_lock in
-write mode to serialise memory hotplug (e.g. access to global/zone
-variables).
-
-In addition, mem_hotplug_lock (in contrast to device_hotplug_lock) in read
-mode allows for a quite efficient get_online_mems/put_online_mems
-implementation, so code accessing memory can protect from that memory
-vanishing.
-
-
-Future Work
-===========
-
- - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like
- sysctl or new control file.
- - showing memory block and physical device relationship.
- - test and make it better memory offlining.
- - support HugeTLB page migration and offlining.
- - memmap removing at memory offline.
- - physical remove memory.
+ % timeout $TIMEOUT offline_block | failure_handling
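+
+A minimal sketch of such a wrapper, assuming memory block 64 and a budget of
+30 seconds (both the block number and the timeout are made up)::
+
+ % timeout 30 sh -c 'echo offline > /sys/devices/system/memory/memory64/state'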
diff --git a/Documentation/arm/marvell.rst b/Documentation/arm/marvell.rst
index 85169bc3f538..56bb592dbd0c 100644
--- a/Documentation/arm/marvell.rst
+++ b/Documentation/arm/marvell.rst
@@ -140,6 +140,7 @@ EBU Armada family
- 88F6821 Armada 382
- 88F6W21 Armada 383
- 88F6820 Armada 385
+ - 88F6825
- 88F6828 Armada 388
- Product infos: https://web.archive.org/web/20181006144616/http://www.marvell.com/embedded-processors/armada-38x/
diff --git a/Documentation/block/blk-mq.rst b/Documentation/block/blk-mq.rst
index d96118c73954..31f52f326971 100644
--- a/Documentation/block/blk-mq.rst
+++ b/Documentation/block/blk-mq.rst
@@ -54,7 +54,7 @@ layer or if we want to try to merge requests. In both cases, requests will be
sent to the software queue.
Then, after the requests are processed by software queues, they will be placed
-at the hardware queue, a second stage queue were the hardware has direct access
+at the hardware queue, a second stage queue where the hardware has direct access
to process those requests. However, if the hardware does not have enough
resources to accept more requests, blk-mq will places requests on a temporary
queue, to be sent in the future, when the hardware is able.
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 75650f6443af..948a97d6387d 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -463,8 +463,8 @@ latex_elements['preamble'] += '''
\\newcommand{\\kerneldocEndTC}{}
\\newcommand{\\kerneldocBeginKR}{}
\\newcommand{\\kerneldocEndKR}{}
- \\newcommand{\\kerneldocBeginSC}{}
- \\newcommand{\\kerneldocEndKR}{}
+ \\newcommand{\\kerneldocBeginJP}{}
+ \\newcommand{\\kerneldocEndJP}{}
}
'''
diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst
index b66e3cae1472..c6f4ba2fb32d 100644
--- a/Documentation/core-api/cpu_hotplug.rst
+++ b/Documentation/core-api/cpu_hotplug.rst
@@ -2,12 +2,13 @@
CPU hotplug in the Kernel
=========================
-:Date: December, 2016
+:Date: September, 2021
:Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
- Rusty Russell <rusty@rustcorp.com.au>,
- Srivatsa Vaddagiri <vatsa@in.ibm.com>,
- Ashok Raj <ashok.raj@intel.com>,
- Joel Schopp <jschopp@austin.ibm.com>
+ Rusty Russell <rusty@rustcorp.com.au>,
+ Srivatsa Vaddagiri <vatsa@in.ibm.com>,
+ Ashok Raj <ashok.raj@intel.com>,
+ Joel Schopp <jschopp@austin.ibm.com>,
+ Thomas Gleixner <tglx@linutronix.de>
Introduction
============
@@ -158,100 +159,480 @@ at state ``CPUHP_OFFLINE``. This includes:
* Once all services are migrated, kernel calls an arch specific routine
``__cpu_disable()`` to perform arch specific cleanup.
-Using the hotplug API
----------------------
-
-It is possible to receive notifications once a CPU is offline or onlined. This
-might be important to certain drivers which need to perform some kind of setup
-or clean up functions based on the number of available CPUs::
-
- #include <linux/cpuhotplug.h>
-
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "X/Y:online",
- Y_online, Y_prepare_down);
-
-*X* is the subsystem and *Y* the particular driver. The *Y_online* callback
-will be invoked during registration on all online CPUs. If an error
-occurs during the online callback the *Y_prepare_down* callback will be
-invoked on all CPUs on which the online callback was previously invoked.
-After registration completed, the *Y_online* callback will be invoked
-once a CPU is brought online and *Y_prepare_down* will be invoked when a
-CPU is shutdown. All resources which were previously allocated in
-*Y_online* should be released in *Y_prepare_down*.
-The return value *ret* is negative if an error occurred during the
-registration process. Otherwise a positive value is returned which
-contains the allocated hotplug for dynamically allocated states
-(*CPUHP_AP_ONLINE_DYN*). It will return zero for predefined states.
-
-The callback can be remove by invoking ``cpuhp_remove_state()``. In case of a
-dynamically allocated state (*CPUHP_AP_ONLINE_DYN*) use the returned state.
-During the removal of a hotplug state the teardown callback will be invoked.
-
-Multiple instances
-~~~~~~~~~~~~~~~~~~
-
-If a driver has multiple instances and each instance needs to perform the
-callback independently then it is likely that a ''multi-state'' should be used.
-First a multi-state state needs to be registered::
-
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "X/Y:online,
- Y_online, Y_prepare_down);
- Y_hp_online = ret;
-
-The ``cpuhp_setup_state_multi()`` behaves similar to ``cpuhp_setup_state()``
-except it prepares the callbacks for a multi state and does not invoke
-the callbacks. This is a one time setup.
-Once a new instance is allocated, you need to register this new instance::
-
- ret = cpuhp_state_add_instance(Y_hp_online, &d->node);
-
-This function will add this instance to your previously allocated
-*Y_hp_online* state and invoke the previously registered callback
-(*Y_online*) on all online CPUs. The *node* element is a ``struct
-hlist_node`` member of your per-instance data structure.
-
-On removal of the instance::
-
- cpuhp_state_remove_instance(Y_hp_online, &d->node)
-
-should be invoked which will invoke the teardown callback on all online
-CPUs.
-
-Manual setup
-~~~~~~~~~~~~
-
-Usually it is handy to invoke setup and teardown callbacks on registration or
-removal of a state because usually the operation needs to performed once a CPU
-goes online (offline) and during initial setup (shutdown) of the driver. However
-each registration and removal function is also available with a ``_nocalls``
-suffix which does not invoke the provided callbacks if the invocation of the
-callbacks is not desired. During the manual setup (or teardown) the functions
-``cpus_read_lock()`` and ``cpus_read_unlock()`` should be used to inhibit CPU
-hotplug operations.
-
-
-The ordering of the events
---------------------------
-
-The hotplug states are defined in ``include/linux/cpuhotplug.h``:
-
-* The states *CPUHP_OFFLINE* … *CPUHP_AP_OFFLINE* are invoked before the
- CPU is up.
-* The states *CPUHP_AP_OFFLINE* … *CPUHP_AP_ONLINE* are invoked
- just the after the CPU has been brought up. The interrupts are off and
- the scheduler is not yet active on this CPU. Starting with *CPUHP_AP_OFFLINE*
- the callbacks are invoked on the target CPU.
-* The states between *CPUHP_AP_ONLINE_DYN* and *CPUHP_AP_ONLINE_DYN_END* are
- reserved for the dynamic allocation.
-* The states are invoked in the reverse order on CPU shutdown starting with
- *CPUHP_ONLINE* and stopping at *CPUHP_OFFLINE*. Here the callbacks are
- invoked on the CPU that will be shutdown until *CPUHP_AP_OFFLINE*.
-
-A dynamically allocated state via *CPUHP_AP_ONLINE_DYN* is often enough.
-However if an earlier invocation during the bring up or shutdown is required
-then an explicit state should be acquired. An explicit state might also be
-required if the hotplug event requires specific ordering in respect to
-another hotplug event.
+
+The CPU hotplug API
+===================
+
+CPU hotplug state machine
+-------------------------
+
+CPU hotplug uses a trivial state machine with a linear state space from
+CPUHP_OFFLINE to CPUHP_ONLINE. Each state has a startup and a teardown
+callback.
+
+When a CPU is onlined, the startup callbacks are invoked sequentially until
+the state CPUHP_ONLINE is reached. They can also be invoked when the
+callbacks of a state are set up or an instance is added to a multi-instance
+state.
+
+When a CPU is offlined the teardown callbacks are invoked in the reverse
+order sequentially until the state CPUHP_OFFLINE is reached. They can also
+be invoked when the callbacks of a state are removed or an instance is
+removed from a multi-instance state.
+
+If a usage site requires only a callback in one direction of the hotplug
+operations (CPU online or CPU offline) then the callback for the other
+direction can be set to NULL when the state is set up.
+
+The state space is divided into three sections:
+
+* The PREPARE section
+
+ The PREPARE section covers the state space from CPUHP_OFFLINE to
+ CPUHP_BRINGUP_CPU.
+
+ The startup callbacks in this section are invoked before the CPU is
+ started during a CPU online operation. The teardown callbacks are invoked
+ after the CPU has become dysfunctional during a CPU offline operation.
+
+ The callbacks are invoked on a control CPU, as they obviously cannot run
+ on the hotplugged CPU, which is either not yet started or has already
+ become dysfunctional.
+
+ The startup callbacks are used to set up resources which are required to
+ bring a CPU successfully online. The teardown callbacks are used to free
+ resources or to move pending work to an online CPU after the hotplugged
+ CPU became dysfunctional.
+
+ The startup callbacks are allowed to fail. If a callback fails, the CPU
+ online operation is aborted and the CPU is brought down to the previous
+ state (usually CPUHP_OFFLINE) again.
+
+ The teardown callbacks in this section are not allowed to fail.
+
+* The STARTING section
+
+ The STARTING section covers the state space between CPUHP_BRINGUP_CPU + 1
+ and CPUHP_AP_ONLINE.
+
+ The startup callbacks in this section are invoked on the hotplugged CPU
+ with interrupts disabled during a CPU online operation in the early CPU
+ setup code. The teardown callbacks are invoked with interrupts disabled
+ on the hotplugged CPU during a CPU offline operation shortly before the
+ CPU is completely shut down.
+
+ The callbacks in this section are not allowed to fail.
+
+ The callbacks are used for low level hardware initialization/shutdown and
+ for core subsystems.
+
+* The ONLINE section
+
+ The ONLINE section covers the state space between CPUHP_AP_ONLINE + 1 and
+ CPUHP_ONLINE.
+
+ The startup callbacks in this section are invoked on the hotplugged CPU
+ during a CPU online operation. The teardown callbacks are invoked on the
+ hotplugged CPU during a CPU offline operation.
+
+ The callbacks are invoked in the context of the per CPU hotplug thread,
+ which is pinned on the hotplugged CPU. The callbacks are invoked with
+ interrupts and preemption enabled.
+
+ The callbacks are allowed to fail. When a callback fails the hotplug
+ operation is aborted and the CPU is brought back to the previous state.
+
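+For illustration, state callbacks are plain functions that take the CPU
+number as argument. A minimal sketch of a callback pair for the ONLINE
+section (the subsys_* names are hypothetical)::
+
+ static int subsys_cpu_online(unsigned int cpu)
+ {
+         /* Set up per CPU resources for @cpu; may fail. */
+         return 0;
+ }
+
+ static int subsys_cpu_offline(unsigned int cpu)
+ {
+         /* Move pending work away from @cpu and free its resources. */
+         return 0;
+ }
+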
+CPU online/offline operations
+-----------------------------
+
+A successful online operation looks like this::
+
+ [CPUHP_OFFLINE]
+ [CPUHP_OFFLINE + 1]->startup() -> success
+ [CPUHP_OFFLINE + 2]->startup() -> success
+ [CPUHP_OFFLINE + 3] -> skipped because startup == NULL
+ ...
+ [CPUHP_BRINGUP_CPU]->startup() -> success
+ === End of PREPARE section
+ [CPUHP_BRINGUP_CPU + 1]->startup() -> success
+ ...
+ [CPUHP_AP_ONLINE]->startup() -> success
+ === End of STARTING section
+ [CPUHP_AP_ONLINE + 1]->startup() -> success
+ ...
+ [CPUHP_ONLINE - 1]->startup() -> success
+ [CPUHP_ONLINE]
+
+A successful offline operation looks like this::
+
+ [CPUHP_ONLINE]
+ [CPUHP_ONLINE - 1]->teardown() -> success
+ ...
+ [CPUHP_AP_ONLINE + 1]->teardown() -> success
+ === Start of STARTING section
+ [CPUHP_AP_ONLINE]->teardown() -> success
+ ...
+ [CPUHP_BRINGUP_CPU + 1]->teardown()
+ ...
+ === Start of PREPARE section
+ [CPUHP_BRINGUP_CPU]->teardown()
+ [CPUHP_OFFLINE + 3]->teardown()
+ [CPUHP_OFFLINE + 2] -> skipped because teardown == NULL
+ [CPUHP_OFFLINE + 1]->teardown()
+ [CPUHP_OFFLINE]
+
+A failed online operation looks like this::
+
+ [CPUHP_OFFLINE]
+ [CPUHP_OFFLINE + 1]->startup() -> success
+ [CPUHP_OFFLINE + 2]->startup() -> success
+ [CPUHP_OFFLINE + 3] -> skipped because startup == NULL
+ ...
+ [CPUHP_BRINGUP_CPU]->startup() -> success
+ === End of PREPARE section
+ [CPUHP_BRINGUP_CPU + 1]->startup() -> success
+ ...
+ [CPUHP_AP_ONLINE]->startup() -> success
+ === End of STARTING section
+ [CPUHP_AP_ONLINE + 1]->startup() -> success
+ ---
+ [CPUHP_AP_ONLINE + N]->startup() -> fail
+ [CPUHP_AP_ONLINE + (N - 1)]->teardown()
+ ...
+ [CPUHP_AP_ONLINE + 1]->teardown()
+ === Start of STARTING section
+ [CPUHP_AP_ONLINE]->teardown()
+ ...
+ [CPUHP_BRINGUP_CPU + 1]->teardown()
+ ...
+ === Start of PREPARE section
+ [CPUHP_BRINGUP_CPU]->teardown()
+ [CPUHP_OFFLINE + 3]->teardown()
+ [CPUHP_OFFLINE + 2] -> skipped because teardown == NULL
+ [CPUHP_OFFLINE + 1]->teardown()
+ [CPUHP_OFFLINE]
+
+A failed offline operation looks like this::
+
+ [CPUHP_ONLINE]
+ [CPUHP_ONLINE - 1]->teardown() -> success
+ ...
+ [CPUHP_ONLINE - N]->teardown() -> fail
+ [CPUHP_ONLINE - (N - 1)]->startup()
+ ...
+ [CPUHP_ONLINE - 1]->startup()
+ [CPUHP_ONLINE]
+
+Recursive failures cannot be handled sensibly. Look at the following
+example of a recursive failure due to a failed offline operation::
+
+ [CPUHP_ONLINE]
+ [CPUHP_ONLINE - 1]->teardown() -> success
+ ...
+ [CPUHP_ONLINE - N]->teardown() -> fail
+ [CPUHP_ONLINE - (N - 1)]->startup() -> success
+ [CPUHP_ONLINE - (N - 2)]->startup() -> fail
+
+The CPU hotplug state machine stops right here and does not try to go back
+down again because that would likely result in an endless loop::
+
+ [CPUHP_ONLINE - (N - 1)]->teardown() -> success
+ [CPUHP_ONLINE - N]->teardown() -> fail
+ [CPUHP_ONLINE - (N - 1)]->startup() -> success
+ [CPUHP_ONLINE - (N - 2)]->startup() -> fail
+ [CPUHP_ONLINE - (N - 1)]->teardown() -> success
+ [CPUHP_ONLINE - N]->teardown() -> fail
+
+Lather, rinse and repeat. In this case the CPU is left in state::
+
+ [CPUHP_ONLINE - (N - 1)]
+
+which at least lets the system make progress and gives the user a chance to
+debug or even resolve the situation.
+
+Allocating a state
+------------------
+
+There are two ways to allocate a CPU hotplug state:
+
+* Static allocation
+
+ Static allocation has to be used when the subsystem or driver has
+ ordering requirements versus other CPU hotplug states. E.g. the PERF core
+ startup callback has to be invoked before the PERF driver startup
+ callbacks during a CPU online operation. During a CPU offline operation
+ the driver teardown callbacks have to be invoked before the core teardown
+ callback. The statically allocated states are described by constants in
+ the cpuhp_state enum which can be found in include/linux/cpuhotplug.h.
+
+ Insert the state into the enum at the proper place so the ordering
+ requirements are fulfilled (see the sketch after this list). The state
+ constant has to be used for state setup and removal.
+
+ Static allocation is also required when the state callbacks are not set
+ up at runtime and are part of the initializer of the CPU hotplug state
+ array in kernel/cpu.c.
+
+* Dynamic allocation
+
+ When there are no ordering requirements for the state callbacks then
+ dynamic allocation is the preferred method. The state number is allocated
+ by the setup function and returned to the caller on success.
+
+ Only the PREPARE and ONLINE sections provide a dynamic allocation
+ range. The STARTING section does not, as most of the callbacks in that
+ section have explicit ordering requirements.
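+
+For illustration, a statically allocated state is inserted into the
+cpuhp_state enum at a point that satisfies its ordering requirements; a
+sketch with a hypothetical CPUHP_SUBSYS_PREPARE constant (not a real state)::
+
+ enum cpuhp_state {
+         CPUHP_OFFLINE = 0,
+         ...
+         CPUHP_SUBSYS_PREPARE,   /* ordered before its dependent states */
+         ...
+         CPUHP_BRINGUP_CPU,
+         ...
+         CPUHP_ONLINE,
+ };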
+
+Setup of a CPU hotplug state
+----------------------------
+
+The core code provides the following functions to set up a state:
+
+* cpuhp_setup_state(state, name, startup, teardown)
+* cpuhp_setup_state_nocalls(state, name, startup, teardown)
+* cpuhp_setup_state_cpuslocked(state, name, startup, teardown)
+* cpuhp_setup_state_nocalls_cpuslocked(state, name, startup, teardown)
+
+For cases where a driver or a subsystem has multiple instances and the same
+CPU hotplug state callbacks need to be invoked for each instance, the CPU
+hotplug core provides multi-instance support. The advantage over
+driver-specific instance lists is that the instance-related functions are
+fully serialized against CPU hotplug operations and provide automatic
+invocation of the state callbacks on addition and removal. To set up such a
+multi-instance state, the following function is available:
+
+* cpuhp_setup_state_multi(state, name, startup, teardown)
+
+The @state argument is either a statically allocated state or one of the
+constants for dynamically allocated states - CPUHP_BP_PREPARE_DYN,
+CPUHP_AP_ONLINE_DYN - depending on the state section (PREPARE, ONLINE) for
+which a dynamic state should be allocated.
+
+The @name argument is used for sysfs output and for instrumentation. The
+naming convention is "subsys:mode" or "subsys/driver:mode",
+e.g. "perf:mode" or "perf/x86:mode". The common mode names are:
+
+======== =======================================================
+prepare For states in the PREPARE section
+
+dead For states in the PREPARE section which do not provide
+ a startup callback
+
+starting For states in the STARTING section
+
+dying For states in the STARTING section which do not provide
+ a startup callback
+
+online For states in the ONLINE section
+
+offline For states in the ONLINE section which do not provide
+ a startup callback
+======== =======================================================
+
+As the @name argument is only used for sysfs and instrumentation, other mode
+descriptors can be used as well if they describe the nature of the state
+better than the common ones.
+
+Examples for @name arguments: "perf/online", "perf/x86:prepare",
+"RCU/tree:dying", "sched/waitempty"
+
+The @startup argument is a function pointer to the callback which should be
+invoked during a CPU online operation. If the usage site does not require a
+startup callback, set the pointer to NULL.
+
+The @teardown argument is a function pointer to the callback which should
+be invoked during a CPU offline operation. If the usage site does not
+require a teardown callback, set the pointer to NULL.
+
+The functions differ in how the installed callbacks are treated:
+
+ * cpuhp_setup_state_nocalls(), cpuhp_setup_state_nocalls_cpuslocked()
+ and cpuhp_setup_state_multi() only install the callbacks
+
+ * cpuhp_setup_state() and cpuhp_setup_state_cpuslocked() install the
+ callbacks and invoke the @startup callback (if not NULL) for all online
+ CPUs which currently have a state greater than the newly installed
+ state. Depending on the state section the callback is either invoked on
+ the current CPU (PREPARE section) or on each online CPU (ONLINE
+ section) in the context of the CPU's hotplug thread.
+
+ If a callback fails for CPU N then the teardown callback for CPU
+ 0 .. N-1 is invoked to roll back the operation. The state setup fails,
+ the callbacks for the state are not installed and in case of dynamic
+ allocation the allocated state is freed.
+
+The state setup and the callback invocations are serialized against CPU
+hotplug operations. If the setup function has to be called from a CPU
+hotplug read locked region, then the _cpuslocked() variants have to be
+used. These functions cannot be used from within CPU hotplug callbacks.
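+
+A minimal sketch of a state setup from within a CPU hotplug read locked
+region (the subsys_* names are hypothetical)::
+
+ cpus_read_lock();
+ ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN, "subsys:online",
+                                    subsys_cpu_online, subsys_cpu_offline);
+ cpus_read_unlock();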
+
+The function return values:
+ ======== ===================================================================
+ 0 Statically allocated state was successfully set up
+
+ >0 Dynamically allocated state was successfully set up.
+
+ The returned number is the state number which was allocated. If
+ the state callbacks have to be removed later, e.g. module
+ removal, then this number has to be saved by the caller and used
+ as @state argument for the state remove function. For
+ multi-instance states the dynamically allocated state number is
+ also required as @state argument for the instance add/remove
+ operations.
+
+ <0 Operation failed
+ ======== ===================================================================
+
+Removal of a CPU hotplug state
+------------------------------
+
+To remove a previously set up state, the following functions are provided:
+
+* cpuhp_remove_state(state)
+* cpuhp_remove_state_nocalls(state)
+* cpuhp_remove_state_nocalls_cpuslocked(state)
+* cpuhp_remove_multi_state(state)
+
+The @state argument is either a statically allocated state or the state
+number which was allocated in the dynamic range by cpuhp_setup_state*(). If
+the state is in the dynamic range, then the state number is freed and
+available for dynamic allocation again.
+
+The functions differ in how the installed callbacks are treated:
+
+ * cpuhp_remove_state_nocalls(), cpuhp_remove_state_nocalls_cpuslocked()
+ and cpuhp_remove_multi_state() only remove the callbacks.
+
+ * cpuhp_remove_state() removes the callbacks and invokes the teardown
+ callback (if not NULL) for all online CPUs which currently have a state
+ greater than the removed state. Depending on the state section the
+ callback is either invoked on the current CPU (PREPARE section) or on
+ each online CPU (ONLINE section) in the context of the CPU's hotplug
+ thread.
+
+ In order to complete the removal, the teardown callback should not fail.
+
+The state removal and the callback invocations are serialized against CPU
+hotplug operations. If the remove function has to be called from a CPU
+hotplug read locked region, then the _cpuslocked() variants have to be
+used. These functions cannot be used from within CPU hotplug callbacks.
+
+If a multi-instance state is removed then the caller has to remove all
+instances first.
+
+Multi-Instance state instance management
+----------------------------------------
+
+Once the multi-instance state is set up, instances can be added to the
+state:
+
+ * cpuhp_state_add_instance(state, node)
+ * cpuhp_state_add_instance_nocalls(state, node)
+
+The @state argument is either a statically allocated state or the state
+number which was allocated in the dynamic range by cpuhp_setup_state_multi().
+
+The @node argument is a pointer to an hlist_node which is embedded in the
+instance's data structure. The pointer is handed to the multi-instance
+state callbacks and can be used by the callback to retrieve the instance
+via container_of().
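+
+For illustration, a per-instance data structure with the embedded
+hlist_node, and a multi-instance callback retrieving it (a sketch, the
+subsys_* names are hypothetical)::
+
+ struct subsys_instance {
+         struct hlist_node node;
+         /* per-instance data */
+ };
+
+ static int subsys_cpu_online(unsigned int cpu, struct hlist_node *node)
+ {
+         struct subsys_instance *inst =
+                 container_of(node, struct subsys_instance, node);
+
+         /* Operate on @inst for @cpu. */
+         return 0;
+ }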
+
+The functions differ in how the installed callbacks are treated:
+
+ * cpuhp_state_add_instance_nocalls() only adds the instance to the
+ multi-instance state's node list.
+
+ * cpuhp_state_add_instance() adds the instance and invokes the startup
+ callback (if not NULL) associated with @state for all online CPUs which
+ currently have a state greater than @state. The callback is only
+ invoked for the instance being added. Depending on the state section
+ the callback is either invoked on the current CPU (PREPARE section) or
+ on each online CPU (ONLINE section) in the context of the CPU's hotplug
+ thread.
+
+ If a callback fails for CPU N then the teardown callback for CPU
+ 0 .. N-1 is invoked to roll back the operation, the function fails and
+ the instance is not added to the node list of the multi-instance state.
+
+To remove an instance from the state's node list, these functions are
+available:
+
+ * cpuhp_state_remove_instance(state, node)
+ * cpuhp_state_remove_instance_nocalls(state, node)
+
+The arguments are the same as for the cpuhp_state_add_instance*()
+variants above.
+
+The functions differ in how the installed callbacks are treated:
+
+ * cpuhp_state_remove_instance_nocalls() only removes the instance from the
+ state's node list.
+
+ * cpuhp_state_remove_instance() removes the instance and invokes the
+ teardown callback (if not NULL) associated with @state for all online
+ CPUs which currently have a state greater than @state. The callback is
+ only invoked for the instance being removed. Depending on the state
+ section the callback is either invoked on the current CPU (PREPARE
+ section) or on each online CPU (ONLINE section) in the context of the
+ CPU's hotplug thread.
+
+ In order to complete the removal, the teardown callback should not fail.
+
+The node list add/remove operations and the callback invocations are
+serialized against CPU hotplug operations. These functions cannot be used
+from within CPU hotplug callbacks or CPU hotplug read locked regions.
+
+Examples
+--------
+
+Set up and tear down a statically allocated state in the STARTING section
+for notifications on online and offline operations::
+
+ ret = cpuhp_setup_state(CPUHP_SUBSYS_STARTING, "subsys:starting",
+                         subsys_cpu_starting, subsys_cpu_dying);
+ if (ret < 0)
+         return ret;
+ ....
+ cpuhp_remove_state(CPUHP_SUBSYS_STARTING);
+
+Set up and tear down a dynamically allocated state in the ONLINE section
+for notifications on offline operations::
+
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:offline",
+                           NULL, subsys_cpu_offline);
+ if (state < 0)
+         return state;
+ ....
+ cpuhp_remove_state(state);
+
+Set up and tear down a dynamically allocated state in the ONLINE section
+for notifications on online operations without invoking the callbacks::
+
+ state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "subsys:online",
+                                   subsys_cpu_online, NULL);
+ if (state < 0)
+         return state;
+ ....
+ cpuhp_remove_state_nocalls(state);
+
+Set up, use and tear down a dynamically allocated multi-instance state in
+the ONLINE section for notifications on online and offline operations::
+
+ state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "subsys:online",
+                                 subsys_cpu_online, subsys_cpu_offline);
+ if (state < 0)
+         return state;
+ ....
+ ret = cpuhp_state_add_instance(state, &inst1->node);
+ if (ret)
+         return ret;
+ ....
+ ret = cpuhp_state_add_instance(state, &inst2->node);
+ if (ret)
+         return ret;
+ ....
+ cpuhp_state_remove_instance(state, &inst1->node);
+ ....
+ cpuhp_state_remove_instance(state, &inst2->node);
+ ....
+ cpuhp_remove_multi_state(state);
+
Testing of hotplug states
=========================
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 2a7444e3a4c2..2e7186805148 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -315,6 +315,9 @@ Block Devices
.. kernel-doc:: block/genhd.c
:export:
+.. kernel-doc:: block/bdev.c
+ :export:
+
Char devices
============
diff --git a/Documentation/cpu-freq/cpu-drivers.rst b/Documentation/cpu-freq/cpu-drivers.rst
index d84ededb66f9..3b32336a7803 100644
--- a/Documentation/cpu-freq/cpu-drivers.rst
+++ b/Documentation/cpu-freq/cpu-drivers.rst
@@ -75,9 +75,6 @@ And optionally
.resume - A pointer to a per-policy resume function which is called
with interrupts disabled and _before_ the governor is started again.
- .ready - A pointer to a per-policy ready function which is called after
- the policy is fully initialized.
-
.attr - A pointer to a NULL-terminated list of "struct freq_attr" which
allow to export values to sysfs.
diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
index fdf04e741ea5..0fbe3308bf37 100644
--- a/Documentation/dev-tools/kfence.rst
+++ b/Documentation/dev-tools/kfence.rst
@@ -65,25 +65,27 @@ Error reports
A typical out-of-bounds access looks like this::
==================================================================
- BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0xa3/0x22b
+ BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0xa6/0x234
- Out-of-bounds read at 0xffffffffb672efff (1B left of kfence-#17):
- test_out_of_bounds_read+0xa3/0x22b
- kunit_try_run_case+0x51/0x85
+ Out-of-bounds read at 0xffff8c3f2e291fff (1B left of kfence-#72):
+ test_out_of_bounds_read+0xa6/0x234
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- kfence-#17 [0xffffffffb672f000-0xffffffffb672f01f, size=32, cache=kmalloc-32] allocated by task 507:
- test_alloc+0xf3/0x25b
- test_out_of_bounds_read+0x98/0x22b
- kunit_try_run_case+0x51/0x85
+ kfence-#72: 0xffff8c3f2e292000-0xffff8c3f2e29201f, size=32, cache=kmalloc-32
+
+ allocated by task 484 on cpu 0 at 32.919330s:
+ test_alloc+0xfe/0x738
+ test_out_of_bounds_read+0x9b/0x234
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- CPU: 4 PID: 107 Comm: kunit_try_catch Not tainted 5.8.0-rc6+ #7
- Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ CPU: 0 PID: 484 Comm: kunit_try_catch Not tainted 5.13.0-rc3+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
==================================================================
The header of the report provides a short summary of the function involved in
@@ -96,30 +98,32 @@ Use-after-free accesses are reported as::
==================================================================
BUG: KFENCE: use-after-free read in test_use_after_free_read+0xb3/0x143
- Use-after-free read at 0xffffffffb673dfe0 (in kfence-#24):
+ Use-after-free read at 0xffff8c3f2e2a0000 (in kfence-#79):
test_use_after_free_read+0xb3/0x143
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- kfence-#24 [0xffffffffb673dfe0-0xffffffffb673dfff, size=32, cache=kmalloc-32] allocated by task 507:
- test_alloc+0xf3/0x25b
+ kfence-#79: 0xffff8c3f2e2a0000-0xffff8c3f2e2a001f, size=32, cache=kmalloc-32
+
+ allocated by task 488 on cpu 2 at 33.871326s:
+ test_alloc+0xfe/0x738
test_use_after_free_read+0x76/0x143
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- freed by task 507:
+ freed by task 488 on cpu 2 at 33.871358s:
test_use_after_free_read+0xa8/0x143
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- CPU: 4 PID: 109 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
- Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ CPU: 2 PID: 488 Comm: kunit_try_catch Tainted: G B 5.13.0-rc3+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
==================================================================
KFENCE also reports on invalid frees, such as double-frees::
@@ -127,30 +131,32 @@ KFENCE also reports on invalid frees, such as double-frees::
==================================================================
BUG: KFENCE: invalid free in test_double_free+0xdc/0x171
- Invalid free of 0xffffffffb6741000:
+ Invalid free of 0xffff8c3f2e2a4000 (in kfence-#81):
test_double_free+0xdc/0x171
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- kfence-#26 [0xffffffffb6741000-0xffffffffb674101f, size=32, cache=kmalloc-32] allocated by task 507:
- test_alloc+0xf3/0x25b
+ kfence-#81: 0xffff8c3f2e2a4000-0xffff8c3f2e2a401f, size=32, cache=kmalloc-32
+
+ allocated by task 490 on cpu 1 at 34.175321s:
+ test_alloc+0xfe/0x738
test_double_free+0x76/0x171
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- freed by task 507:
+ freed by task 490 on cpu 1 at 34.175348s:
test_double_free+0xa8/0x171
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- CPU: 4 PID: 111 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
- Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ CPU: 1 PID: 490 Comm: kunit_try_catch Tainted: G B 5.13.0-rc3+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
==================================================================
KFENCE also uses pattern-based redzones on the other side of an object's guard
@@ -160,23 +166,25 @@ These are reported on frees::
==================================================================
BUG: KFENCE: memory corruption in test_kmalloc_aligned_oob_write+0xef/0x184
- Corrupted memory at 0xffffffffb6797ff9 [ 0xac . . . . . . ] (in kfence-#69):
+ Corrupted memory at 0xffff8c3f2e33aff9 [ 0xac . . . . . . ] (in kfence-#156):
test_kmalloc_aligned_oob_write+0xef/0x184
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- kfence-#69 [0xffffffffb6797fb0-0xffffffffb6797ff8, size=73, cache=kmalloc-96] allocated by task 507:
- test_alloc+0xf3/0x25b
+ kfence-#156: 0xffff8c3f2e33afb0-0xffff8c3f2e33aff8, size=73, cache=kmalloc-96
+
+ allocated by task 502 on cpu 7 at 42.159302s:
+ test_alloc+0xfe/0x738
test_kmalloc_aligned_oob_write+0x57/0x184
- kunit_try_run_case+0x51/0x85
+ kunit_try_run_case+0x61/0xa0
kunit_generic_run_threadfn_adapter+0x16/0x30
- kthread+0x137/0x160
+ kthread+0x176/0x1b0
ret_from_fork+0x22/0x30
- CPU: 4 PID: 120 Comm: kunit_try_catch Tainted: G W 5.8.0-rc6+ #7
- Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014
+ CPU: 7 PID: 502 Comm: kunit_try_catch Tainted: G B 5.13.0-rc3+ #7
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
==================================================================
For such errors, the address where the corruption occurred as well as the
diff --git a/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml
index 9222b06e93a0..fde07e4b119d 100644
--- a/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml
+++ b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml
@@ -12,7 +12,10 @@ maintainers:
description:
The Hitachi HD44780 Character LCD Controller is commonly used on character
LCDs that can display one or more lines of text. It exposes an M6800 bus
- interface, which can be used in either 4-bit or 8-bit mode.
+ interface, which can be used in either 4-bit or 8-bit mode. By using a
+ GPIO expander it is possible to use the driver with one of the popular
+ I2C expander boards based on the PCF8574 that are available for these
+ displays; see the example below.
properties:
compatible:
@@ -94,3 +97,29 @@ examples:
display-height-chars = <2>;
display-width-chars = <16>;
};
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcf8574: pcf8574@27 {
+ compatible = "nxp,pcf8574";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+ hd44780 {
+ compatible = "hit,hd44780";
+ display-height-chars = <2>;
+ display-width-chars = <16>;
+ data-gpios = <&pcf8574 4 0>,
+ <&pcf8574 5 0>,
+ <&pcf8574 6 0>,
+ <&pcf8574 7 0>;
+ enable-gpios = <&pcf8574 2 0>;
+ rs-gpios = <&pcf8574 0 0>;
+ rw-gpios = <&pcf8574 1 0>;
+ backlight-gpios = <&pcf8574 3 0>;
+ };
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
index 56f442374383..1d7e49167666 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-dt.txt
@@ -11,7 +11,7 @@ Required properties:
- None
Optional properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt for
+- operating-points: Refer to Documentation/devicetree/bindings/opp/opp-v1.yaml for
details. OPPs *must* be supplied either via DT, i.e. this property, or
populated at runtime.
- clock-latency: Specify the possible maximum transition latency for clock,
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml
new file mode 100644
index 000000000000..9cd42a64b13e
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek-hw.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/cpufreq/cpufreq-mediatek-hw.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek's CPUFREQ Bindings
+
+maintainers:
+ - Hector Yuan <hector.yuan@mediatek.com>
+
+description:
+ CPUFREQ HW is a hardware engine used by MediaTek SoCs to
+ manage frequency in hardware. It is capable of controlling
+ frequency for multiple clusters.
+
+properties:
+ compatible:
+ const: mediatek,cpufreq-hw
+
+ reg:
+ minItems: 1
+ maxItems: 2
+ description:
+ Base addresses and sizes of the hardware register banks. Each
+ entry corresponds to the register bank of one of the frequency
+ domains present.
+
+ "#performance-domain-cells":
+ description:
+ Number of cells in a performance domain specifier.
+ Set const to 1 here for nodes providing multiple
+ performance domains.
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#performance-domain-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ enable-method = "psci";
+ performance-domains = <&performance 0>;
+ reg = <0x000>;
+ };
+ };
+
+ /* ... */
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ performance: performance-controller@11bc00 {
+ compatible = "mediatek,cpufreq-hw";
+ reg = <0 0x0011bc10 0 0x120>, <0 0x0011bd30 0 0x120>;
+
+ #performance-domain-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
index ef68711716fb..b8233ec91d3d 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
@@ -10,7 +10,7 @@ Required properties:
transition and not stable yet.
Please refer to Documentation/devicetree/bindings/clock/clock-bindings.txt for
generic clock consumer properties.
-- operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp.txt
+- operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp-v2.yaml
for detail.
- proc-supply: Regulator for Vproc of CPU cluster.
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt
index d91a02a3b6b0..6b0b452acef0 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-st.txt
@@ -6,8 +6,6 @@ from the SoC, then supplies the OPP framework with 'prop' and 'supported
hardware' information respectively. The framework is then able to read
the DT and operate in the usual way.
-For more information about the expected DT format [See: ../opp/opp.txt].
-
Frequency Scaling only
----------------------
@@ -15,7 +13,7 @@ No vendor specific driver required for this.
Located in CPU's node:
-- operating-points : [See: ../power/opp.txt]
+- operating-points : [See: ../power/opp-v1.yaml]
Example [safe]
--------------
@@ -37,7 +35,7 @@ This requires the ST CPUFreq driver to supply 'process' and 'version' info.
Located in CPU's node:
-- operating-points-v2 : [See ../power/opp.txt]
+- operating-points-v2 : [See ../power/opp-v2.yaml]
Example [unsafe]
----------------
diff --git a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt
index 52a24b82fd86..bdbfd7c36101 100644
--- a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt
@@ -4,7 +4,7 @@ Binding for NVIDIA Tegra20 CPUFreq
Required properties:
- clocks: Must contain an entry for the CPU clock.
See ../clocks/clock-bindings.txt for details.
-- operating-points-v2: See ../bindings/opp/opp.txt for details.
+- operating-points-v2: See ../bindings/opp/opp-v2.yaml for details.
- #cooling-cells: Should be 2. See ../thermal/thermal-cooling-devices.yaml for details.
For each opp entry in 'operating-points-v2' table:
diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
index ac189dd82b08..3fbeb3733c48 100644
--- a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
+++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
@@ -8,7 +8,7 @@ Required properties:
- clocks: Phandles for clock specified in "clock-names" property
- clock-names : The name of clock used by the DFI, must be
"pclk_ddr_mon";
-- operating-points-v2: Refer to Documentation/devicetree/bindings/opp/opp.txt
+- operating-points-v2: Refer to Documentation/devicetree/bindings/opp/opp-v2.yaml
for details.
- center-supply: DMC supply node.
- status: Marks the node enabled/disabled.
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
index 4265399bb154..c851770bbdf2 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
@@ -14,10 +14,10 @@ allOf:
properties:
compatible:
- oneOf:
- - const: qcom,dsi-phy-7nm
- - const: qcom,dsi-phy-7nm-8150
- - const: qcom,sc7280-dsi-phy-7nm
+ enum:
+ - qcom,dsi-phy-7nm
+ - qcom,dsi-phy-7nm-8150
+ - qcom,sc7280-dsi-phy-7nm
reg:
items:
diff --git a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
index a4f9fe23dcd9..b193ee2db4a7 100644
--- a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
+++ b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml
@@ -24,13 +24,15 @@ properties:
items:
- description: Control and Status Register Slave Port
- description: Descriptor Slave Port
- - description: Response Slave Port
+ - description: Response Slave Port (Optional)
+ minItems: 2
reg-names:
items:
- const: csr
- const: desc
- const: resp
+ minItems: 2
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
new file mode 100644
index 000000000000..7a4f415d74dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/renesas,rz-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/G2L DMA Controller
+
+maintainers:
+ - Biju Das <biju.das.jz@bp.renesas.com>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r9a07g044-dmac # RZ/G2{L,LC}
+ - const: renesas,rz-dmac
+
+ reg:
+ items:
+ - description: Control and channel register block
+ - description: DMA extended resource selector block
+
+ interrupts:
+ maxItems: 17
+
+ interrupt-names:
+ items:
+ - const: error
+ - const: ch0
+ - const: ch1
+ - const: ch2
+ - const: ch3
+ - const: ch4
+ - const: ch5
+ - const: ch6
+ - const: ch7
+ - const: ch8
+ - const: ch9
+ - const: ch10
+ - const: ch11
+ - const: ch12
+ - const: ch13
+ - const: ch14
+ - const: ch15
+
+ clocks:
+ items:
+ - description: DMA main clock
+ - description: DMA register access clock
+
+ '#dma-cells':
+ const: 1
+ description:
+ The cell specifies the encoded MID/RID values of the DMAC port
+ connected to the DMA client and the slave channel configuration
+ parameters.
+ bits[0:9] - Specifies MID/RID value
+ bit[10] - Specifies DMA request high enable (HIEN)
+ bit[11] - Specifies DMA request detection type (LVL)
+ bits[12:14] - Specifies DMAACK output mode (AM)
+ bit[15] - Specifies Transfer Mode (TM)
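+ For example, a hypothetical client using MID/RID 0x25 with HIEN
+ (bit 10) set would encode the cell as 0x25 | (1 << 10) = 0x425.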
+
+ dma-channels:
+ const: 16
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ items:
+ - description: Reset for DMA ARESETN reset terminal
+ - description: Reset for DMA RST_ASYNC reset terminal
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - '#dma-cells'
+ - dma-channels
+ - power-domains
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/r9a07g044-cpg.h>
+
+ dmac: dma-controller@11820000 {
+ compatible = "renesas,r9a07g044-dmac",
+ "renesas,rz-dmac";
+ reg = <0x11820000 0x10000>,
+ <0x11830000 0x10000>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14", "ch15";
+ clocks = <&cpg CPG_MOD R9A07G044_DMAC_ACLK>,
+ <&cpg CPG_MOD R9A07G044_DMAC_PCLK>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_DMAC_ARESETN>,
+ <&cpg R9A07G044_DMAC_RST_ASYNC>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
index 2a5325f480f6..4bf676fd25dc 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
@@ -40,6 +40,13 @@ description: |
0x0: FIFO mode with threshold selectable with bit 0-1
0x1: Direct mode: each DMA request immediately initiates a transfer
from/to the memory, FIFO is bypassed.
+ -bit 4: alternative DMA request/acknowledge protocol
+ 0x0: Use standard DMA ACK management, where ACK signal is maintained
+ up to the removal of request and transfer completion
+ 0x1: Use alternative DMA ACK management, where ACK de-assertion does
+ not wait for the de-assertion of the REQuest, ACK is only managed
+ by transfer completion. This must only be used on channels
+ managing transfers for STM32 USART/UART.
maintainers:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-virtio.yaml b/Documentation/devicetree/bindings/gpio/gpio-virtio.yaml
new file mode 100644
index 000000000000..601d85754577
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-virtio.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-virtio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Virtio GPIO controller
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+allOf:
+ - $ref: /schemas/virtio/virtio-device.yaml#
+
+description:
+ Virtio GPIO controller, see /schemas/virtio/virtio-device.yaml for more
+ details.
+
+properties:
+ $nodename:
+ const: gpio
+
+ compatible:
+ const: virtio,device29
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+required:
+ - compatible
+ - gpio-controller
+ - "#gpio-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ virtio@3000 {
+ compatible = "virtio,mmio";
+ reg = <0x3000 0x100>;
+ interrupts = <41>;
+
+ gpio {
+ compatible = "virtio,device29";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index c5f6092a2855..6f98dd55fb4c 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -137,7 +137,7 @@ examples:
resets = <&reset 0>, <&reset 1>;
};
- gpu_opp_table: opp_table0 {
+ gpu_opp_table: opp-table {
compatible = "operating-points-v2";
opp-533000000 {
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
index 696c17aedbbe..d209f272625d 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
@@ -160,7 +160,7 @@ examples:
#cooling-cells = <2>;
};
- gpu_opp_table: opp_table0 {
+ gpu_opp_table: opp-table {
compatible = "operating-points-v2";
opp-533000000 {
diff --git a/Documentation/devicetree/bindings/i2c/i2c-virtio.yaml b/Documentation/devicetree/bindings/i2c/i2c-virtio.yaml
new file mode 100644
index 000000000000..7d87ed855301
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-virtio.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-virtio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Virtio I2C Adapter
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+ - $ref: /schemas/virtio/virtio-device.yaml#
+
+description:
+ Virtio I2C device, see /schemas/virtio/virtio-device.yaml for more details.
+
+properties:
+ $nodename:
+ const: i2c
+
+ compatible:
+ const: virtio,device22
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ virtio@3000 {
+ compatible = "virtio,mmio";
+ reg = <0x3000 0x100>;
+ interrupts = <41>;
+
+ i2c {
+ compatible = "virtio,device22";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@20 {
+ compatible = "dynaimage,al3320a";
+ reg = <0x20>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
index cffd02028d02..d74f2002409e 100644
--- a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
+++ b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
@@ -29,6 +29,8 @@ properties:
description:
Regulator for the LRADC reference voltage
+ wakeup-source: true
+
patternProperties:
"^button-[0-9]+$":
type: object
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
deleted file mode 100644
index 6cd08bca2c66..000000000000
--- a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Qualcomm PM8941 PMIC Power Key
-
-PROPERTIES
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,pm8941-pwrkey"
- "qcom,pm8941-resin"
- "qcom,pmk8350-pwrkey"
- "qcom,pmk8350-resin"
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: base address of registers for block
-
-- interrupts:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: key change interrupt; The format of the specifier is
- defined by the binding document describing the node's
- interrupt parent.
-
-- debounce:
- Usage: optional
- Value type: <u32>
- Definition: time in microseconds that key must be pressed or released
- for state change interrupt to trigger.
-
-- bias-pull-up:
- Usage: optional
- Value type: <empty>
- Definition: presence of this property indicates that the KPDPWR_N pin
- should be configured for pull up.
-
-- linux,code:
- Usage: optional
- Value type: <u32>
- Definition: The input key-code associated with the power key.
- Use the linux event codes defined in
- include/dt-bindings/input/linux-event-codes.h
- When property is omitted KEY_POWER is assumed.
-
-EXAMPLE
-
- pwrkey@800 {
- compatible = "qcom,pm8941-pwrkey";
- reg = <0x800>;
- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_POWER>;
- };
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.yaml b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.yaml
new file mode 100644
index 000000000000..62314a5fdce5
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/qcom,pm8941-pwrkey.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PM8941 PMIC Power Key
+
+maintainers:
+ - Courtney Cavin <courtney.cavin@sonymobile.com>
+ - Vinod Koul <vkoul@kernel.org>
+
+allOf:
+ - $ref: input.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,pm8941-pwrkey
+ - qcom,pm8941-resin
+ - qcom,pmk8350-pwrkey
+ - qcom,pmk8350-resin
+
+ interrupts:
+ maxItems: 1
+
+ debounce:
+ description: |
+ Time in microseconds that key must be pressed or
+ released for state change interrupt to trigger.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ bias-pull-up:
+ description: |
+ Presence of this property indicates that the KPDPWR_N
+ pin should be configured for pull up.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ linux,code:
+ description: |
+ The input key-code associated with the power key.
+ Use the linux event codes defined in
+ include/dt-bindings/input/linux-event-codes.h
+ When property is omitted KEY_POWER is assumed.
+
+required:
+ - compatible
+ - interrupts
+
+unevaluatedProperties: false
+...
diff --git a/Documentation/devicetree/bindings/input/regulator-haptic.txt b/Documentation/devicetree/bindings/input/regulator-haptic.txt
deleted file mode 100644
index 3ed1c7eb2f97..000000000000
--- a/Documentation/devicetree/bindings/input/regulator-haptic.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-* Regulator Haptic Device Tree Bindings
-
-Required Properties:
- - compatible : Should be "regulator-haptic"
- - haptic-supply : Power supply to the haptic motor.
- [*] refer Documentation/devicetree/bindings/regulator/regulator.txt
-
- - max-microvolt : The maximum voltage value supplied to the haptic motor.
- [The unit of the voltage is a micro]
-
- - min-microvolt : The minimum voltage value supplied to the haptic motor.
- [The unit of the voltage is a micro]
-
-Example:
-
- haptics {
- compatible = "regulator-haptic";
- haptic-supply = <&motor_regulator>;
- max-microvolt = <2700000>;
- min-microvolt = <1100000>;
- };
diff --git a/Documentation/devicetree/bindings/input/regulator-haptic.yaml b/Documentation/devicetree/bindings/input/regulator-haptic.yaml
new file mode 100644
index 000000000000..b1ae72f9cd2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/regulator-haptic.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/input/regulator-haptic.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Regulator Haptic Device Tree Bindings
+
+maintainers:
+ - Jaewon Kim <jaewon02.kim@samsung.com>
+
+properties:
+ compatible:
+ const: regulator-haptic
+
+ haptic-supply:
+ description: >
+ Power supply to the haptic motor
+
+ max-microvolt:
+ description: >
+ The maximum voltage value supplied to the haptic motor
+
+ min-microvolt:
+ description: >
+ The minimum voltage value supplied to the haptic motor
+
+required:
+ - compatible
+ - haptic-supply
+ - max-microvolt
+ - min-microvolt
+
+additionalProperties: false
+
+examples:
+ - |
+ haptics {
+ compatible = "regulator-haptic";
+ haptic-supply = <&motor_regulator>;
+ max-microvolt = <2700000>;
+ min-microvolt = <1100000>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml b/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml
new file mode 100644
index 000000000000..9df685bdc5db
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/chipone,icn8318.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/chipone,icn8318.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ChipOne ICN8318 Touchscreen Controller Device Tree Bindings
+
+maintainers:
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ compatible:
+ const: chipone,icn8318
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ wake-gpios:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - wake-gpios
+ - touchscreen-size-x
+ - touchscreen-size-y
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@40 {
+ compatible = "chipone,icn8318";
+ reg = <0x40>;
+ interrupt-parent = <&pio>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_wake_pin_p66>;
+ wake-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt b/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
deleted file mode 100644
index 38b0603f65f3..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/chipone_icn8318.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* ChipOne icn8318 I2C touchscreen controller
-
-Required properties:
- - compatible : "chipone,icn8318"
- - reg : I2C slave address of the chip (0x40)
- - interrupts : interrupt specification for the icn8318 interrupt
- - wake-gpios : GPIO specification for the WAKE input
- - touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
- - touchscreen-size-y : vertical resolution of touchscreen (in pixels)
-
-Optional properties:
- - pinctrl-names : should be "default"
- - pinctrl-0: : a phandle pointing to the pin settings for the
- control gpios
- - touchscreen-fuzz-x : horizontal noise value of the absolute input
- device (in pixels)
- - touchscreen-fuzz-y : vertical noise value of the absolute input
- device (in pixels)
- - touchscreen-inverted-x : X axis is inverted (boolean)
- - touchscreen-inverted-y : Y axis is inverted (boolean)
- - touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
- Swapping is done after inverting the axis
-
-Example:
-
-i2c@00000000 {
- /* ... */
-
- chipone_icn8318@40 {
- compatible = "chipone,icn8318";
- reg = <0x40>;
- interrupt-parent = <&pio>;
- interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
- pinctrl-names = "default";
- pinctrl-0 = <&ts_wake_pin_p66>;
- wake-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
- touchscreen-size-x = <800>;
- touchscreen-size-y = <480>;
- touchscreen-inverted-x;
- touchscreen-swapped-x-y;
- };
-
- /* ... */
-};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml b/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml
new file mode 100644
index 000000000000..f9998edbff70
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/pixcir,pixcir_ts.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/pixcir,pixcir_ts.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pixcir Touchscreen Controller Device Tree Bindings
+
+maintainers:
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ compatible:
+ enum:
+ - pixcir,pixcir_ts
+ - pixcir,pixcir_tangoc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ attb-gpio:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+
+ enable-gpios:
+ maxItems: 1
+
+ wake-gpios:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - attb-gpio
+ - touchscreen-size-x
+ - touchscreen-size-y
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@5c {
+ compatible = "pixcir,pixcir_ts";
+ reg = <0x5c>;
+ interrupts = <2 0>;
+ attb-gpio = <&gpf 2 0 2>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <600>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
deleted file mode 100644
index 697a3e7831e7..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* Pixcir I2C touchscreen controllers
-
-Required properties:
-- compatible: must be "pixcir,pixcir_ts" or "pixcir,pixcir_tangoc"
-- reg: I2C address of the chip
-- interrupts: interrupt to which the chip is connected
-- attb-gpio: GPIO connected to the ATTB line of the chip
-- touchscreen-size-x: horizontal resolution of touchscreen (in pixels)
-- touchscreen-size-y: vertical resolution of touchscreen (in pixels)
-
-Optional properties:
-- reset-gpios: GPIO connected to the RESET line of the chip
-- enable-gpios: GPIO connected to the ENABLE line of the chip
-- wake-gpios: GPIO connected to the WAKE line of the chip
-
-Example:
-
- i2c@00000000 {
- /* ... */
-
- pixcir_ts@5c {
- compatible = "pixcir,pixcir_ts";
- reg = <0x5c>;
- interrupts = <2 0>;
- attb-gpio = <&gpf 2 0 2>;
- touchscreen-size-x = <800>;
- touchscreen-size-y = <600>;
- };
-
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml b/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml
new file mode 100644
index 000000000000..938aab016cc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/ti,tsc2005.yaml
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/ti,tsc2005.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments TSC2004 and TSC2005 touchscreen controller bindings
+
+maintainers:
+ - Marek Vasut <marex@denx.de>
+ - Michael Welling <mwelling@ieee.org>
+
+properties:
+ $nodename:
+ pattern: "^touchscreen(@.*)?$"
+
+ compatible:
+ enum:
+ - ti,tsc2004
+ - ti,tsc2005
+
+ reg:
+ maxItems: 1
+ description: |
+ I2C address when used on the I2C bus, or the SPI chip select index
+ when used on the SPI bus
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO specifier for the controller reset line
+
+ spi-max-frequency:
+ description: TSC2005 SPI bus clock frequency.
+ maximum: 25000000
+
+ ti,x-plate-ohms:
+ description: Resistance of the touchscreen's X plates, in ohms (defaults to 280)
+
+ ti,esd-recovery-timeout-ms:
+ description: |
+ If the touchscreen does not respond after the configured time
+ (in milliseconds), the driver will reset it. This is disabled
+ by default.
+
+ vio-supply:
+ description: Regulator specifier
+
+ touchscreen-fuzz-pressure: true
+ touchscreen-fuzz-x: true
+ touchscreen-fuzz-y: true
+ touchscreen-max-pressure: true
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+
+allOf:
+ - $ref: touchscreen.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,tsc2004
+ then:
+ properties:
+ spi-max-frequency: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ touchscreen@48 {
+ compatible = "ti,tsc2004";
+ reg = <0x48>;
+ vio-supply = <&vio>;
+
+ reset-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+ interrupts-extended = <&gpio1 27 IRQ_TYPE_EDGE_RISING>;
+
+ touchscreen-fuzz-x = <4>;
+ touchscreen-fuzz-y = <7>;
+ touchscreen-fuzz-pressure = <2>;
+ touchscreen-size-x = <4096>;
+ touchscreen-size-y = <4096>;
+ touchscreen-max-pressure = <2048>;
+
+ ti,x-plate-ohms = <280>;
+ ti,esd-recovery-timeout-ms = <8000>;
+ };
+ };
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ touchscreen@0 {
+ compatible = "ti,tsc2005";
+ spi-max-frequency = <6000000>;
+ reg = <0>;
+
+ vio-supply = <&vio>;
+
+ reset-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; /* 104 */
+ interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>; /* 100 */
+
+ touchscreen-fuzz-x = <4>;
+ touchscreen-fuzz-y = <7>;
+ touchscreen-fuzz-pressure = <2>;
+ touchscreen-size-x = <4096>;
+ touchscreen-size-y = <4096>;
+ touchscreen-max-pressure = <2048>;
+
+ ti,x-plate-ohms = <280>;
+ ti,esd-recovery-timeout-ms = <8000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt b/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt
deleted file mode 100644
index b80c04b0e5c0..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-* Texas Instruments tsc2004 and tsc2005 touchscreen controllers
-
-Required properties:
- - compatible : "ti,tsc2004" or "ti,tsc2005"
- - reg : Device address
- - interrupts : IRQ specifier
- - spi-max-frequency : Maximum SPI clocking speed of the device
- (for tsc2005)
-
-Optional properties:
- - vio-supply : Regulator specifier
- - reset-gpios : GPIO specifier for the controller reset line
- - ti,x-plate-ohms : integer, resistance of the touchscreen's X plates
- in ohm (defaults to 280)
- - ti,esd-recovery-timeout-ms : integer, if the touchscreen does not respond after
- the configured time (in milli seconds), the driver
- will reset it. This is disabled by default.
- - properties defined in touchscreen.txt
-
-Example:
-
-&i2c3 {
- tsc2004@48 {
- compatible = "ti,tsc2004";
- reg = <0x48>;
- vio-supply = <&vio>;
-
- reset-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
- interrupts-extended = <&gpio1 27 IRQ_TYPE_EDGE_RISING>;
-
- touchscreen-fuzz-x = <4>;
- touchscreen-fuzz-y = <7>;
- touchscreen-fuzz-pressure = <2>;
- touchscreen-size-x = <4096>;
- touchscreen-size-y = <4096>;
- touchscreen-max-pressure = <2048>;
-
- ti,x-plate-ohms = <280>;
- ti,esd-recovery-timeout-ms = <8000>;
- };
-}
-
-&mcspi1 {
- tsc2005@0 {
- compatible = "ti,tsc2005";
- spi-max-frequency = <6000000>;
- reg = <0>;
-
- vio-supply = <&vio>;
-
- reset-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; /* 104 */
- interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>; /* 100 */
-
- touchscreen-fuzz-x = <4>;
- touchscreen-fuzz-y = <7>;
- touchscreen-fuzz-pressure = <2>;
- touchscreen-size-x = <4096>;
- touchscreen-size-y = <4096>;
- touchscreen-max-pressure = <2048>;
-
- ti,x-plate-ohms = <280>;
- ti,esd-recovery-timeout-ms = <8000>;
- };
-}
diff --git a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
index a8873739d61a..b8204ed22dd5 100644
--- a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
+++ b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
@@ -81,10 +81,10 @@ examples:
noc_opp_table: opp-table {
compatible = "operating-points-v2";
- opp-133M {
+ opp-133333333 {
opp-hz = /bits/ 64 <133333333>;
};
- opp-800M {
+ opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
};
};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 7f2578d48e3f..9eb4bb529ad5 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -19,7 +19,9 @@ properties:
- const: allwinner,sun8i-v3s-emac
- const: allwinner,sun50i-a64-emac
- items:
- - const: allwinner,sun50i-h6-emac
+ - enum:
+ - allwinner,sun20i-d1-emac
+ - allwinner,sun50i-h6-emac
- const: allwinner,sun50i-a64-emac
reg:
diff --git a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
index aeff2bd774dd..729ae97b63d9 100644
--- a/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
+++ b/Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
@@ -18,6 +18,9 @@ description: |
sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to
provide the OPP framework with required information.
+allOf:
+ - $ref: opp-v2-base.yaml#
+
properties:
compatible:
const: allwinner,sun50i-h6-operating-points
@@ -43,6 +46,7 @@ patternProperties:
properties:
opp-hz: true
+ clock-latency-ns: true
patternProperties:
"opp-microvolt-.*": true
diff --git a/Documentation/devicetree/bindings/opp/opp-v1.yaml b/Documentation/devicetree/bindings/opp/opp-v1.yaml
new file mode 100644
index 000000000000..d585d536a3fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/opp-v1.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/opp-v1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic OPP (Operating Performance Points) v1 Bindings
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+description: |+
+ Devices work at voltage-current-frequency combinations and some implementations
+ have the liberty of choosing these. These combinations are called Operating
+ Performance Points, aka OPPs. This document defines bindings for these OPPs,
+ applicable across a wide range of devices. For illustration purposes, this
+ document uses the CPU as an example device.
+
+ This binding only supports voltage-frequency pairs.
+
+select: true
+
+properties:
+ operating-points:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: Frequency in kHz
+ - description: Voltage for OPP in uV
+
+
+additionalProperties: true
+
+examples:
+ - |
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ operating-points =
+ /* kHz uV */
+ <792000 1100000>,
+ <396000 950000>,
+ <198000 850000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
new file mode 100644
index 000000000000..ae3ae4d39843
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
@@ -0,0 +1,214 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/opp-v2-base.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic OPP (Operating Performance Points) Common Binding
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+description: |
+ Devices work at voltage-current-frequency combinations and some implementations
+ have the liberty of choosing these. These combinations are called Operating
+ Performance Points, aka OPPs. This document defines bindings for these OPPs,
+ applicable across a wide range of devices. For illustration purposes, this
+ document uses the CPU as an example device.
+
+ This describes the OPPs belonging to a device.
+
+select: false
+
+properties:
+ $nodename:
+ pattern: '^opp-table(-[a-z0-9]+)?$'
+
+ opp-shared:
+ description:
+ Indicates that device nodes using this OPP Table Node's phandle switch
+ their DVFS state together, i.e. they share clock/voltage/current lines.
+ Missing property means devices have independent clock/voltage/current
+ lines, but they share OPP tables.
+ type: boolean
+
+patternProperties:
+ '^opp-?[0-9]+$':
+ type: object
+ description:
+ One or more OPP nodes describing voltage-current-frequency combinations.
+ Their name isn't significant but their phandle can be used to reference an
+ OPP. These are mandatory except for the case where the OPP table is
+ present only to indicate dependency between devices using the opp-shared
+ property.
+
+ properties:
+ opp-hz:
+ description:
+ Frequency in Hz, expressed as a 64-bit big-endian integer. This is a
+ required property for all OPP nodes, unless another "required"
+ property to uniquely identify the OPP nodes exists. Devices like power
+ domains must have another (implementation dependent) property.
+
+ opp-microvolt:
+ description: |
+ Voltage for the OPP
+
+ A single regulator's voltage is specified with an array of size one or three.
+ Single entry is for target voltage and three entries are for <target min max>
+ voltages.
+
+ Entries for multiple regulators shall be provided in the same field separated
+ by angular brackets <>. The OPP binding doesn't provide any provisions to
+ relate the values to their power supplies or the order in which the supplies
+ need to be configured and that is left for the implementation specific
+ binding.
+
+ Entries for all regulators shall be of the same size, i.e. either all use a
+ single value or triplets.
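+
+ For example, <975000 970000 985000> gives <target min max> for a
+ single supply, while <975000>, <965000> gives just the target voltage
+ for each of two supplies.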
+ minItems: 1
+ maxItems: 8 # Should be enough regulators
+ items:
+ minItems: 1
+ maxItems: 3
+
+ opp-microamp:
+ description: |
+ The maximum current drawn by the device in microamperes considering
+ system specific parameters (such as transients, process, aging,
+ maximum operating temperature range etc.) as necessary. This may be
+ used to set the most efficient regulator operating mode.
+
+ Should only be set if opp-microvolt or opp-microvolt-<name> is set for
+ the OPP.
+
+ Entries for multiple regulators shall be provided in the same field
+ separated by angular brackets <>. If current values aren't required
+ for a regulator, then it shall be filled with 0. If current values
+ aren't required for any of the regulators, then this field is not
+ required. The OPP binding doesn't provide any provisions to relate the
+ values to their power supplies or the order in which the supplies need
+ to be configured and that is left for the implementation specific
+ binding.
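+
+ For example, <70000>, <0>, <70000> sets the current for supplies 0 and
+ 2 and leaves supply 1 unconfigured, as in the multi-regulator example
+ of the opp-v2 binding.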
+ minItems: 1
+ maxItems: 8 # Should be enough regulators
+
+ opp-level:
+ description:
+ A value representing the performance level of the device.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ opp-peak-kBps:
+ description:
+ Peak bandwidth in kilobytes per second, expressed as an array of
+ 32-bit big-endian integers. Each element of the array represents the
+ peak bandwidth value of each interconnect path. The number of elements
+ should match the number of interconnect paths.
+ minItems: 1
+ maxItems: 32 # Should be enough
+
+ opp-avg-kBps:
+ description:
+ Average bandwidth in kilobytes per second, expressed as an array
+ of 32-bit big-endian integers. Each element of the array represents the
+ average bandwidth value of each interconnect path. The number of elements
+ should match the number of interconnect paths. This property is only
+ meaningful in OPP tables where opp-peak-kBps is present.
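+
+ For instance, a device with two interconnect paths might use
+ opp-peak-kBps = <7216000 1000000> together with
+ opp-avg-kBps = <700000 500000> (illustrative values only).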
+ minItems: 1
+ maxItems: 32 # Should be enough
+
+ clock-latency-ns:
+ description:
+ Specifies the maximum possible transition latency (in nanoseconds) for
+ switching to this OPP from any other OPP.
+
+ turbo-mode:
+ description:
+ Marks the OPP to be used only for turbo modes. Turbo mode is available
+ on some platforms, where the device can run over its operating
+ frequency for a short duration of time limited by the device's power,
+ current and thermal limits.
+ type: boolean
+
+ opp-suspend:
+ description:
+ Marks the OPP to be used during device suspend. If multiple OPPs in
+ the table have this, the OPP with highest opp-hz will be used.
+ type: boolean
+
+ opp-supported-hw:
+ description: |
+ This property allows a platform to enable only a subset of the OPPs
+ from the larger set present in the OPP table, based on the current
+ version of the hardware (already known to the operating system).
+
+ Each block present in the array of blocks in this property represents
+ a sub-group of hardware versions supported by the OPP, i.e. <sub-group
+ A>, <sub-group B>, etc. The OPP will be enabled if _any_ of these
+ sub-groups match the hardware's version.
+
+ Each sub-group is a platform defined array representing the hierarchy
+ of hardware versions supported by the platform. For a platform with
+ three hierarchical levels of version (X.Y.Z), this field shall look
+ like
+
+ opp-supported-hw = <X1 Y1 Z1>, <X2 Y2 Z2>, <X3 Y3 Z3>.
+
+ Each level (e.g. X1) in the version hierarchy is represented by a 32 bit
+ value, one bit per version, so there can be a maximum of 32 versions per
+ level. A logical AND (&) operation is performed for each level with the
+ hardware's level version and a non-zero output for _all_ the levels in
+ a sub-group means the OPP is supported by hardware. A value of
+ 0xFFFFFFFF for each level in the sub-group will enable the OPP for all
+ versions for the hardware.
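+
+ As a worked check, take hypothetical hardware whose per-level version
+ bits are <0x20 0x02 0x10>: against the sub-group
+ <0x20 0xff0000ff 0x0000f4f0>, every level ANDs non-zero (0x20 & 0x20,
+ 0x02 & 0xff0000ff, 0x10 & 0x0000f4f0), so this OPP would be enabled.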
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ maxItems: 32
+ items:
+ minItems: 1
+ maxItems: 4
+
+ required-opps:
+ description:
+ This contains a phandle to an OPP node in another device's OPP table.
+ It may contain an array of phandles, where each phandle points to an
+ OPP of a different device. It should not contain multiple phandles to
+ OPP nodes in the same OPP table. The referenced OPPs specify the
+ minimum OPP each of those devices must run at for the current device
+ to function at the OPP containing this property.
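+
+ For example, a device OPP might carry required-opps = <&rpmpd_opp_svs>;
+ where the phandle (a hypothetical label) points to a performance state
+ in a power domain's OPP table.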
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+ patternProperties:
+ '^opp-microvolt-':
+ description:
+ Named opp-microvolt property. This works exactly like the
+ opp-microvolt property above, but allows multiple voltage ranges to
+ be provided for the same OPP. At runtime, the platform can pick a
+ <name> and the matching opp-microvolt-<name> property will be
+ enabled for all OPPs. If the platform doesn't pick a specific <name>,
+ or the <name> doesn't match any opp-microvolt-<name> property, then
+ the opp-microvolt property shall be used, if present.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 1
+ maxItems: 8 # Should be enough regulators
+ items:
+ minItems: 1
+ maxItems: 3
+
+ '^opp-microamp-':
+ description:
+ Named opp-microamp property. Similar to opp-microvolt-<name> property,
+ but for microamp instead.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8 # Should be enough regulators
+
+ dependencies:
+ opp-avg-kBps: [ opp-peak-kBps ]
+
+required:
+ - compatible
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/opp/opp-v2.yaml b/Documentation/devicetree/bindings/opp/opp-v2.yaml
new file mode 100644
index 000000000000..eaf8fba2c691
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/opp-v2.yaml
@@ -0,0 +1,475 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/opp-v2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic OPP (Operating Performance Points) Bindings
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+allOf:
+ - $ref: opp-v2-base.yaml#
+
+properties:
+ compatible:
+ const: operating-points-v2
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ /*
+ * Example 1: Single cluster, dual-core ARM Cortex-A9; CPUs switch DVFS
+ * states together.
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 0>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply0>;
+ operating-points-v2 = <&cpu0_opp_table0>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 0>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply0>;
+ operating-points-v2 = <&cpu0_opp_table0>;
+ };
+ };
+
+ cpu0_opp_table0: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <975000 970000 985000>;
+ opp-microamp = <70000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ opp-microamp = <80000>;
+ clock-latency-ns = <310000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1025000>;
+ clock-latency-ns = <290000>;
+ turbo-mode;
+ };
+ };
+
+ - |
+ /*
+ * Example 2: Single cluster, quad-core Qualcomm Krait; CPUs switch DVFS
+ * states independently.
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "qcom,krait";
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 0>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply0>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+
+ cpu@1 {
+ compatible = "qcom,krait";
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 1>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply1>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+
+ cpu@2 {
+ compatible = "qcom,krait";
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 2>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+
+ cpu@3 {
+ compatible = "qcom,krait";
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 3>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply3>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+ };
+
+ cpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ /*
+ * Missing opp-shared property means CPUs switch DVFS states
+ * independently.
+ */
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <975000 970000 985000>;
+ opp-microamp = <70000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ opp-microamp = <80000>;
+ clock-latency-ns = <310000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1025000>;
+ opp-microamp = <90000>;
+ clock-latency-ns = <290000>;
+ turbo-mode;
+ };
+ };
+
+ - |
+ /*
+ * Example 3: Dual-cluster, dual-core per cluster. CPUs within a cluster switch
+ * DVFS state together.
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 0>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply0>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 0>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply0>;
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu@100 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <0x100>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 1>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply1>;
+ operating-points-v2 = <&cluster1_opp>;
+ };
+
+ cpu@101 {
+ compatible = "arm,cortex-a15";
+ device_type = "cpu";
+ reg = <0x101>;
+ next-level-cache = <&L2>;
+ clocks = <&clk_controller 1>;
+ clock-names = "cpu";
+ cpu-supply = <&cpu_supply1>;
+ operating-points-v2 = <&cluster1_opp>;
+ };
+ };
+
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <975000 970000 985000>;
+ opp-microamp = <70000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ opp-microamp = <80000>;
+ clock-latency-ns = <310000>;
+ };
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1025000>;
+ opp-microamp = <90000>;
+ clock-latency-ns = <290000>;
+ turbo-mode;
+ };
+ };
+
+ cluster1_opp: opp-table-1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1050000 1045000 1055000>;
+ opp-microamp = <95000>;
+ clock-latency-ns = <400000>;
+ opp-suspend;
+ };
+ opp-1400000000 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-microvolt = <1075000>;
+ opp-microamp = <100000>;
+ clock-latency-ns = <400000>;
+ };
+ opp-1500000000 {
+ opp-hz = /bits/ 64 <1500000000>;
+ opp-microvolt = <1100000 1010000 1110000>;
+ opp-microamp = <95000>;
+ clock-latency-ns = <400000>;
+ turbo-mode;
+ };
+ };
+
+ - |
+ /* Example 4: Handling multiple regulators */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "foo,cpu-type";
+ device_type = "cpu";
+ reg = <0>;
+
+ vcc0-supply = <&cpu_supply0>;
+ vcc1-supply = <&cpu_supply1>;
+ vcc2-supply = <&cpu_supply2>;
+ operating-points-v2 = <&cpu0_opp_table4>;
+ };
+ };
+
+ cpu0_opp_table4: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <970000>, /* Supply 0 */
+ <960000>, /* Supply 1 */
+ <960000>; /* Supply 2 */
+ opp-microamp = <70000>, /* Supply 0 */
+ <70000>, /* Supply 1 */
+ <70000>; /* Supply 2 */
+ clock-latency-ns = <300000>;
+ };
+
+ /* OR */
+
+ opp-1000000001 {
+ opp-hz = /bits/ 64 <1000000001>;
+ opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+ <965000 960000 975000>, /* Supply 1 */
+ <965000 960000 975000>; /* Supply 2 */
+ opp-microamp = <70000>, /* Supply 0 */
+ <70000>, /* Supply 1 */
+ <70000>; /* Supply 2 */
+ clock-latency-ns = <300000>;
+ };
+
+ /* OR */
+
+ opp-1000000002 {
+ opp-hz = /bits/ 64 <1000000002>;
+ opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+ <965000 960000 975000>, /* Supply 1 */
+ <965000 960000 975000>; /* Supply 2 */
+ opp-microamp = <70000>, /* Supply 0 */
+ <0>, /* Supply 1 doesn't need this */
+ <70000>; /* Supply 2 */
+ clock-latency-ns = <300000>;
+ };
+ };
+
+ - |
+ /*
+ * Example 5: opp-supported-hw
+ * (example: three-level hierarchy of versions: cuts, substrate and process)
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <0>;
+ cpu-supply = <&cpu_supply>;
+ operating-points-v2 = <&cpu0_opp_table_slow>;
+ };
+ };
+
+ cpu0_opp_table_slow: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-600000000 {
+ /*
+ * Supports all substrate and process versions for 0xF
+ * cuts, i.e. only first four cuts.
+ */
+ opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>;
+ opp-hz = /bits/ 64 <600000000>;
+ };
+
+ opp-800000000 {
+ /*
+ * Supports:
+ * - cuts: only one, 6th cut (represented by 6th bit).
+ * - substrate: supports 16 different substrate versions
+ * - process: supports 9 different process versions
+ */
+ opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>;
+ opp-hz = /bits/ 64 <800000000>;
+ };
+
+ opp-900000000 {
+ /*
+ * Supports:
+ * - All cuts and substrate where process version is 0x2.
+ * - All cuts and process where substrate version is 0x2.
+ */
+ opp-supported-hw = <0xFFFFFFFF 0xFFFFFFFF 0x02>,
+ <0xFFFFFFFF 0x02 0xFFFFFFFF>;
+ opp-hz = /bits/ 64 <900000000>;
+ };
+ };
+
+ - |
+ /*
+ * Example 6: opp-microvolt-<name>, opp-microamp-<name>:
+ * (example: device with two possible microvolt ranges: slow and fast)
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <0>;
+ operating-points-v2 = <&cpu0_opp_table6>;
+ };
+ };
+
+ cpu0_opp_table6: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt-slow = <915000 900000 925000>;
+ opp-microvolt-fast = <975000 970000 985000>;
+ opp-microamp-slow = <70000>;
+ opp-microamp-fast = <71000>;
+ };
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */
+ <925000 910000 935000>; /* Supply vcc1 */
+ opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */
+ <965000 960000 975000>; /* Supply vcc1 */
+ opp-microamp = <70000>; /* Will be used for both slow/fast */
+ };
+ };
+
+ - |
+ /*
+ * Example 7: Single cluster, quad-core ARM Cortex-A53; OPP points come from
+ * firmware, with distinct clock controls but two sets of clock/voltage/current
+ * lines.
+ */
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x100>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 0>;
+ operating-points-v2 = <&cpu_opp0_table>;
+ };
+ cpu@1 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x101>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 1>;
+ operating-points-v2 = <&cpu_opp0_table>;
+ };
+ cpu@2 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x102>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 2>;
+ operating-points-v2 = <&cpu_opp1_table>;
+ };
+ cpu@3 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x103>;
+ next-level-cache = <&A53_L2>;
+ clocks = <&dvfs_controller 3>;
+ operating-points-v2 = <&cpu_opp1_table>;
+ };
+
+ };
+
+ cpu_opp0_table: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ };
+
+ cpu_opp1_table: opp-table-1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ };
+...
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
deleted file mode 100644
index 08b3da4736cf..000000000000
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ /dev/null
@@ -1,622 +0,0 @@
-Generic OPP (Operating Performance Points) Bindings
-----------------------------------------------------
-
-Devices work at voltage-current-frequency combinations and some implementations
-have the liberty of choosing these. These combinations are called Operating
-Performance Points aka OPPs. This document defines bindings for these OPPs
-applicable across wide range of devices. For illustration purpose, this document
-uses CPU as a device.
-
-This document contain multiple versions of OPP binding and only one of them
-should be used per device.
-
-Binding 1: operating-points
-============================
-
-This binding only supports voltage-frequency pairs.
-
-Properties:
-- operating-points: An array of 2-tuples items, and each item consists
- of frequency and voltage like <freq-kHz vol-uV>.
- freq: clock frequency in kHz
- vol: voltage in microvolt
-
-Examples:
-
-cpu@0 {
- compatible = "arm,cortex-a9";
- reg = <0>;
- next-level-cache = <&L2>;
- operating-points = <
- /* kHz uV */
- 792000 1100000
- 396000 950000
- 198000 850000
- >;
-};
-
-
-Binding 2: operating-points-v2
-============================
-
-* Property: operating-points-v2
-
-Devices supporting OPPs must set their "operating-points-v2" property with
-phandle to a OPP table in their DT node. The OPP core will use this phandle to
-find the operating points for the device.
-
-This can contain more than one phandle for power domain providers that provide
-multiple power domains. That is, one phandle for each power domain. If only one
-phandle is available, then the same OPP table will be used for all power domains
-provided by the power domain provider.
-
-If required, this can be extended for SoC vendor specific bindings. Such bindings
-should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
-and should have a compatible description like: "operating-points-v2-<vendor>".
-
-* OPP Table Node
-
-This describes the OPPs belonging to a device. This node can have following
-properties:
-
-Required properties:
-- compatible: Allow OPPs to express their compatibility. It should be:
- "operating-points-v2".
-
-- OPP nodes: One or more OPP nodes describing voltage-current-frequency
- combinations. Their name isn't significant but their phandle can be used to
- reference an OPP. These are mandatory except for the case where the OPP table
- is present only to indicate dependency between devices using the opp-shared
- property.
-
-Optional properties:
-- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
- switch their DVFS state together, i.e. they share clock/voltage/current lines.
- Missing property means devices have independent clock/voltage/current lines,
- but they share OPP tables.
-
-- status: Marks the OPP table enabled/disabled.
-
-
-* OPP Node
-
-This defines voltage-current-frequency combinations along with other related
-properties.
-
-Required properties:
-- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer. This is a
- required property for all device nodes, unless another "required" property to
- uniquely identify the OPP nodes exists. Devices like power domains must have
- another (implementation dependent) property.
-
-- opp-peak-kBps: Peak bandwidth in kilobytes per second, expressed as an array
- of 32-bit big-endian integers. Each element of the array represents the
- peak bandwidth value of each interconnect path. The number of elements should
- match the number of interconnect paths.
-
-Optional properties:
-- opp-microvolt: voltage in micro Volts.
-
- A single regulator's voltage is specified with an array of size one or three.
- Single entry is for target voltage and three entries are for <target min max>
- voltages.
-
- Entries for multiple regulators shall be provided in the same field separated
- by angular brackets <>. The OPP binding doesn't provide any provisions to
- relate the values to their power supplies or the order in which the supplies
- need to be configured and that is left for the implementation specific
- binding.
-
- Entries for all regulators shall be of the same size, i.e. either all use a
- single value or triplets.
-
-- opp-microvolt-<name>: Named opp-microvolt property. This is exactly similar to
- the above opp-microvolt property, but allows multiple voltage ranges to be
- provided for the same OPP. At runtime, the platform can pick a <name> and
- matching opp-microvolt-<name> property will be enabled for all OPPs. If the
- platform doesn't pick a specific <name> or the <name> doesn't match with any
- opp-microvolt-<name> properties, then opp-microvolt property shall be used, if
- present.
-
-- opp-microamp: The maximum current drawn by the device in microamperes
- considering system specific parameters (such as transients, process, aging,
- maximum operating temperature range etc.) as necessary. This may be used to
- set the most efficient regulator operating mode.
-
- Should only be set if opp-microvolt is set for the OPP.
-
- Entries for multiple regulators shall be provided in the same field separated
- by angular brackets <>. If current values aren't required for a regulator,
- then it shall be filled with 0. If current values aren't required for any of
- the regulators, then this field is not required. The OPP binding doesn't
- provide any provisions to relate the values to their power supplies or the
- order in which the supplies need to be configured and that is left for the
- implementation specific binding.
-
-- opp-microamp-<name>: Named opp-microamp property. Similar to
- opp-microvolt-<name> property, but for microamp instead.
-
-- opp-level: A value representing the performance level of the device,
- expressed as a 32-bit integer.
-
-- opp-avg-kBps: Average bandwidth in kilobytes per second, expressed as an array
- of 32-bit big-endian integers. Each element of the array represents the
- average bandwidth value of each interconnect path. The number of elements
- should match the number of interconnect paths. This property is only
- meaningful in OPP tables where opp-peak-kBps is present.
-
-- clock-latency-ns: Specifies the maximum possible transition latency (in
- nanoseconds) for switching to this OPP from any other OPP.
-
-- turbo-mode: Marks the OPP to be used only for turbo modes. Turbo mode is
- available on some platforms, where the device can run over its operating
- frequency for a short duration of time limited by the device's power, current
- and thermal limits.
-
-- opp-suspend: Marks the OPP to be used during device suspend. If multiple OPPs
- in the table have this, the OPP with highest opp-hz will be used.
-
-- opp-supported-hw: This property allows a platform to enable only a subset of
- the OPPs from the larger set present in the OPP table, based on the current
- version of the hardware (already known to the operating system).
-
- Each block present in the array of blocks in this property, represents a
- sub-group of hardware versions supported by the OPP. i.e. <sub-group A>,
- <sub-group B>, etc. The OPP will be enabled if _any_ of these sub-groups match
- the hardware's version.
-
- Each sub-group is a platform defined array representing the hierarchy of
- hardware versions supported by the platform. For a platform with three
- hierarchical levels of version (X.Y.Z), this field shall look like
-
- opp-supported-hw = <X1 Y1 Z1>, <X2 Y2 Z2>, <X3 Y3 Z3>.
-
- Each level (eg. X1) in version hierarchy is represented by a 32 bit value, one
- bit per version and so there can be maximum 32 versions per level. Logical AND
- (&) operation is performed for each level with the hardware's level version
- and a non-zero output for _all_ the levels in a sub-group means the OPP is
- supported by hardware. A value of 0xFFFFFFFF for each level in the sub-group
- will enable the OPP for all versions for the hardware.
-
-- status: Marks the node enabled/disabled.
-
-- required-opps: This contains phandle to an OPP node in another device's OPP
- table. It may contain an array of phandles, where each phandle points to an
- OPP of a different device. It should not contain multiple phandles to the OPP
- nodes in the same OPP table. This specifies the minimum required OPP of the
- device(s), whose OPP's phandle is present in this property, for the
- functioning of the current device at the current OPP (where this property is
- present).
-
-Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
-
-/ {
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a9";
- reg = <0>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 0>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply0>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
-
- cpu@1 {
- compatible = "arm,cortex-a9";
- reg = <1>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 0>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply0>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
- };
-
- cpu0_opp_table: opp_table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <975000 970000 985000>;
- opp-microamp = <70000>;
- clock-latency-ns = <300000>;
- opp-suspend;
- };
- opp-1100000000 {
- opp-hz = /bits/ 64 <1100000000>;
- opp-microvolt = <1000000 980000 1010000>;
- opp-microamp = <80000>;
- clock-latency-ns = <310000>;
- };
- opp-1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1025000>;
- clock-latency-ns = <290000>;
- turbo-mode;
- };
- };
-};
-
-Example 2: Single cluster, Quad-core Qualcom-krait, switches DVFS states
-independently.
-
-/ {
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "qcom,krait";
- reg = <0>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 0>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply0>;
- operating-points-v2 = <&cpu_opp_table>;
- };
-
- cpu@1 {
- compatible = "qcom,krait";
- reg = <1>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 1>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply1>;
- operating-points-v2 = <&cpu_opp_table>;
- };
-
- cpu@2 {
- compatible = "qcom,krait";
- reg = <2>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 2>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply2>;
- operating-points-v2 = <&cpu_opp_table>;
- };
-
- cpu@3 {
- compatible = "qcom,krait";
- reg = <3>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 3>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply3>;
- operating-points-v2 = <&cpu_opp_table>;
- };
- };
-
- cpu_opp_table: opp_table {
- compatible = "operating-points-v2";
-
- /*
- * Missing opp-shared property means CPUs switch DVFS states
- * independently.
- */
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <975000 970000 985000>;
- opp-microamp = <70000>;
- clock-latency-ns = <300000>;
- opp-suspend;
- };
- opp-1100000000 {
- opp-hz = /bits/ 64 <1100000000>;
- opp-microvolt = <1000000 980000 1010000>;
- opp-microamp = <80000>;
- clock-latency-ns = <310000>;
- };
- opp-1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1025000>;
- opp-microamp = <90000;
- lock-latency-ns = <290000>;
- turbo-mode;
- };
- };
-};
-
-Example 3: Dual-cluster, Dual-core per cluster. CPUs within a cluster switch
-DVFS state together.
-
-/ {
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a7";
- reg = <0>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 0>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply0>;
- operating-points-v2 = <&cluster0_opp>;
- };
-
- cpu@1 {
- compatible = "arm,cortex-a7";
- reg = <1>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 0>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply0>;
- operating-points-v2 = <&cluster0_opp>;
- };
-
- cpu@100 {
- compatible = "arm,cortex-a15";
- reg = <100>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 1>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply1>;
- operating-points-v2 = <&cluster1_opp>;
- };
-
- cpu@101 {
- compatible = "arm,cortex-a15";
- reg = <101>;
- next-level-cache = <&L2>;
- clocks = <&clk_controller 1>;
- clock-names = "cpu";
- cpu-supply = <&cpu_supply1>;
- operating-points-v2 = <&cluster1_opp>;
- };
- };
-
- cluster0_opp: opp_table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <975000 970000 985000>;
- opp-microamp = <70000>;
- clock-latency-ns = <300000>;
- opp-suspend;
- };
- opp-1100000000 {
- opp-hz = /bits/ 64 <1100000000>;
- opp-microvolt = <1000000 980000 1010000>;
- opp-microamp = <80000>;
- clock-latency-ns = <310000>;
- };
- opp-1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1025000>;
- opp-microamp = <90000>;
- clock-latency-ns = <290000>;
- turbo-mode;
- };
- };
-
- cluster1_opp: opp_table1 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-1300000000 {
- opp-hz = /bits/ 64 <1300000000>;
- opp-microvolt = <1050000 1045000 1055000>;
- opp-microamp = <95000>;
- clock-latency-ns = <400000>;
- opp-suspend;
- };
- opp-1400000000 {
- opp-hz = /bits/ 64 <1400000000>;
- opp-microvolt = <1075000>;
- opp-microamp = <100000>;
- clock-latency-ns = <400000>;
- };
- opp-1500000000 {
- opp-hz = /bits/ 64 <1500000000>;
- opp-microvolt = <1100000 1010000 1110000>;
- opp-microamp = <95000>;
- clock-latency-ns = <400000>;
- turbo-mode;
- };
- };
-};
-
-Example 4: Handling multiple regulators
-
-/ {
- cpus {
- cpu@0 {
- compatible = "vendor,cpu-type";
- ...
-
- vcc0-supply = <&cpu_supply0>;
- vcc1-supply = <&cpu_supply1>;
- vcc2-supply = <&cpu_supply2>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
- };
-
- cpu0_opp_table: opp_table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <970000>, /* Supply 0 */
- <960000>, /* Supply 1 */
- <960000>; /* Supply 2 */
- opp-microamp = <70000>, /* Supply 0 */
- <70000>, /* Supply 1 */
- <70000>; /* Supply 2 */
- clock-latency-ns = <300000>;
- };
-
- /* OR */
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <975000 970000 985000>, /* Supply 0 */
- <965000 960000 975000>, /* Supply 1 */
- <965000 960000 975000>; /* Supply 2 */
- opp-microamp = <70000>, /* Supply 0 */
- <70000>, /* Supply 1 */
- <70000>; /* Supply 2 */
- clock-latency-ns = <300000>;
- };
-
- /* OR */
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <975000 970000 985000>, /* Supply 0 */
- <965000 960000 975000>, /* Supply 1 */
- <965000 960000 975000>; /* Supply 2 */
- opp-microamp = <70000>, /* Supply 0 */
- <0>, /* Supply 1 doesn't need this */
- <70000>; /* Supply 2 */
- clock-latency-ns = <300000>;
- };
- };
-};
-
-Example 5: opp-supported-hw
-(example: three level hierarchy of versions: cuts, substrate and process)
-
-/ {
- cpus {
- cpu@0 {
- compatible = "arm,cortex-a7";
- ...
-
- cpu-supply = <&cpu_supply>
- operating-points-v2 = <&cpu0_opp_table_slow>;
- };
- };
-
- opp_table {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-600000000 {
- /*
- * Supports all substrate and process versions for 0xF
- * cuts, i.e. only first four cuts.
- */
- opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>
- opp-hz = /bits/ 64 <600000000>;
- ...
- };
-
- opp-800000000 {
- /*
- * Supports:
- * - cuts: only one, 6th cut (represented by 6th bit).
- * - substrate: supports 16 different substrate versions
- * - process: supports 9 different process versions
- */
- opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>
- opp-hz = /bits/ 64 <800000000>;
- ...
- };
-
- opp-900000000 {
- /*
- * Supports:
- * - All cuts and substrate where process version is 0x2.
- * - All cuts and process where substrate version is 0x2.
- */
- opp-supported-hw = <0xFFFFFFFF 0xFFFFFFFF 0x02>, <0xFFFFFFFF 0x01 0xFFFFFFFF>
- opp-hz = /bits/ 64 <900000000>;
- ...
- };
- };
-};
-
-Example 6: opp-microvolt-<name>, opp-microamp-<name>:
-(example: device with two possible microvolt ranges: slow and fast)
-
-/ {
- cpus {
- cpu@0 {
- compatible = "arm,cortex-a7";
- ...
-
- operating-points-v2 = <&cpu0_opp_table>;
- };
- };
-
- cpu0_opp_table: opp_table0 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt-slow = <915000 900000 925000>;
- opp-microvolt-fast = <975000 970000 985000>;
- opp-microamp-slow = <70000>;
- opp-microamp-fast = <71000>;
- };
-
- opp-1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */
- <925000 910000 935000>; /* Supply vcc1 */
- opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */
- <965000 960000 975000>; /* Supply vcc1 */
- opp-microamp = <70000>; /* Will be used for both slow/fast */
- };
- };
-};
-
-Example 7: Single cluster Quad-core ARM cortex A53, OPP points from firmware,
-distinct clock controls but two sets of clock/voltage/current lines.
-
-/ {
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- cpu@0 {
- compatible = "arm,cortex-a53";
- reg = <0x0 0x100>;
- next-level-cache = <&A53_L2>;
- clocks = <&dvfs_controller 0>;
- operating-points-v2 = <&cpu_opp0_table>;
- };
- cpu@1 {
- compatible = "arm,cortex-a53";
- reg = <0x0 0x101>;
- next-level-cache = <&A53_L2>;
- clocks = <&dvfs_controller 1>;
- operating-points-v2 = <&cpu_opp0_table>;
- };
- cpu@2 {
- compatible = "arm,cortex-a53";
- reg = <0x0 0x102>;
- next-level-cache = <&A53_L2>;
- clocks = <&dvfs_controller 2>;
- operating-points-v2 = <&cpu_opp1_table>;
- };
- cpu@3 {
- compatible = "arm,cortex-a53";
- reg = <0x0 0x103>;
- next-level-cache = <&A53_L2>;
- clocks = <&dvfs_controller 3>;
- operating-points-v2 = <&cpu_opp1_table>;
- };
-
- };
-
- cpu_opp0_table: opp0_table {
- compatible = "operating-points-v2";
- opp-shared;
- };
-
- cpu_opp1_table: opp1_table {
- compatible = "operating-points-v2";
- opp-shared;
- };
-};
diff --git a/Documentation/devicetree/bindings/opp/qcom-opp.txt b/Documentation/devicetree/bindings/opp/qcom-opp.txt
index 32eb0793c7e6..41d3e4ff2dc3 100644
--- a/Documentation/devicetree/bindings/opp/qcom-opp.txt
+++ b/Documentation/devicetree/bindings/opp/qcom-opp.txt
@@ -1,7 +1,7 @@
Qualcomm OPP bindings to describe OPP nodes
The bindings are based on top of the operating-points-v2 bindings
-described in Documentation/devicetree/bindings/opp/opp.txt
+described in Documentation/devicetree/bindings/opp/opp-v2-base.yaml
Additional properties are described below.
* OPP Table Node
diff --git a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
index 832346e489a3..b70d326117cd 100644
--- a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
+++ b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
@@ -13,7 +13,7 @@ regulators to the device that will undergo OPP transitions we can make use
of the multi regulator binding that is part of the OPP core described here [1]
to describe both regulators needed by the platform.
-[1] Documentation/devicetree/bindings/opp/opp.txt
+[1] Documentation/devicetree/bindings/opp/opp-v2.yaml
Required Properties for Device Node:
- vdd-supply: phandle to regulator controlling VDD supply
diff --git a/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml
new file mode 100644
index 000000000000..e87ff27526ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/intel,keembay-pcie-ep.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/pci/intel,keembay-pcie-ep.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel Keem Bay PCIe controller Endpoint mode
+
+maintainers:
+ - Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>
+ - Srikanth Thokala <srikanth.thokala@intel.com>
+
+properties:
+ compatible:
+ const: intel,keembay-pcie-ep
+
+ reg:
+ maxItems: 5
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: dbi2
+ - const: atu
+ - const: addr_space
+ - const: apb
+
+ interrupts:
+ maxItems: 4
+
+ interrupt-names:
+ items:
+ - const: pcie
+ - const: pcie_ev
+ - const: pcie_err
+ - const: pcie_mem_access
+
+ num-lanes:
+ description: Number of lanes to use.
+ enum: [ 1, 2 ]
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ pcie-ep@37000000 {
+ compatible = "intel,keembay-pcie-ep";
+ reg = <0x37000000 0x00001000>,
+ <0x37100000 0x00001000>,
+ <0x37300000 0x00001000>,
+ <0x36000000 0x01000000>,
+ <0x37800000 0x00000200>;
+ reg-names = "dbi", "dbi2", "atu", "addr_space", "apb";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "pcie_ev", "pcie_err", "pcie_mem_access";
+ num-lanes = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml b/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
new file mode 100644
index 000000000000..ed4400c9ac09
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/intel,keembay-pcie.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/pci/intel,keembay-pcie.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel Keem Bay PCIe controller Root Complex mode
+
+maintainers:
+ - Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>
+ - Srikanth Thokala <srikanth.thokala@intel.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+
+properties:
+ compatible:
+ const: intel,keembay-pcie
+
+ ranges:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+
+ reg:
+ maxItems: 4
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: atu
+ - const: config
+ - const: apb
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: master
+ - const: aux
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: pcie
+ - const: pcie_ev
+ - const: pcie_err
+
+ num-lanes:
+ description: Number of lanes to use.
+ enum: [ 1, 2 ]
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - ranges
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #define KEEM_BAY_A53_PCIE
+ #define KEEM_BAY_A53_AUX_PCIE
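+ /*
+  * The empty defines above stand in for the platform's SCMI clock IDs so
+  * the example builds on its own; real devicetrees take these values from
+  * the platform clock binding headers.
+  */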
+ pcie@37000000 {
+ compatible = "intel,keembay-pcie";
+ reg = <0x37000000 0x00001000>,
+ <0x37300000 0x00001000>,
+ <0x36e00000 0x00200000>,
+ <0x37800000 0x00000200>;
+ reg-names = "dbi", "atu", "config", "apb";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x02000000 0 0x36000000 0x36000000 0 0x00e00000>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie", "pcie_ev", "pcie_err";
+ clocks = <&scmi_clk KEEM_BAY_A53_PCIE>,
+ <&scmi_clk KEEM_BAY_A53_AUX_PCIE>;
+ clock-names = "master", "aux";
+ reset-gpios = <&pca2 9 GPIO_ACTIVE_LOW>;
+ num-lanes = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-cfg.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-cfg.yaml
new file mode 100644
index 000000000000..841a3d284bbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-cfg.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/mediatek-pcie-cfg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek PCIECFG controller
+
+maintainers:
+ - Chuanjia Liu <chuanjia.liu@mediatek.com>
+ - Jianjun Wang <jianjun.wang@mediatek.com>
+
+description: |
+ The MediaTek PCIECFG controller controls certain PCIe features such
+ as LTSSM and ASPM.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,generic-pciecfg
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pciecfg: pciecfg@1a140000 {
+ compatible = "mediatek,generic-pciecfg", "syscon";
+ reg = <0x1a140000 0x1000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
index 7468d666763a..57ae73462272 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
@@ -8,7 +8,7 @@ Required properties:
"mediatek,mt7623-pcie"
"mediatek,mt7629-pcie"
- device_type: Must be "pci"
-- reg: Base addresses and lengths of the PCIe subsys and root ports.
+- reg: Base addresses and lengths of the root ports.
- reg-names: Names of the above areas to use during resource lookup.
- #address-cells: Address representation for root ports (must be 3)
- #size-cells: Size representation for root ports (must be 2)
@@ -47,9 +47,12 @@ Required properties for MT7623/MT2701:
- reset-names: Must be "pcie-rst0", "pcie-rst1", "pcie-rstN".. based on the
number of root ports.
-Required properties for MT2712/MT7622:
+Required properties for MT2712/MT7622/MT7629:
-interrupts: A list of interrupt outputs of the controller, must have one
entry for each PCIe port
+- interrupt-names: Must include the following entries:
+ - "pcie_irq": The interrupt that is asserted when an MSI/INTX is received
+- linux,pci-domain: PCI domain ID. Should be unique for each host controller.
In addition, the device tree node must have sub-nodes describing each
PCIe port interface, having the following mandatory properties:
@@ -143,130 +146,143 @@ Examples for MT7623:
Examples for MT2712:
- pcie: pcie@11700000 {
+ pcie1: pcie@112ff000 {
compatible = "mediatek,mt2712-pcie";
device_type = "pci";
- reg = <0 0x11700000 0 0x1000>,
- <0 0x112ff000 0 0x1000>;
- reg-names = "port0", "port1";
+ reg = <0 0x112ff000 0 0x1000>;
+ reg-names = "port1";
+ linux,pci-domain = <1>;
#address-cells = <3>;
#size-cells = <2>;
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
- <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
- <&pericfg CLK_PERI_PCIE0>,
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie_irq";
+ clocks = <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
<&pericfg CLK_PERI_PCIE1>;
- clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1";
- phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>;
- phy-names = "pcie-phy0", "pcie-phy1";
+ clock-names = "sys_ck1", "ahb_ck1";
+ phys = <&u3port1 PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy1";
bus-range = <0x00 0xff>;
- ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
+ ranges = <0x82000000 0 0x11400000 0x0 0x11400000 0 0x300000>;
+ status = "disabled";
- pcie0: pcie@0,0 {
- reg = <0x0000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+ <0 0 0 3 &pcie_intc1 2>,
+ <0 0 0 4 &pcie_intc1 3>;
+ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <1>;
- ranges;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc0 0>,
- <0 0 0 2 &pcie_intc0 1>,
- <0 0 0 3 &pcie_intc0 2>,
- <0 0 0 4 &pcie_intc0 3>;
- pcie_intc0: interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
};
+ };
- pcie1: pcie@1,0 {
- reg = <0x0800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
+ pcie0: pcie@11700000 {
+ compatible = "mediatek,mt2712-pcie";
+ device_type = "pci";
+ reg = <0 0x11700000 0 0x1000>;
+ reg-names = "port0";
+ linux,pci-domain = <0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pcie_irq";
+ clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
+ <&pericfg CLK_PERI_PCIE0>;
+ clock-names = "sys_ck0", "ahb_ck0";
+ phys = <&u3port0 PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy0";
+ bus-range = <0x00 0xff>;
+ ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
+ status = "disabled";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+ pcie_intc0: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <1>;
- ranges;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc1 0>,
- <0 0 0 2 &pcie_intc1 1>,
- <0 0 0 3 &pcie_intc1 2>,
- <0 0 0 4 &pcie_intc1 3>;
- pcie_intc1: interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
};
};
Examples for MT7622:
- pcie: pcie@1a140000 {
+ pcie0: pcie@1a143000 {
compatible = "mediatek,mt7622-pcie";
device_type = "pci";
- reg = <0 0x1a140000 0 0x1000>,
- <0 0x1a143000 0 0x1000>,
- <0 0x1a145000 0 0x1000>;
- reg-names = "subsys", "port0", "port1";
+ reg = <0 0x1a143000 0 0x1000>;
+ reg-names = "port0";
+ linux,pci-domain = <0>;
#address-cells = <3>;
#size-cells = <2>;
- interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "pcie_irq";
clocks = <&pciesys CLK_PCIE_P0_MAC_EN>,
- <&pciesys CLK_PCIE_P1_MAC_EN>,
<&pciesys CLK_PCIE_P0_AHB_EN>,
- <&pciesys CLK_PCIE_P1_AHB_EN>,
<&pciesys CLK_PCIE_P0_AUX_EN>,
- <&pciesys CLK_PCIE_P1_AUX_EN>,
<&pciesys CLK_PCIE_P0_AXI_EN>,
- <&pciesys CLK_PCIE_P1_AXI_EN>,
<&pciesys CLK_PCIE_P0_OBFF_EN>,
- <&pciesys CLK_PCIE_P1_OBFF_EN>,
- <&pciesys CLK_PCIE_P0_PIPE_EN>,
- <&pciesys CLK_PCIE_P1_PIPE_EN>;
- clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1",
- "aux_ck0", "aux_ck1", "axi_ck0", "axi_ck1",
- "obff_ck0", "obff_ck1", "pipe_ck0", "pipe_ck1";
- phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>;
- phy-names = "pcie-phy0", "pcie-phy1";
+ <&pciesys CLK_PCIE_P0_PIPE_EN>;
+ clock-names = "sys_ck0", "ahb_ck0", "aux_ck0",
+ "axi_ck0", "obff_ck0", "pipe_ck0";
+
power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
bus-range = <0x00 0xff>;
- ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
+ ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x8000000>;
+ status = "disabled";
- pcie0: pcie@0,0 {
- reg = <0x0000 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+ <0 0 0 3 &pcie_intc0 2>,
+ <0 0 0 4 &pcie_intc0 3>;
+ pcie_intc0: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <1>;
- ranges;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc0 0>,
- <0 0 0 2 &pcie_intc0 1>,
- <0 0 0 3 &pcie_intc0 2>,
- <0 0 0 4 &pcie_intc0 3>;
- pcie_intc0: interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
};
+ };
- pcie1: pcie@1,0 {
- reg = <0x0800 0 0 0 0>;
- #address-cells = <3>;
- #size-cells = <2>;
+ pcie1: pcie@1a145000 {
+ compatible = "mediatek,mt7622-pcie";
+ device_type = "pci";
+ reg = <0 0x1a145000 0 0x1000>;
+ reg-names = "port1";
+ linux,pci-domain = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "pcie_irq";
+ clocks = <&pciesys CLK_PCIE_P1_MAC_EN>,
+ /* the designer connected RC1 to the p0_ahb clock */
+ <&pciesys CLK_PCIE_P0_AHB_EN>,
+ <&pciesys CLK_PCIE_P1_AUX_EN>,
+ <&pciesys CLK_PCIE_P1_AXI_EN>,
+ <&pciesys CLK_PCIE_P1_OBFF_EN>,
+ <&pciesys CLK_PCIE_P1_PIPE_EN>;
+ clock-names = "sys_ck1", "ahb_ck1", "aux_ck1",
+ "axi_ck1", "obff_ck1", "pipe_ck1";
+
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x82000000 0 0x28000000 0x0 0x28000000 0 0x8000000>;
+ status = "disabled";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+ <0 0 0 3 &pcie_intc1 2>,
+ <0 0 0 4 &pcie_intc1 3>;
+ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
#interrupt-cells = <1>;
- ranges;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie_intc1 0>,
- <0 0 0 2 &pcie_intc1 1>,
- <0 0 0 3 &pcie_intc1 2>,
- <0 0 0 4 &pcie_intc1 3>;
- pcie_intc1: interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
};
};
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
index 7847bbcd4a03..ccec51ab5247 100644
--- a/Documentation/devicetree/bindings/pci/pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -23,6 +23,13 @@ properties:
default: 1
maximum: 255
+ max-virtual-functions:
+ description: Array representing the number of virtual functions corresponding to each physical
+ function
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 1
+ maxItems: 255
+
max-link-speed:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 1, 2, 3, 4 ]
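A usage sketch for the new property: a uint8-array is written with an
explicit /bits/ 8 cell size in the device tree source. The endpoint node
name and the values below are hypothetical:

    pcie-ep@fd000000 {
        /* one entry per physical function, here PF0..PF3 */
        max-functions = /bits/ 8 <4>;
        max-virtual-functions = /bits/ 8 <4 4 2 2>;
    };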
diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
index 2d677e90a7e2..f56f8c58c5d9 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
@@ -35,6 +35,7 @@ Required properties:
Optional properties:
- dma-coherent: present if DMA operations are coherent
+- clocks: Input clock specifier. Refer to common clock bindings.
Example:
++++++++
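A consumer sketch for the new optional clocks property, following the
common clock bindings (the provider phandle and index are hypothetical):

    nwl_pcie: pcie@fd0e0000 {
        clocks = <&zynqmp_clk 23>;
    };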
diff --git a/Documentation/devicetree/bindings/power/power-domain.yaml b/Documentation/devicetree/bindings/power/power-domain.yaml
index aed51e9dcb11..3143ed9a3313 100644
--- a/Documentation/devicetree/bindings/power/power-domain.yaml
+++ b/Documentation/devicetree/bindings/power/power-domain.yaml
@@ -46,7 +46,7 @@ properties:
Phandles to the OPP tables of power domains provided by a power domain
provider. If the provider provides a single power domain only or all
the power domains provided by the provider have identical OPP tables,
- then this shall contain a single phandle. Refer to ../opp/opp.txt
+ then this shall contain a single phandle. Refer to ../opp/opp-v2-base.yaml
for more information.
"#power-domain-cells":
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
deleted file mode 100644
index 0c0dc3a1e693..000000000000
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-Qualcomm PON Device
-
-The Power On device for Qualcomm PM8xxx is MFD supporting pwrkey
-and resin along with the Android reboot-mode.
-
-This DT node has pwrkey and resin as sub nodes.
-
-Required Properties:
--compatible: Must be one of:
- "qcom,pm8916-pon"
- "qcom,pms405-pon"
- "qcom,pm8998-pon"
-
--reg: Specifies the physical address of the pon register
-
-Optional subnode:
--pwrkey: Specifies the subnode pwrkey and should follow the
- qcom,pm8941-pwrkey.txt description.
--resin: Specifies the subnode resin and should follow the
- qcom,pm8xxx-pwrkey.txt description.
-
-The rest of the properties should follow the generic reboot-mode description
-found in reboot-mode.txt
-
-Example:
-
- pon@800 {
- compatible = "qcom,pm8916-pon";
-
- reg = <0x800>;
- mode-bootloader = <0x2>;
- mode-recovery = <0x1>;
-
- pwrkey {
- compatible = "qcom,pm8941-pwrkey";
- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_POWER>;
- };
-
- resin {
- compatible = "qcom,pm8941-resin";
- interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
- debounce = <15625>;
- bias-pull-up;
- linux,code = <KEY_VOLUMEDOWN>;
- };
- };
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
new file mode 100644
index 000000000000..353f155df0f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/qcom,pon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PON Device
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+description: |
+ The Power On device for Qualcomm PM8xxx is an MFD supporting pwrkey
+ and resin along with the Android reboot-mode.
+
+ This DT node has pwrkey and resin as sub nodes.
+
+allOf:
+ - $ref: reboot-mode.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,pm8916-pon
+ - qcom,pms405-pon
+ - qcom,pm8998-pon
+
+ reg:
+ maxItems: 1
+
+ pwrkey:
+ type: object
+ $ref: "../../input/qcom,pm8941-pwrkey.yaml#"
+
+ resin:
+ type: object
+ $ref: "../../input/qcom,pm8941-pwrkey.yaml#"
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/input/linux-event-codes.h>
+ #include <dt-bindings/spmi/spmi.h>
+ spmi_bus: spmi@c440000 {
+ reg = <0x0c440000 0x1100>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ pmk8350: pmic@0 {
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pmk8350_pon: pon_hlos@1300 {
+ reg = <0x1300>;
+ compatible = "qcom,pm8998-pon";
+
+ pwrkey {
+ compatible = "qcom,pm8941-pwrkey";
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_POWER>;
+ };
+
+ resin {
+ compatible = "qcom,pm8941-resin";
+ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml
index 9c6fda6b1dd9..ad0a0b95cec1 100644
--- a/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml
+++ b/Documentation/devicetree/bindings/power/reset/reboot-mode.yaml
@@ -36,7 +36,7 @@ patternProperties:
"^mode-.*$":
$ref: /schemas/types.yaml#/definitions/uint32
-additionalProperties: false
+additionalProperties: true
examples:
- |
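Since the schema is now open to additional properties, bindings that
reference reboot-mode.yaml (such as qcom,pon.yaml above) can combine
their own properties with the generic mode-* pattern. A sketch, reusing
the magic values from the old qcom,pon.txt example:

    pon@800 {
        compatible = "qcom,pm8916-pon";
        reg = <0x800>;
        mode-bootloader = <0x2>;
        mode-recovery = <0x1>;
    };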
diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml b/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
index 5596bee70509..81a54a4e8e3e 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
@@ -29,6 +29,7 @@ properties:
- enum:
- rockchip,px30-pwm
- rockchip,rk3308-pwm
+ - rockchip,rk3568-pwm
- const: rockchip,rk3328-pwm
reg:
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index 7548d8714871..13925bb78ec7 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -32,6 +32,9 @@ properties:
- dallas,ds3232
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
- epson,rx8010
+ # I2C-BUS INTERFACE REAL TIME CLOCK MODULE
+ - epson,rx8025
+ - epson,rx8035
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE with Battery Backed RAM
- epson,rx8571
# I2C-BUS INTERFACE REAL TIME CLOCK MODULE
diff --git a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
index 61802a11baf4..d370c98a62c7 100644
--- a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
@@ -21,6 +21,7 @@ properties:
- fsl,imx8mn-rpmsg-audio
- fsl,imx8mm-rpmsg-audio
- fsl,imx8mp-rpmsg-audio
+ - fsl,imx8ulp-rpmsg-audio
model:
$ref: /schemas/types.yaml#/definitions/string
diff --git a/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml b/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml
index 53e9434a6d9d..dcf790b053d2 100644
--- a/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml
@@ -130,36 +130,34 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/mt8195-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
- #include <dt-bindings/power/mt8195-power.h>
afe: mt8195-afe-pcm@10890000 {
compatible = "mediatek,mt8195-audio";
reg = <0x10890000 0x10000>;
interrupts = <GIC_SPI 822 IRQ_TYPE_LEVEL_HIGH 0>;
mediatek,topckgen = <&topckgen>;
- power-domains = <&spm MT8195_POWER_DOMAIN_AUDIO>;
+ power-domains = <&spm 7>; //MT8195_POWER_DOMAIN_AUDIO
clocks = <&clk26m>,
- <&topckgen CLK_TOP_APLL1>,
- <&topckgen CLK_TOP_APLL2>,
- <&topckgen CLK_TOP_APLL12_DIV0>,
- <&topckgen CLK_TOP_APLL12_DIV1>,
- <&topckgen CLK_TOP_APLL12_DIV2>,
- <&topckgen CLK_TOP_APLL12_DIV3>,
- <&topckgen CLK_TOP_APLL12_DIV9>,
- <&topckgen CLK_TOP_A1SYS_HP_SEL>,
- <&topckgen CLK_TOP_AUD_INTBUS_SEL>,
- <&topckgen CLK_TOP_AUDIO_H_SEL>,
- <&topckgen CLK_TOP_AUDIO_LOCAL_BUS_SEL>,
- <&topckgen CLK_TOP_DPTX_M_SEL>,
- <&topckgen CLK_TOP_I2SO1_M_SEL>,
- <&topckgen CLK_TOP_I2SO2_M_SEL>,
- <&topckgen CLK_TOP_I2SI1_M_SEL>,
- <&topckgen CLK_TOP_I2SI2_M_SEL>,
- <&infracfg_ao CLK_INFRA_AO_AUDIO_26M_B>,
- <&scp_adsp CLK_SCP_ADSP_AUDIODSP>;
+ <&topckgen 163>, //CLK_TOP_APLL1
+ <&topckgen 166>, //CLK_TOP_APLL2
+ <&topckgen 233>, //CLK_TOP_APLL12_DIV0
+ <&topckgen 234>, //CLK_TOP_APLL12_DIV1
+ <&topckgen 235>, //CLK_TOP_APLL12_DIV2
+ <&topckgen 236>, //CLK_TOP_APLL12_DIV3
+ <&topckgen 238>, //CLK_TOP_APLL12_DIV9
+ <&topckgen 100>, //CLK_TOP_A1SYS_HP_SEL
+ <&topckgen 33>, //CLK_TOP_AUD_INTBUS_SEL
+ <&topckgen 34>, //CLK_TOP_AUDIO_H_SEL
+ <&topckgen 107>, //CLK_TOP_AUDIO_LOCAL_BUS_SEL
+ <&topckgen 98>, //CLK_TOP_DPTX_M_SEL
+ <&topckgen 94>, //CLK_TOP_I2SO1_M_SEL
+ <&topckgen 95>, //CLK_TOP_I2SO2_M_SEL
+ <&topckgen 96>, //CLK_TOP_I2SI1_M_SEL
+ <&topckgen 97>, //CLK_TOP_I2SI2_M_SEL
+ <&infracfg_ao 50>, //CLK_INFRA_AO_AUDIO_26M_B
+ <&scp_adsp 0>; //CLK_SCP_ADSP_AUDIODSP
clock-names = "clk26m",
"apll1_ck",
"apll2_ck",
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.yaml b/Documentation/devicetree/bindings/spi/omap-spi.yaml
index e55538186cf6..9952199cae11 100644
--- a/Documentation/devicetree/bindings/spi/omap-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/omap-spi.yaml
@@ -84,9 +84,9 @@ unevaluatedProperties: false
if:
properties:
compatible:
- oneOf:
- - const: ti,omap2-mcspi
- - const: ti,omap4-mcspi
+ enum:
+ - ti,omap2-mcspi
+ - ti,omap4-mcspi
then:
properties:
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.yaml b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
index 593f7693bace..03e5dca7e933 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml
@@ -27,13 +27,11 @@ properties:
xlnx,num-ss-bits:
description: Number of chip selects used.
- $ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 32
xlnx,num-transfer-bits:
description: Number of bits per transfer. This will be 8 if not specified.
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [8, 16, 32]
default: 8
diff --git a/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml b/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml
new file mode 100644
index 000000000000..289e9a845600
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-lmh.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2021 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/qcom-lmh.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Limits Management Hardware (LMh)
+
+maintainers:
+ - Thara Gopinath <thara.gopinath@linaro.org>
+
+description:
+ Limits Management Hardware (LMh) is a hardware infrastructure on some
+ Qualcomm SoCs that can enforce temperature and current limits as
+ programmed by software for certain IPs like the CPU.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sdm845-lmh
+
+ reg:
+ items:
+ - description: core registers
+
+ interrupts:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupt-controller: true
+
+ cpus:
+ description:
+ Phandle of the first CPU in the LMh cluster.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ qcom,lmh-temp-arm-millicelsius:
+ description:
+ An integer expressing temperature threshold at which the LMh thermal
+ FSM is engaged.
+
+ qcom,lmh-temp-low-millicelsius:
+ description:
+ An integer expressing temperature threshold at which the state machine
+ will attempt to remove frequency throttling.
+
+ qcom,lmh-temp-high-millicelsius:
+ description:
+ An integer expressing temperature threshold at which the state machine
+ will attempt to throttle the frequency.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#interrupt-cells'
+ - interrupt-controller
+ - cpus
+ - qcom,lmh-temp-arm-millicelsius
+ - qcom,lmh-temp-low-millicelsius
+ - qcom,lmh-temp-high-millicelsius
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ lmh@17d70800 {
+ compatible = "qcom,sdm845-lmh";
+ reg = <0x17d70800 0x400>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ cpus = <&CPU4>;
+ qcom,lmh-temp-arm-millicelsius = <65000>;
+ qcom,lmh-temp-low-millicelsius = <94500>;
+ qcom,lmh-temp-high-millicelsius = <95000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index 164f71598c59..a07de5ed0ca6 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -215,7 +215,7 @@ patternProperties:
- polling-delay
- polling-delay-passive
- thermal-sensors
- - trips
+
additionalProperties: false
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/virtio/mmio.yaml b/Documentation/devicetree/bindings/virtio/mmio.yaml
index d46597028cf1..4b7a0273181c 100644
--- a/Documentation/devicetree/bindings/virtio/mmio.yaml
+++ b/Documentation/devicetree/bindings/virtio/mmio.yaml
@@ -36,7 +36,8 @@ required:
- reg
- interrupts
-additionalProperties: false
+additionalProperties:
+ type: object
examples:
- |
diff --git a/Documentation/devicetree/bindings/virtio/virtio-device.yaml b/Documentation/devicetree/bindings/virtio/virtio-device.yaml
new file mode 100644
index 000000000000..1778ea9b5aa5
--- /dev/null
+++ b/Documentation/devicetree/bindings/virtio/virtio-device.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/virtio/virtio-device.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Virtio device bindings
+
+maintainers:
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+description:
+ These bindings are applicable to virtio devices irrespective of the bus they
+ are bound to, like mmio or pci.
+
+# We need a select here so we don't match all nodes with 'virtio,mmio'
+properties:
+ compatible:
+ pattern: "^virtio,device[0-9a-f]{1,8}$"
+ description: Virtio device nodes.
+ "virtio,deviceID", where ID is the virtio device id. The textual
+ representation of ID shall be in lower case hexadecimal with leading
+ zeroes suppressed.
+
+required:
+ - compatible
+
+additionalProperties: true
+
+examples:
+ - |
+ virtio@3000 {
+ compatible = "virtio,mmio";
+ reg = <0x3000 0x100>;
+ interrupts = <43>;
+
+ i2c {
+ compatible = "virtio,device22";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/watchdog/maxim,max63xx.yaml b/Documentation/devicetree/bindings/watchdog/maxim,max63xx.yaml
index f2105eedac2c..ab9641e845db 100644
--- a/Documentation/devicetree/bindings/watchdog/maxim,max63xx.yaml
+++ b/Documentation/devicetree/bindings/watchdog/maxim,max63xx.yaml
@@ -15,13 +15,13 @@ maintainers:
properties:
compatible:
- oneOf:
- - const: maxim,max6369
- - const: maxim,max6370
- - const: maxim,max6371
- - const: maxim,max6372
- - const: maxim,max6373
- - const: maxim,max6374
+ enum:
+ - maxim,max6369
+ - maxim,max6370
+ - maxim,max6371
+ - maxim,max6372
+ - maxim,max6373
+ - maxim,max6374
reg:
description: This is a 1-byte memory-mapped address
diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
index 487ce4f41d77..50ebcda17ad0 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -36,9 +36,15 @@ CXL Core
.. kernel-doc:: drivers/cxl/cxl.h
:internal:
-.. kernel-doc:: drivers/cxl/core.c
+.. kernel-doc:: drivers/cxl/core/bus.c
:doc: cxl core
+.. kernel-doc:: drivers/cxl/core/pmem.c
+ :doc: cxl pmem
+
+.. kernel-doc:: drivers/cxl/core/regs.c
+ :doc: cxl registers
+
External Interfaces
===================
diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt
index 99cb6d7f5005..2949c99fbb2f 100644
--- a/Documentation/features/vm/ELF-ASLR/arch-support.txt
+++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt
@@ -22,7 +22,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | ok |
| sh: | TODO |
| sparc: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index 439fd9069b8b..bc53905a0306 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -1,7 +1,7 @@
#
# Feature name: huge-vmap
# Kconfig: HAVE_ARCH_HUGE_VMAP
-# description: arch supports the ioremap_pud_enabled() and ioremap_pmd_enabled() VM APIs
+# description: arch supports the arch_vmap_pud_supported() and arch_vmap_pmd_supported() VM APIs
#
-----------------------
| arch |status|
diff --git a/Documentation/filesystems/api-summary.rst b/Documentation/filesystems/api-summary.rst
index 7e5c04c98619..98db2ea5fa12 100644
--- a/Documentation/filesystems/api-summary.rst
+++ b/Documentation/filesystems/api-summary.rst
@@ -71,9 +71,6 @@ Other Functions
.. kernel-doc:: fs/fs-writeback.c
:export:
-.. kernel-doc:: fs/block_dev.c
- :export:
-
.. kernel-doc:: fs/anon_inodes.c
:export:
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index d5a73fa2c9ef..8126beadc7df 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -37,7 +37,7 @@ TTM initialization
This section is outdated.
Drivers wishing to support TTM must pass a filled :c:type:`ttm_bo_driver
-<ttm_bo_driver>` structure to ttm_bo_device_init, together with an
+<ttm_bo_driver>` structure to ttm_device_init, together with an
initialized global reference to the memory manager. The ttm_bo_driver
structure contains several fields with function pointers for
initializing the TTM, allocating and freeing memory, waiting for command
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index e87ed5479963..d32616891dcf 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -130,9 +130,10 @@ Getting Help
------------
- `Website <https://clangbuiltlinux.github.io/>`_
-- `Mailing List <https://groups.google.com/forum/#!forum/clang-built-linux>`_: <clang-built-linux@googlegroups.com>
+- `Mailing List <https://lore.kernel.org/llvm/>`_: <llvm@lists.linux.dev>
+- `Old Mailing List Archives <https://groups.google.com/g/clang-built-linux>`_
- `Issue Tracker <https://github.com/ClangBuiltLinux/linux/issues>`_
-- IRC: #clangbuiltlinux on chat.freenode.net
+- IRC: #clangbuiltlinux on irc.libera.chat
- `Telegram <https://t.me/ClangBuiltLinux>`_: @ClangBuiltLinux
- `Wiki <https://github.com/ClangBuiltLinux/linux/wiki>`_
- `Beginner Bugs <https://github.com/ClangBuiltLinux/linux/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
index df65c19aa7df..55bd37a2efb0 100644
--- a/Documentation/kernel-hacking/hacking.rst
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -76,8 +76,8 @@ handler is never re-entered: if the same interrupt arrives, it is queued
fast: frequently it simply acknowledges the interrupt, marks a 'software
interrupt' for execution and exits.
-You can tell you are in a hardware interrupt, because
-:c:func:`in_irq()` returns true.
+You can tell you are in a hardware interrupt because in_hardirq() returns
+true.
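A minimal sketch of such a check (in_hardirq() is declared in
<linux/preempt.h>):

    if (in_hardirq())
        pr_warn("called from hard interrupt context\n");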
.. warning::
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index ed1284c6f078..90bc3f51eda9 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -94,16 +94,10 @@ primitives, but I'll pretend they don't exist.
Locking in the Linux Kernel
===========================
-If I could give you one piece of advice: never sleep with anyone crazier
-than yourself. But if I had to give you advice on locking: **keep it
-simple**.
+If I could give you one piece of advice on locking: **keep it simple**.
Be reluctant to introduce new locks.
-Strangely enough, this last one is the exact reverse of my advice when
-you **have** slept with someone crazier than yourself. And you should
-think about getting a big dog.
-
Two Main Types of Kernel Locks: Spinlocks and Mutexes
-----------------------------------------------------
@@ -1406,7 +1400,7 @@ bh
half will be running at any time.
Hardware Interrupt / Hardware IRQ
- Hardware interrupt request. in_irq() returns true in a
+ Hardware interrupt request. in_hardirq() returns true in a
hardware interrupt handler.
Interrupt Context
@@ -1418,7 +1412,7 @@ SMP
(``CONFIG_SMP=y``).
Software Interrupt / softirq
- Software interrupt handler. in_irq() returns false;
+ Software interrupt handler. in_hardirq() returns false;
in_softirq() returns true. Tasklets and softirqs both
fall into the category of 'software interrupts'.
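Putting the two glossary entries side by side, a context check is,
as a minimal sketch:

    if (in_hardirq())
        ; /* hardware interrupt handler */
    else if (in_softirq())
        ; /* softirq/tasklet, or a section with BHs disabled */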
diff --git a/Documentation/locking/futex-requeue-pi.rst b/Documentation/locking/futex-requeue-pi.rst
index 14ab5787b9a7..dd4ecf4528a4 100644
--- a/Documentation/locking/futex-requeue-pi.rst
+++ b/Documentation/locking/futex-requeue-pi.rst
@@ -5,7 +5,7 @@ Futex Requeue PI
Requeueing of tasks from a non-PI futex to a PI futex requires
special handling in order to ensure the underlying rt_mutex is never
left without an owner if it has waiters; doing so would break the PI
-boosting logic [see rt-mutex-desgin.txt] For the purposes of
+boosting logic [see rt-mutex-design.rst] For the purposes of
brevity, this action will be referred to as "requeue_pi" throughout
this document. Priority inheritance is abbreviated throughout as
"PI".
diff --git a/Documentation/locking/ww-mutex-design.rst b/Documentation/locking/ww-mutex-design.rst
index 54d9c17bb66b..6a4d7319f8f0 100644
--- a/Documentation/locking/ww-mutex-design.rst
+++ b/Documentation/locking/ww-mutex-design.rst
@@ -2,7 +2,7 @@
Wound/Wait Deadlock-Proof Mutex Design
======================================
-Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
+Please read mutex-design.rst first, as it applies to wait/wound mutexes too.
Motivation for WW-Mutexes
-------------------------
diff --git a/Documentation/power/energy-model.rst b/Documentation/power/energy-model.rst
index 60ac091d3b0d..8a2788afe89b 100644
--- a/Documentation/power/energy-model.rst
+++ b/Documentation/power/energy-model.rst
@@ -101,8 +101,7 @@ subsystems which use EM might rely on this flag to check if all EM devices use
the same scale. If there are different scales, these subsystems might decide
to: return warning/error, stop working or panic.
See Section 3. for an example of driver implementing this
-callback, and kernel/power/energy_model.c for further documentation on this
-API.
+callback, or Section 2.4 for further documentation on this API.
2.3 Accessing performance domains
@@ -123,7 +122,17 @@ em_cpu_energy() API. The estimation is performed assuming that the schedutil
CPUfreq governor is in use in case of CPU device. Currently this calculation is
not provided for other type of devices.
-More details about the above APIs can be found in include/linux/energy_model.h.
+More details about the above APIs can be found in ``<linux/energy_model.h>``
+or in Section 2.4.
+
+
+2.4 Description details of this API
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. kernel-doc:: include/linux/energy_model.h
+ :internal:
+
+.. kernel-doc:: kernel/power/energy_model.c
+ :export:
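A rough sketch of the registration flow described above; the callback
and variable names are placeholders, and the exact signatures should be
checked against <linux/energy_model.h> for your kernel version:

    static int my_active_power(unsigned long *power, unsigned long *freq,
                               struct device *dev)
    {
        /* report the power at *freq, using one scale for all devices */
        return 0;
    }

    static struct em_data_callback em_cb = EM_DATA_CB(my_active_power);

    /* nr_opps performance states; 'cpus' is NULL for non-CPU devices */
    em_dev_register_perf_domain(dev, nr_opps, &em_cb, cpus, true);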
3. Example driver
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index 2e7017bef4b8..c2121c1e55d7 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -389,7 +389,7 @@ The -mm patches are experimental patches released by Andrew Morton.
In the past, -mm tree were used to also test subsystem patches, but this
function is now done via the
-`linux-next <https://www.kernel.org/doc/man-pages/linux-next.html>`
+`linux-next` (https://www.kernel.org/doc/man-pages/linux-next.html)
tree. The Subsystem maintainers push their patches first to linux-next,
and, during the merge window, send them directly to Linus.
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index d3a8557b66a1..e35ab74a0f80 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
Program Minimal version Command to check the version
====================== =============== ========================================
-GNU C 4.9 gcc --version
+GNU C 5.1 gcc --version
Clang/LLVM (optional) 10.0.1 clang --version
GNU make 3.81 make --version
binutils 2.23 ld -v
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index 22d9ace5df2a..da9527502ef0 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -126,15 +126,17 @@ On-line docs
describes how to write user-mode utilities for communicating with
Card Services.
- * Title: **Linux Kernel Module Programming Guide**
+ * Title: **The Linux Kernel Module Programming Guide**
- :Author: Ori Pomerantz.
- :URL: https://tldp.org/LDP/lkmpg/2.6/html/index.html
- :Date: 2001
+ :Author: Peter Jay Salzman, Michael Burian, Ori Pomerantz, Bob Mottram,
+ Jim Huang.
+ :URL: https://sysprog21.github.io/lkmpg/
+ :Date: 2021
:Keywords: modules, GPL book, /proc, ioctls, system calls,
interrupt handlers .
- :Description: Very nice 92 pages GPL book on the topic of modules
- programming. Lots of examples.
+ :Description: A very nice GPL book on the topic of kernel module
+ programming. Lots of examples. The new version is actively
+ maintained at https://github.com/sysprog21/lkmpg.
* Title: **Global spinlock list and usage**
diff --git a/Documentation/process/maintainer-pgp-guide.rst b/Documentation/process/maintainer-pgp-guide.rst
index 8f8f1fee92b8..29e7d7b1cd44 100644
--- a/Documentation/process/maintainer-pgp-guide.rst
+++ b/Documentation/process/maintainer-pgp-guide.rst
@@ -944,12 +944,11 @@ have on your keyring::
uid [ unknown] Linus Torvalds <torvalds@kernel.org>
sub rsa2048 2011-09-20 [E]
-Next, open the `PGP pathfinder`_. In the "From" field, paste the key
-fingerprint of Linus Torvalds from the output above. In the "To" field,
-paste the key-id you found via ``gpg --search`` of the unknown key, and
-check the results:
-
-- `Finding paths to Linus`_
+Next, find a trust path from Linus Torvalds to the key-id you found via ``gpg
+--search`` of the unknown key. For this, you can use several tools including
+https://github.com/mricon/wotmate,
+https://git.kernel.org/pub/scm/docs/kernel/pgpkeys.git/tree/graphs, and
+https://the.earth.li/~noodles/pathfind.html.
If you get a few decent trust paths, then it's a pretty good indication
that it is a valid key. You can add it to your keyring from the
@@ -962,6 +961,3 @@ administrators of the PGP Pathfinder service to not be malicious (in
fact, this goes against :ref:`devs_not_infra`). However, if you
do not carefully maintain your own web of trust, then it is a marked
improvement over blindly trusting keyservers.
-
-.. _`PGP pathfinder`: https://pgp.cs.uu.nl/
-.. _`Finding paths to Linus`: https://pgp.cs.uu.nl/paths/79BE3E4300411886/to/C94035C21B4F2AEB.html
diff --git a/Documentation/translations/it_IT/kernel-hacking/hacking.rst b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
index b4ea00f1b583..d5c521327f6a 100644
--- a/Documentation/translations/it_IT/kernel-hacking/hacking.rst
+++ b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
@@ -90,7 +90,7 @@ i gestori d'interruzioni devono essere veloci: spesso si limitano
esclusivamente a notificare la presa in carico dell'interruzione,
programmare una 'interruzione software' per l'esecuzione e quindi terminare.
-Potete dire d'essere in una interruzione hardware perché :c:func:`in_irq()`
+Potete dire d'essere in una interruzione hardware perché in_hardirq()
ritorna vero.
.. warning::
diff --git a/Documentation/translations/it_IT/kernel-hacking/locking.rst b/Documentation/translations/it_IT/kernel-hacking/locking.rst
index 1e7c84def369..1efb8293bf1f 100644
--- a/Documentation/translations/it_IT/kernel-hacking/locking.rst
+++ b/Documentation/translations/it_IT/kernel-hacking/locking.rst
@@ -1459,11 +1459,11 @@ contesto utente
che hardware.
interruzione hardware
- Richiesta di interruzione hardware. in_irq() ritorna vero in un
+ Richiesta di interruzione hardware. in_hardirq() ritorna vero in un
gestore d'interruzioni hardware.
interruzione software / softirq
- Gestore di interruzioni software: in_irq() ritorna falso;
+ Gestore di interruzioni software: in_hardirq() ritorna falso;
in_softirq() ritorna vero. I tasklet e le softirq sono entrambi
considerati 'interruzioni software'.
diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst
index 669a022f6817..980eb20521cf 100644
--- a/Documentation/translations/zh_CN/admin-guide/README.rst
+++ b/Documentation/translations/zh_CN/admin-guide/README.rst
@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
编译内核
---------
- - 确保您至少有gcc 4.9可用。
+ - 确保您至少有gcc 5.1可用。
有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。
请注意,您仍然可以使用此内核运行a.out用户程序。
diff --git a/Documentation/translations/zh_CN/core-api/cachetlb.rst b/Documentation/translations/zh_CN/core-api/cachetlb.rst
index 55827b8a7c53..6fee45fe5e80 100644
--- a/Documentation/translations/zh_CN/core-api/cachetlb.rst
+++ b/Documentation/translations/zh_CN/core-api/cachetlb.rst
@@ -80,7 +80,7 @@ cpu上对这个地址空间进行刷新。
5) ``void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)``
- 在每个页面故障结束时,这个程序被调用,以告诉体系结构特定的代码,在
+ 在每个缺页异常结束时,这个程序被调用,以告诉体系结构特定的代码,在
软件页表中,在地址空间“vma->vm_mm”的虚拟地址“地址”处,现在存在
一个翻译。
diff --git a/Documentation/translations/zh_CN/core-api/index.rst b/Documentation/translations/zh_CN/core-api/index.rst
index d5e947d8b6f1..72f0a36daa1c 100644
--- a/Documentation/translations/zh_CN/core-api/index.rst
+++ b/Documentation/translations/zh_CN/core-api/index.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../core-api/irq/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/core-api/index.rst
-.. _cn_core-api_index.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_core-api_index.rst:
===========
核心API文档
diff --git a/Documentation/translations/zh_CN/core-api/irq/concepts.rst b/Documentation/translations/zh_CN/core-api/irq/concepts.rst
index 41455bf0f783..9957f0453353 100644
--- a/Documentation/translations/zh_CN/core-api/irq/concepts.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/concepts.rst
@@ -1,10 +1,12 @@
.. include:: ../../disclaimer-zh_CN.rst
-:Original: :doc:`../../../../core-api/irq/concepts`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/core-api/irq/concepts.rst
-.. _cn_concepts.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_concepts.rst:
===========
什么是IRQ?
diff --git a/Documentation/translations/zh_CN/core-api/irq/index.rst b/Documentation/translations/zh_CN/core-api/irq/index.rst
index 910ccabf041f..ba6acc4b48e5 100644
--- a/Documentation/translations/zh_CN/core-api/irq/index.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/index.rst
@@ -1,7 +1,10 @@
.. include:: ../../disclaimer-zh_CN.rst
-:Original: :doc:`../../../../core-api/irq/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/core-api/irq/index.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_irq_index.rst:
diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-affinity.rst b/Documentation/translations/zh_CN/core-api/irq/irq-affinity.rst
index 82a4428f22fd..7addd5f27a88 100644
--- a/Documentation/translations/zh_CN/core-api/irq/irq-affinity.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/irq-affinity.rst
@@ -1,10 +1,12 @@
.. include:: ../../disclaimer-zh_CN.rst
-:Original: :doc:`../../../../core-api/irq/irq-affinity`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/core-api/irq/irq-affinity
-.. _cn_irq-affinity.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_irq-affinity.rst:
==============
SMP IRQ 亲和性
diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
index 3c82dd307a46..7d077742f758 100644
--- a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
@@ -1,10 +1,12 @@
.. include:: ../../disclaimer-zh_CN.rst
-:Original: :doc:`../../../../core-api/irq/irq-domain`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/core-api/irq/irq-domain.rst
-.. _cn_irq-domain.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_irq-domain.rst:
=======================
irq_domain 中断号映射库
diff --git a/Documentation/translations/zh_CN/core-api/irq/irqflags-tracing.rst b/Documentation/translations/zh_CN/core-api/irq/irqflags-tracing.rst
index c889bd0f65d9..9af50b4b8c2d 100644
--- a/Documentation/translations/zh_CN/core-api/irq/irqflags-tracing.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/irqflags-tracing.rst
@@ -1,10 +1,12 @@
.. include:: ../../disclaimer-zh_CN.rst
-:Original: :doc:`../../../../core-api/irq/irqflags-tracing`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/core-api/irq/irqflags-tracing.rst
-.. _cn_irqflags-tracing.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_irqflags-tracing.rst:
=================
IRQ-flags状态追踪
diff --git a/Documentation/translations/zh_CN/core-api/kernel-api.rst b/Documentation/translations/zh_CN/core-api/kernel-api.rst
index d6f815ec265b..ab7d81889340 100644
--- a/Documentation/translations/zh_CN/core-api/kernel-api.rst
+++ b/Documentation/translations/zh_CN/core-api/kernel-api.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/kernel-api.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_kernel-api.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_kernel-api.rst:
============
Linux内核API
diff --git a/Documentation/translations/zh_CN/core-api/kobject.rst b/Documentation/translations/zh_CN/core-api/kobject.rst
index f0e6a4aeb372..b7c37794cc7f 100644
--- a/Documentation/translations/zh_CN/core-api/kobject.rst
+++ b/Documentation/translations/zh_CN/core-api/kobject.rst
@@ -1,7 +1,10 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/kobject.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_core_api_kobject.rst:
diff --git a/Documentation/translations/zh_CN/core-api/local_ops.rst b/Documentation/translations/zh_CN/core-api/local_ops.rst
index ee67379b6869..41e4525038e8 100644
--- a/Documentation/translations/zh_CN/core-api/local_ops.rst
+++ b/Documentation/translations/zh_CN/core-api/local_ops.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/local_ops.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_local_ops:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_local_ops:
========================
本地原子操作的语义和行为
diff --git a/Documentation/translations/zh_CN/core-api/padata.rst b/Documentation/translations/zh_CN/core-api/padata.rst
index c627f8f131f9..781d30675afd 100644
--- a/Documentation/translations/zh_CN/core-api/padata.rst
+++ b/Documentation/translations/zh_CN/core-api/padata.rst
@@ -3,7 +3,10 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/padata.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_core_api_padata.rst:
diff --git a/Documentation/translations/zh_CN/core-api/printk-basics.rst b/Documentation/translations/zh_CN/core-api/printk-basics.rst
index 2b20f6303a82..d574de3167c8 100644
--- a/Documentation/translations/zh_CN/core-api/printk-basics.rst
+++ b/Documentation/translations/zh_CN/core-api/printk-basics.rst
@@ -2,10 +2,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/printk-basics.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_printk-basics.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_printk-basics.rst:
==================
使用printk记录消息
diff --git a/Documentation/translations/zh_CN/core-api/printk-formats.rst b/Documentation/translations/zh_CN/core-api/printk-formats.rst
index a680c8f164c3..ce39c788cf5a 100644
--- a/Documentation/translations/zh_CN/core-api/printk-formats.rst
+++ b/Documentation/translations/zh_CN/core-api/printk-formats.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/printk-formats.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_printk-formats.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_printk-formats.rst:
==============================
如何获得正确的printk格式占位符
diff --git a/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst b/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst
index ea834e38d2f6..e2467fd26fc0 100644
--- a/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst
+++ b/Documentation/translations/zh_CN/core-api/refcount-vs-atomic.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/refcount-vs-atomic.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_refcount-vs-atomic:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_refcount-vs-atomic:
=======================================
与atomic_t相比,refcount_t的API是这样的
diff --git a/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst b/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
index ce05c29c7697..6abf7ed534ca 100644
--- a/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
+++ b/Documentation/translations/zh_CN/core-api/symbol-namespaces.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/symbol-namespaces.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_symbol-namespaces.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_symbol-namespaces.rst:
=================================
符号命名空间(Symbol Namespaces)
diff --git a/Documentation/translations/zh_CN/core-api/workqueue.rst b/Documentation/translations/zh_CN/core-api/workqueue.rst
index 0b8f730db6c0..e372fa5cf101 100644
--- a/Documentation/translations/zh_CN/core-api/workqueue.rst
+++ b/Documentation/translations/zh_CN/core-api/workqueue.rst
@@ -2,10 +2,12 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/core-api/workqueue.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
-.. _cn_workqueue.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_workqueue.rst:
=========================
并发管理的工作队列 (cmwq)
diff --git a/Documentation/translations/zh_CN/cpu-freq/core.rst b/Documentation/translations/zh_CN/cpu-freq/core.rst
index 19fb9c029cfe..0c6fd447ced6 100644
--- a/Documentation/translations/zh_CN/cpu-freq/core.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/core.rst
@@ -1,11 +1,13 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../cpu-freq/core`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/cpu-freq/core.rst
-.. _cn_core.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_core.rst:
====================================
CPUFreq核心和CPUFreq通知器的通用说明
diff --git a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
index 5ae9cfa2ec55..0fc5d1495789 100644
--- a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../cpu-freq/cpu-drivers`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/cpu-freq/cpu-drivers.rst
-.. _cn_cpu-drivers.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_cpu-drivers.rst:
=======================================
如何实现一个新的CPUFreq处理器驱动程序?
@@ -80,8 +82,6 @@ CPUfreq核心层注册一个cpufreq_driver结构体。
.resume - 一个指向per-policy恢复函数的指针,该函数在关中断且在调节器再一次开始前被
调用。
- .ready - 一个指向per-policy准备函数的指针,该函数在策略完全初始化之后被调用。
-
.attr - 一个指向NULL结尾的"struct freq_attr"列表的指针,该函数允许导出值到
sysfs。
diff --git a/Documentation/translations/zh_CN/cpu-freq/cpufreq-stats.rst b/Documentation/translations/zh_CN/cpu-freq/cpufreq-stats.rst
index c90d1d8353ed..f14423099d4b 100644
--- a/Documentation/translations/zh_CN/cpu-freq/cpufreq-stats.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/cpufreq-stats.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../cpu-freq/cpufreq-stats`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/cpu-freq/cpufreq-stats.rst
-.. _cn_cpufreq-stats.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_cpufreq-stats.rst:
==========================================
sysfs CPUFreq Stats的一般说明
diff --git a/Documentation/translations/zh_CN/cpu-freq/index.rst b/Documentation/translations/zh_CN/cpu-freq/index.rst
index 65074e211940..c6e50963cd33 100644
--- a/Documentation/translations/zh_CN/cpu-freq/index.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/index.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../cpu-freq/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/cpu-freq/index.rst
-.. _cn_index.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_index.rst:
=======================================================
Linux CPUFreq - Linux(TM)内核中的CPU频率和电压升降代码
diff --git a/Documentation/translations/zh_CN/filesystems/debugfs.rst b/Documentation/translations/zh_CN/filesystems/debugfs.rst
index 822c4d42fdf9..4981a82dd651 100644
--- a/Documentation/translations/zh_CN/filesystems/debugfs.rst
+++ b/Documentation/translations/zh_CN/filesystems/debugfs.rst
@@ -2,7 +2,7 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../filesystems/debugfs`
+:Original: Documentation/filesystems/debugfs.rst
=======
Debugfs
diff --git a/Documentation/translations/zh_CN/iio/ep93xx_adc.rst b/Documentation/translations/zh_CN/iio/ep93xx_adc.rst
index 7e91d2197867..64f3f3508353 100644
--- a/Documentation/translations/zh_CN/iio/ep93xx_adc.rst
+++ b/Documentation/translations/zh_CN/iio/ep93xx_adc.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../iio/ep93xx_adc`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/iio/ep93xx_adc.rst
-.. _cn_iio_ep93xx_adc:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_iio_ep93xx_adc:
==================================
思睿逻辑 EP93xx 模拟数字转换器驱动
diff --git a/Documentation/translations/zh_CN/iio/iio_configfs.rst b/Documentation/translations/zh_CN/iio/iio_configfs.rst
index 274488e8dce4..d5460e951804 100644
--- a/Documentation/translations/zh_CN/iio/iio_configfs.rst
+++ b/Documentation/translations/zh_CN/iio/iio_configfs.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../iio/iio_configfs`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/iio/iio_configfs.rst
-.. _cn_iio_configfs:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_iio_configfs:
=====================
工业 IIO configfs支持
diff --git a/Documentation/translations/zh_CN/iio/index.rst b/Documentation/translations/zh_CN/iio/index.rst
index 7087076a10f6..32d69047b16a 100644
--- a/Documentation/translations/zh_CN/iio/index.rst
+++ b/Documentation/translations/zh_CN/iio/index.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../iio/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/iio/index.rst
-.. _cn_iio_index:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_iio_index:
========
工业 I/O
diff --git a/Documentation/translations/zh_CN/kernel-hacking/hacking.rst b/Documentation/translations/zh_CN/kernel-hacking/hacking.rst
index ab974faddecf..f2bc154c5bcc 100644
--- a/Documentation/translations/zh_CN/kernel-hacking/hacking.rst
+++ b/Documentation/translations/zh_CN/kernel-hacking/hacking.rst
@@ -68,7 +68,7 @@
它将被排队(或丢弃)。因为它会关闭中断,所以处理程序必须很快:通常它只是
确认中断,标记一个“软件中断”以执行并退出。
-您可以通过 :c:func:`in_irq()` 返回真来判断您处于硬件中断状态。
+您可以通过 in_hardirq() 返回真来判断您处于硬件中断状态。
.. warning::
diff --git a/Documentation/translations/zh_CN/mips/booting.rst b/Documentation/translations/zh_CN/mips/booting.rst
index 96453e1b962e..e0bbd3f20862 100644
--- a/Documentation/translations/zh_CN/mips/booting.rst
+++ b/Documentation/translations/zh_CN/mips/booting.rst
@@ -2,8 +2,11 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../mips/booting`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/mips/booting.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_booting:
diff --git a/Documentation/translations/zh_CN/mips/features.rst b/Documentation/translations/zh_CN/mips/features.rst
index 93d93d06b1b3..b61dab06ceaf 100644
--- a/Documentation/translations/zh_CN/mips/features.rst
+++ b/Documentation/translations/zh_CN/mips/features.rst
@@ -2,8 +2,11 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../mips/features`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/mips/features.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_features:
diff --git a/Documentation/translations/zh_CN/mips/index.rst b/Documentation/translations/zh_CN/mips/index.rst
index b85033f9d67c..192c6adbb72e 100644
--- a/Documentation/translations/zh_CN/mips/index.rst
+++ b/Documentation/translations/zh_CN/mips/index.rst
@@ -2,8 +2,11 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../mips/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/mips/index.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
===========================
MIPS特性文档
diff --git a/Documentation/translations/zh_CN/mips/ingenic-tcu.rst b/Documentation/translations/zh_CN/mips/ingenic-tcu.rst
index f04ba407384a..ddbe149c517b 100644
--- a/Documentation/translations/zh_CN/mips/ingenic-tcu.rst
+++ b/Documentation/translations/zh_CN/mips/ingenic-tcu.rst
@@ -2,8 +2,11 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../mips/ingenic-tcu`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/mips/ingenic-tcu.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_ingenic-tcu:
diff --git a/Documentation/translations/zh_CN/openrisc/index.rst b/Documentation/translations/zh_CN/openrisc/index.rst
index d722642796c8..9ad6cc600884 100644
--- a/Documentation/translations/zh_CN/openrisc/index.rst
+++ b/Documentation/translations/zh_CN/openrisc/index.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../openrisc/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/openrisc/index.rst
-.. _cn_openrisc_index:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_openrisc_index:
=================
OpenRISC 体系架构
diff --git a/Documentation/translations/zh_CN/openrisc/openrisc_port.rst b/Documentation/translations/zh_CN/openrisc/openrisc_port.rst
index e87d0eec281d..b8a67670492d 100644
--- a/Documentation/translations/zh_CN/openrisc/openrisc_port.rst
+++ b/Documentation/translations/zh_CN/openrisc/openrisc_port.rst
@@ -1,7 +1,10 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../openrisc/openrisc_port`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/openrisc/openrisc_port.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_openrisc_port:
diff --git a/Documentation/translations/zh_CN/openrisc/todo.rst b/Documentation/translations/zh_CN/openrisc/todo.rst
index 9944ad05473b..63c38717edb1 100644
--- a/Documentation/translations/zh_CN/openrisc/todo.rst
+++ b/Documentation/translations/zh_CN/openrisc/todo.rst
@@ -1,7 +1,10 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../openrisc/todo`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/openrisc/todo.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_openrisc_todo.rst:
diff --git a/Documentation/translations/zh_CN/parisc/debugging.rst b/Documentation/translations/zh_CN/parisc/debugging.rst
index c21beb986e15..68b73eb57105 100644
--- a/Documentation/translations/zh_CN/parisc/debugging.rst
+++ b/Documentation/translations/zh_CN/parisc/debugging.rst
@@ -1,7 +1,10 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/parisc/debugging.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_parisc_debugging:
diff --git a/Documentation/translations/zh_CN/parisc/index.rst b/Documentation/translations/zh_CN/parisc/index.rst
index a47454ebe32e..0cc553fc8272 100644
--- a/Documentation/translations/zh_CN/parisc/index.rst
+++ b/Documentation/translations/zh_CN/parisc/index.rst
@@ -2,7 +2,10 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/parisc/index.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_parisc_index:
diff --git a/Documentation/translations/zh_CN/parisc/registers.rst b/Documentation/translations/zh_CN/parisc/registers.rst
index 71e2404cd103..d2ab1874a602 100644
--- a/Documentation/translations/zh_CN/parisc/registers.rst
+++ b/Documentation/translations/zh_CN/parisc/registers.rst
@@ -1,7 +1,10 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/parisc/registers.rst
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
.. _cn_parisc_registers:
diff --git a/Documentation/translations/zh_CN/riscv/boot-image-header.rst b/Documentation/translations/zh_CN/riscv/boot-image-header.rst
index 241bf9c1bcbe..0234c28a7114 100644
--- a/Documentation/translations/zh_CN/riscv/boot-image-header.rst
+++ b/Documentation/translations/zh_CN/riscv/boot-image-header.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../riscv/boot-image-header`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/riscv/boot-image-header.rst
-.. _cn_boot-image-header.rst:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_boot-image-header.rst:
==========================
RISC-V Linux启动镜像文件头
diff --git a/Documentation/translations/zh_CN/riscv/index.rst b/Documentation/translations/zh_CN/riscv/index.rst
index db13b1101490..bbf5d7b3777a 100644
--- a/Documentation/translations/zh_CN/riscv/index.rst
+++ b/Documentation/translations/zh_CN/riscv/index.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../riscv/index`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/riscv/index.rst
-.. _cn_riscv_index:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_riscv_index:
===============
RISC-V 体系结构
diff --git a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst b/Documentation/translations/zh_CN/riscv/patch-acceptance.rst
index 9fd1c8216763..d180d24717bf 100644
--- a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst
+++ b/Documentation/translations/zh_CN/riscv/patch-acceptance.rst
@@ -2,11 +2,13 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../riscv/patch-acceptance`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/riscv/patch-acceptance.rst
-.. _cn_riscv_patch-acceptance:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_riscv_patch-acceptance:
arch/riscv 开发者维护指南
=========================
diff --git a/Documentation/translations/zh_CN/riscv/pmu.rst b/Documentation/translations/zh_CN/riscv/pmu.rst
index 22dcf3a9ca6e..7ec801026c4d 100644
--- a/Documentation/translations/zh_CN/riscv/pmu.rst
+++ b/Documentation/translations/zh_CN/riscv/pmu.rst
@@ -1,10 +1,12 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :doc:`../../../riscv/pmu`
-:Translator: Yanteng Si <siyanteng@loongson.cn>
+:Original: Documentation/riscv/pmu.rst
-.. _cn_riscv_pmu:
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+.. _cn_riscv_pmu:
========================
RISC-V平台上对PMUs的支持
diff --git a/Documentation/translations/zh_TW/admin-guide/README.rst b/Documentation/translations/zh_TW/admin-guide/README.rst
index b752e50359e6..6ce97edbab37 100644
--- a/Documentation/translations/zh_TW/admin-guide/README.rst
+++ b/Documentation/translations/zh_TW/admin-guide/README.rst
@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
編譯內核
---------
- - 確保您至少有gcc 4.9可用。
+ - 確保您至少有gcc 5.1可用。
有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。
請注意,您仍然可以使用此內核運行a.out用戶程序。
diff --git a/Documentation/translations/zh_TW/arm64/amu.rst b/Documentation/translations/zh_TW/arm64/amu.rst
new file mode 100644
index 000000000000..ffdc466e0f62
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/amu.rst
@@ -0,0 +1,104 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/arm64/amu.rst <amu_index>`
+
+Translator: Bailu Lin <bailu.lin@vivo.com>
+ Hu Haowen <src.res@email.cn>
+
+==================================
+AArch64 Linux 中擴展的活動監控單元
+==================================
+
+作者: Ionela Voinescu <ionela.voinescu@arm.com>
+
+日期: 2019-09-10
+
+本文檔簡要描述了 AArch64 Linux 支持的活動監控單元的規範。
+
+
+架構總述
+--------
+
+活動監控是 ARMv8.4 CPU 架構引入的一個可選擴展特性。
+
+活動監控單元(在每個 CPU 中實現)爲系統管理提供了性能計數器。既可以通
+過系統寄存器的方式訪問計數器,同時也支持外部內存映射的方式訪問計數器。
+
+AMUv1 架構實現了一個由4個固定的64位事件計數器組成的計數器組。
+
+ - CPU 周期計數器:同 CPU 的頻率增長
+ - 常量計數器:同固定的系統時鐘頻率增長
+ - 淘汰指令計數器: 同每次架構指令執行增長
+ - 內存停頓周期計數器:計算由在時鐘域內的最後一級緩存中未命中而引起
+ 的指令調度停頓周期數
+
+當處於 WFI 或者 WFE 狀態時,計數器不會增長。
+
+AMU 架構提供了一個高達16位的事件計數器空間,未來新的 AMU 版本中可能
+用它來實現新增的事件計數器。
+
+另外,AMUv1 實現了一個多達16個64位輔助事件計數器的計數器組。
+
+冷復位時所有的計數器會清零。
+
+
+基本支持
+--------
+
+內核可以安全地運行在支持 AMU 和不支持 AMU 的 CPU 組合中。
+因此,當配置 CONFIG_ARM64_AMU_EXTN 後我們無條件使能後續
+(secondary or hotplugged) CPU 檢測和使用這個特性。
+
+當在 CPU 上檢測到該特性時,我們會標記爲特性可用但是不能保證計數器的功能,
+僅表明有擴展屬性。
+
+固件(代碼運行在高異常級別,例如 arm-tf )需支持以下功能:
+
+ - 提供低異常級別(EL2 和 EL1)訪問 AMU 寄存器的能力。
+ - 使能計數器。如果未使能,它的值應爲 0。
+ - 在從電源關閉狀態啓動 CPU 前或後保存或者恢復計數器。
+
+當使用使能了該特性的內核啓動但固件損壞時,訪問計數器寄存器可能會遭遇
+panic 或者死鎖。即使未發現這些症狀,計數器寄存器返回的數據結果並不一
+定能反映真實情況。通常,計數器會返回 0,表明他們未被使能。
+
+如果固件沒有提供適當的支持最好關閉 CONFIG_ARM64_AMU_EXTN。
+值得注意的是,出於安全原因,不要繞過 AMUSERRENR_EL0 設置而捕獲從
+EL0(用戶空間) 訪問 EL1(內核空間)。 因此,固件應該確保訪問 AMU寄存器
+不會困在 EL2或EL3。
+
+AMUv1 的固定計數器可以通過如下系統寄存器訪問:
+
+ - SYS_AMEVCNTR0_CORE_EL0
+ - SYS_AMEVCNTR0_CONST_EL0
+ - SYS_AMEVCNTR0_INST_RET_EL0
+ - SYS_AMEVCNTR0_MEM_STALL_EL0
+
+特定輔助計數器可以通過 SYS_AMEVCNTR1_EL0(n) 訪問,其中n介於0到15。
+
+詳細信息定義在目錄:arch/arm64/include/asm/sysreg.h。
+
+
+用戶空間訪問
+------------
+
+由於以下原因,當前禁止從用戶空間訪問 AMU 的寄存器:
+
+ - 安全因數:可能會暴露處於安全模式執行的代碼信息。
+ - 意願:AMU 是用於系統管理的。
+
+同樣,該功能對用戶空間不可見。
+
+
+虛擬化
+------
+
+由於以下原因,當前禁止從 KVM 客戶端的用戶空間(EL0)和內核空間(EL1)
+訪問 AMU 的寄存器:
+
+ - 安全因數:可能會暴露給其他客戶端或主機端執行的代碼信息。
+
+任何試圖訪問 AMU 寄存器的行爲都會觸發一個註冊在客戶端的未定義異常。
+
diff --git a/Documentation/translations/zh_TW/arm64/booting.txt b/Documentation/translations/zh_TW/arm64/booting.txt
new file mode 100644
index 000000000000..b9439dd54012
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/booting.txt
@@ -0,0 +1,251 @@
+SPDX-License-Identifier: GPL-2.0
+
+Chinese translated version of Documentation/arm64/booting.rst
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+M: Will Deacon <will.deacon@arm.com>
+zh_CN: Fu Wei <wefu@redhat.com>
+zh_TW: Hu Haowen <src.res@email.cn>
+C: 55f058e7574c3615dea4615573a19bdb258696c6
+---------------------------------------------------------------------
+Documentation/arm64/booting.rst 的中文翻譯
+
+如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
+交流有困難的話,也可以向中文版維護者求助。如果本翻譯更新不及時或者翻
+譯存在問題,請聯繫中文版維護者。
+
+英文版維護者: Will Deacon <will.deacon@arm.com>
+中文版維護者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版翻譯者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版校譯者: 傅煒 Fu Wei <wefu@redhat.com>
+繁體中文版校譯者: 胡皓文 Hu Haowen <src.res@email.cn>
+本文翻譯提交時的 Git 檢出點爲: 55f058e7574c3615dea4615573a19bdb258696c6
+
+以下爲正文
+---------------------------------------------------------------------
+ 啓動 AArch64 Linux
+ ==================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2012 年 09 月 07 日
+
+本文檔基於 Russell King 的 ARM 啓動文檔,且適用於所有公開發布的
+AArch64 Linux 內核代碼。
+
+AArch64 異常模型由多個異常級(EL0 - EL3)組成,對於 EL0 和 EL1 異常級
+有對應的安全和非安全模式。EL2 是系統管理級,且僅存在於非安全模式下。
+EL3 是最高特權級,且僅存在於安全模式下。
+
+基於本文檔的目的,我們將簡單地使用『引導裝載程序』(『boot loader』)
+這個術語來定義在將控制權交給 Linux 內核前 CPU 上執行的所有軟體。
+這可能包含安全監控和系統管理代碼,或者它可能只是一些用於準備最小啓動
+環境的指令。
+
+基本上,引導裝載程序(至少)應實現以下操作:
+
+1、設置和初始化 RAM
+2、設置設備樹數據
+3、解壓內核映像
+4、調用內核映像
+
+
+1、設置和初始化 RAM
+-----------------
+
+必要性: 強制
+
+引導裝載程序應該找到並初始化系統中所有內核用於保持系統變量數據的 RAM。
+這個操作的執行方式因設備而異。(它可能使用內部算法來自動定位和計算所有
+RAM,或可能使用對這個設備已知的 RAM 信息,還可能是引導裝載程序設計者
+想到的任何合適的方法。)
+
+
+2、設置設備樹數據
+---------------
+
+必要性: 強制
+
+設備樹數據塊(dtb)必須 8 字節對齊,且大小不能超過 2MB。由於設備樹
+數據塊將在使能緩存的情況下以 2MB 粒度被映射,故其不能被置於必須以特定
+屬性映射的2M區域內。
+
+註: v4.2 之前的版本同時要求設備樹數據塊被置於從內核映像以下
+text_offset 字節處算起第一個 512MB 內。
+
+3、解壓內核映像
+-------------
+
+必要性: 可選
+
+AArch64 內核當前沒有提供自解壓代碼,因此如果使用了壓縮內核映像文件
+(比如 Image.gz),則需要通過引導裝載程序(使用 gzip 等)來進行解壓。
+若引導裝載程序沒有實現這個功能,就要使用非壓縮內核映像文件。
+
+
+4、調用內核映像
+-------------
+
+必要性: 強制
+
+已解壓的內核映像包含一個 64 字節的頭,內容如下:
+
+ u32 code0; /* 可執行代碼 */
+ u32 code1; /* 可執行代碼 */
+ u64 text_offset; /* 映像裝載偏移,小端模式 */
+ u64 image_size; /* 映像實際大小, 小端模式 */
+	u64 flags;		/* 內核旗標, 小端模式 */
+ u64 res2 = 0; /* 保留 */
+ u64 res3 = 0; /* 保留 */
+ u64 res4 = 0; /* 保留 */
+ u32 magic = 0x644d5241; /* 魔數, 小端, "ARM\x64" */
+ u32 res5; /* 保留 (用於 PE COFF 偏移) */
+
+
+映像頭注釋:
+
+- 自 v3.17 起,除非另有說明,所有域都是小端模式。
+
+- code0/code1 負責跳轉到 stext.
+
+- 當通過 EFI 啓動時, 最初 code0/code1 被跳過。
+ res5 是到 PE 文件頭的偏移,而 PE 文件頭含有 EFI 的啓動入口點
+ (efi_stub_entry)。當 stub 代碼完成了它的使命,它會跳轉到 code0
+ 繼續正常的啓動流程。
+
+- v3.17 之前,未明確指定 text_offset 的字節序。此時,image_size 爲零,
+ 且 text_offset 依照內核字節序爲 0x80000。
+ 當 image_size 非零,text_offset 爲小端模式且是有效值,應被引導加載
+ 程序使用。當 image_size 爲零,text_offset 可假定爲 0x80000。
+
+- flags 域 (v3.17 引入) 爲 64 位小端模式,其編碼如下:
+ 位 0: 內核字節序。 1 表示大端模式,0 表示小端模式。
+ 位 1-2: 內核頁大小。
+ 0 - 未指定。
+ 1 - 4K
+ 2 - 16K
+ 3 - 64K
+ 位 3: 內核物理位置
+ 0 - 2MB 對齊基址應儘量靠近內存起始處,因爲
+ 其基址以下的內存無法通過線性映射訪問
+ 1 - 2MB 對齊基址可以在物理內存的任意位置
+ 位 4-63: 保留。
+
+- 當 image_size 爲零時,引導裝載程序應試圖在內核映像末尾之後儘可能
+ 多地保留空閒內存供內核直接使用。對內存空間的需求量因所選定的內核
+ 特性而異, 並無實際限制。
+
+內核映像必須被放置在任意一個可用系統內存 2MB 對齊基址的 text_offset
+字節處,並從該處被調用。2MB 對齊基址和內核映像起始地址之間的區域對於
+內核來說沒有特殊意義,且可能被用於其他目的。
+從映像起始地址算起,最少必須準備 image_size 字節的空閒內存供內核使用。
+註: v4.6 之前的版本無法使用內核映像物理偏移以下的內存,所以當時建議
+將映像儘量放置在靠近系統內存起始的地方。
+
+任何提供給內核的內存(甚至在映像起始地址之前),若未從內核中標記爲保留
+(如在設備樹(dtb)的 memreserve 區域),都將被認爲對內核是可用的。
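+
+例如,引導裝載程序一側對映像頭的檢查大致可以寫成如下形式(僅爲示意;
+arm64_image_header、image_base、base、load_addr 均爲假設的名稱,
+le32_to_cpu/le64_to_cpu 按小端轉換理解,並假設 image_size 非零):
+
+	struct arm64_image_header {
+		u32 code0, code1;
+		u64 text_offset;
+		u64 image_size;
+		u64 flags;
+		u64 res2, res3, res4;
+		u32 magic;
+		u32 res5;
+	};
+
+	const struct arm64_image_header *h = image_base;
+
+	/* 檢查魔數,並計算相對 2MB 對齊基址 base 的裝載地址 */
+	if (le32_to_cpu(h->magic) != 0x644d5241)
+		return -EINVAL;
+	load_addr = base + le64_to_cpu(h->text_offset);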
+
+在跳轉入內核前,必須符合以下狀態:
+
+- 停止所有 DMA 設備,這樣內存數據就不會因爲虛假網絡包或磁碟數據而
+ 被破壞。這可能可以節省你許多的調試時間。
+
+- 主 CPU 通用寄存器設置
+ x0 = 系統 RAM 中設備樹數據塊(dtb)的物理地址。
+ x1 = 0 (保留,將來可能使用)
+ x2 = 0 (保留,將來可能使用)
+ x3 = 0 (保留,將來可能使用)
+
+- CPU 模式
+ 所有形式的中斷必須在 PSTATE.DAIF 中被屏蔽(Debug、SError、IRQ
+ 和 FIQ)。
+ CPU 必須處於 EL2(推薦,可訪問虛擬化擴展)或非安全 EL1 模式下。
+
+- 高速緩存、MMU
+ MMU 必須關閉。
+ 指令緩存開啓或關閉皆可。
+ 已載入的內核映像的相應內存區必須被清理,以達到緩存一致性點(PoC)。
+ 當存在系統緩存或其他使能緩存的一致性主控器時,通常需使用虛擬地址
+ 維護其緩存,而非 set/way 操作。
+ 遵從通過虛擬地址操作維護構架緩存的系統緩存必須被配置,並可以被使能。
+ 而不通過虛擬地址操作維護構架緩存的系統緩存(不推薦),必須被配置且
+ 禁用。
+
+ *譯者註:對於 PoC 以及緩存相關內容,請參考 ARMv8 構架參考手冊
+ ARM DDI 0487A
+
+- 架構計時器
+ CNTFRQ 必須設定爲計時器的頻率,且 CNTVOFF 必須設定爲對所有 CPU
+ 都一致的值。如果在 EL1 模式下進入內核,則 CNTHCTL_EL2 中的
+ EL1PCTEN (bit 0) 必須置位。
+
+- 一致性
+ 通過內核啓動的所有 CPU 在內核入口地址上必須處於相同的一致性域中。
+ 這可能要根據具體實現來定義初始化過程,以使能每個CPU上對維護操作的
+ 接收。
+
+- 系統寄存器
+ 在進入內核映像的異常級中,所有構架中可寫的系統寄存器必須通過軟體
+ 在一個更高的異常級別下初始化,以防止在 未知 狀態下運行。
+
+ 對於擁有 GICv3 中斷控制器並以 v3 模式運行的系統:
+ - 如果 EL3 存在:
+ ICC_SRE_EL3.Enable (位 3) 必須初始化爲 0b1。
+ ICC_SRE_EL3.SRE (位 0) 必須初始化爲 0b1。
+ - 若內核運行在 EL1:
+ ICC_SRE_EL2.Enable (位 3) 必須初始化爲 0b1。
+ ICC_SRE_EL2.SRE (位 0) 必須初始化爲 0b1。
+ - 設備樹(DT)或 ACPI 表必須描述一個 GICv3 中斷控制器。
+
+ 對於擁有 GICv3 中斷控制器並以兼容(v2)模式運行的系統:
+ - 如果 EL3 存在:
+ ICC_SRE_EL3.SRE (位 0) 必須初始化爲 0b0。
+ - 若內核運行在 EL1:
+ ICC_SRE_EL2.SRE (位 0) 必須初始化爲 0b0。
+ - 設備樹(DT)或 ACPI 表必須描述一個 GICv2 中斷控制器。
+
+以上對於 CPU 模式、高速緩存、MMU、架構計時器、一致性、系統寄存器的
+必要條件描述適用於所有 CPU。所有 CPU 必須在同一異常級別跳入內核。
+
+引導裝載程序必須在每個 CPU 處於以下狀態時跳入內核入口:
+
+- 主 CPU 必須直接跳入內核映像的第一條指令。通過此 CPU 傳遞的設備樹
+ 數據塊必須在每個 CPU 節點中包含一個 『enable-method』 屬性,所
+ 支持的 enable-method 請見下文。
+
+ 引導裝載程序必須生成這些設備樹屬性,並在跳入內核入口之前將其插入
+ 數據塊。
+
+- enable-method 爲 「spin-table」 的 CPU 必須在它們的 CPU
+ 節點中包含一個 『cpu-release-addr』 屬性。這個屬性標識了一個
+ 64 位自然對齊且初始化爲零的內存位置。
+
+ 這些 CPU 必須在內存保留區(通過設備樹中的 /memreserve/ 域傳遞
+ 給內核)中自旋於內核之外,輪詢它們的 cpu-release-addr 位置(必須
+ 包含在保留區中)。可通過插入 wfe 指令來降低忙循環開銷,而主 CPU 將
+ 發出 sev 指令。當對 cpu-release-addr 所指位置的讀取操作返回非零值
+ 時,CPU 必須跳入此值所指向的地址。此值爲一個單獨的 64 位小端值,
+ 因此 CPU 須在跳轉前將所讀取的值轉換爲其本身的端模式。
+
+- enable-method 爲 「psci」 的 CPU 保持在內核外(比如,在
+ memory 節點中描述爲內核空間的內存區外,或在通過設備樹 /memreserve/
+ 域中描述爲內核保留區的空間中)。內核將會發起在 ARM 文檔(編號
+ ARM DEN 0022A:用於 ARM 上的電源狀態協調接口系統軟體)中描述的
+ CPU_ON 調用來將 CPU 帶入內核。
+
+ *譯者注: ARM DEN 0022A 已更新到 ARM DEN 0022C。
+
+ 設備樹必須包含一個 『psci』 節點,請參考以下文檔:
+ Documentation/devicetree/bindings/arm/psci.yaml
+
+
+- 輔助 CPU 通用寄存器設置
+ x0 = 0 (保留,將來可能使用)
+ x1 = 0 (保留,將來可能使用)
+ x2 = 0 (保留,將來可能使用)
+ x3 = 0 (保留,將來可能使用)
+
diff --git a/Documentation/translations/zh_TW/arm64/elf_hwcaps.rst b/Documentation/translations/zh_TW/arm64/elf_hwcaps.rst
new file mode 100644
index 000000000000..3eb1c623ce31
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/elf_hwcaps.rst
@@ -0,0 +1,244 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/arm64/elf_hwcaps.rst <elf_hwcaps_index>`
+
+Translator: Bailu Lin <bailu.lin@vivo.com>
+ Hu Haowen <src.res@email.cn>
+
+================
+ARM64 ELF hwcaps
+================
+
+這篇文檔描述了 arm64 ELF hwcaps 的用法和語義。
+
+
+1. 簡介
+-------
+
+有些硬體或軟體功能僅在某些 CPU 實現上和/或在具體某個內核配置上可用,但
+對於處於 EL0 的用戶空間代碼沒有可用的架構發現機制。內核通過在輔助向量表
+公開一組稱爲 hwcaps 的標誌而把這些功能暴露給用戶空間。
+
+用戶空間軟體可以通過獲取輔助向量的 AT_HWCAP 或 AT_HWCAP2 條目來測試功能,
+並測試是否設置了相關標誌,例如::
+
+ bool floating_point_is_present(void)
+ {
+ unsigned long hwcaps = getauxval(AT_HWCAP);
+ if (hwcaps & HWCAP_FP)
+ return true;
+
+ return false;
+ }
+
+如果軟體依賴於 hwcap 描述的功能,在嘗試使用該功能前則應檢查相關的 hwcap
+標誌以驗證該功能是否存在。
+
+不能通過其他方式探查這些功能。當一個功能不可用時,嘗試使用它可能導致不可
+預測的行爲,並且無法保證能確切的知道該功能不可用,例如 SIGILL。
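+
+同樣的檢測方式也適用於通過 AT_HWCAP2 公開的功能。下面是一個示意性的
+例子(僅爲示意;函數名 sve2_is_present 爲假設的名稱,並假設已包含
+<sys/auxv.h> 等必要的頭文件)::
+
+    bool sve2_is_present(void)
+    {
+        unsigned long hwcaps2 = getauxval(AT_HWCAP2);
+        if (hwcaps2 & HWCAP2_SVE2)
+            return true;
+
+        return false;
+    }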
+
+
+2. Hwcaps 的說明
+----------------
+
+大多數 hwcaps 旨在說明通過架構 ID 寄存器(處於 EL0 的用戶空間代碼無法訪問)
+描述的功能的存在。這些 hwcap 通過 ID 寄存器欄位定義,並且應根據 ARM 體系
+結構參考手冊(ARM ARM)中定義的欄位來解釋說明。
+
+這些 hwcaps 以下面的形式描述::
+
+ idreg.field == val 表示有某個功能。
+
+當 idreg.field 中有 val 時,hwcaps 表示 ARM ARM 定義的功能是有效的,但是
+並不是說要完全和 val 相等,也不是說 idreg.field 描述的其他功能就是缺失的。
+
+其他 hwcaps 可能表明無法僅由 ID 寄存器描述的功能的存在。這些 hwcaps 可能
+沒有被 ID 寄存器描述,需要參考其他文檔。
+
+
+3. AT_HWCAP 中揭示的 hwcaps
+---------------------------
+
+HWCAP_FP
+ ID_AA64PFR0_EL1.FP == 0b0000 表示有此功能。
+
+HWCAP_ASIMD
+ ID_AA64PFR0_EL1.AdvSIMD == 0b0000 表示有此功能。
+
+HWCAP_EVTSTRM
+ 通用計時器頻率配置爲大約100KHz以生成事件。
+
+HWCAP_AES
+ ID_AA64ISAR0_EL1.AES == 0b0001 表示有此功能。
+
+HWCAP_PMULL
+ ID_AA64ISAR0_EL1.AES == 0b0010 表示有此功能。
+
+HWCAP_SHA1
+ ID_AA64ISAR0_EL1.SHA1 == 0b0001 表示有此功能。
+
+HWCAP_SHA2
+ ID_AA64ISAR0_EL1.SHA2 == 0b0001 表示有此功能。
+
+HWCAP_CRC32
+ ID_AA64ISAR0_EL1.CRC32 == 0b0001 表示有此功能。
+
+HWCAP_ATOMICS
+ ID_AA64ISAR0_EL1.Atomic == 0b0010 表示有此功能。
+
+HWCAP_FPHP
+ ID_AA64PFR0_EL1.FP == 0b0001 表示有此功能。
+
+HWCAP_ASIMDHP
+ ID_AA64PFR0_EL1.AdvSIMD == 0b0001 表示有此功能。
+
+HWCAP_CPUID
+ 根據 Documentation/arm64/cpu-feature-registers.rst 描述,EL0 可以訪問
+ 某些 ID 寄存器。
+
+ 這些 ID 寄存器可能表示功能的可用性。
+
+HWCAP_ASIMDRDM
+ ID_AA64ISAR0_EL1.RDM == 0b0001 表示有此功能。
+
+HWCAP_JSCVT
+ ID_AA64ISAR1_EL1.JSCVT == 0b0001 表示有此功能。
+
+HWCAP_FCMA
+ ID_AA64ISAR1_EL1.FCMA == 0b0001 表示有此功能。
+
+HWCAP_LRCPC
+ ID_AA64ISAR1_EL1.LRCPC == 0b0001 表示有此功能。
+
+HWCAP_DCPOP
+ ID_AA64ISAR1_EL1.DPB == 0b0001 表示有此功能。
+
+HWCAP_SHA3
+ ID_AA64ISAR0_EL1.SHA3 == 0b0001 表示有此功能。
+
+HWCAP_SM3
+ ID_AA64ISAR0_EL1.SM3 == 0b0001 表示有此功能。
+
+HWCAP_SM4
+ ID_AA64ISAR0_EL1.SM4 == 0b0001 表示有此功能。
+
+HWCAP_ASIMDDP
+ ID_AA64ISAR0_EL1.DP == 0b0001 表示有此功能。
+
+HWCAP_SHA512
+ ID_AA64ISAR0_EL1.SHA2 == 0b0010 表示有此功能。
+
+HWCAP_SVE
+ ID_AA64PFR0_EL1.SVE == 0b0001 表示有此功能。
+
+HWCAP_ASIMDFHM
+ ID_AA64ISAR0_EL1.FHM == 0b0001 表示有此功能。
+
+HWCAP_DIT
+ ID_AA64PFR0_EL1.DIT == 0b0001 表示有此功能。
+
+HWCAP_USCAT
+ ID_AA64MMFR2_EL1.AT == 0b0001 表示有此功能。
+
+HWCAP_ILRCPC
+ ID_AA64ISAR1_EL1.LRCPC == 0b0010 表示有此功能。
+
+HWCAP_FLAGM
+ ID_AA64ISAR0_EL1.TS == 0b0001 表示有此功能。
+
+HWCAP_SSBS
+ ID_AA64PFR1_EL1.SSBS == 0b0010 表示有此功能。
+
+HWCAP_SB
+ ID_AA64ISAR1_EL1.SB == 0b0001 表示有此功能。
+
+HWCAP_PACA
+ 如 Documentation/arm64/pointer-authentication.rst 所描述,
+ ID_AA64ISAR1_EL1.APA == 0b0001 或 ID_AA64ISAR1_EL1.API == 0b0001
+ 表示有此功能。
+
+HWCAP_PACG
+ 如 Documentation/arm64/pointer-authentication.rst 所描述,
+ ID_AA64ISAR1_EL1.GPA == 0b0001 或 ID_AA64ISAR1_EL1.GPI == 0b0001
+ 表示有此功能。
+
+HWCAP2_DCPODP
+
+ ID_AA64ISAR1_EL1.DPB == 0b0010 表示有此功能。
+
+HWCAP2_SVE2
+
+ ID_AA64ZFR0_EL1.SVEVer == 0b0001 表示有此功能。
+
+HWCAP2_SVEAES
+
+ ID_AA64ZFR0_EL1.AES == 0b0001 表示有此功能。
+
+HWCAP2_SVEPMULL
+
+ ID_AA64ZFR0_EL1.AES == 0b0010 表示有此功能。
+
+HWCAP2_SVEBITPERM
+
+ ID_AA64ZFR0_EL1.BitPerm == 0b0001 表示有此功能。
+
+HWCAP2_SVESHA3
+
+ ID_AA64ZFR0_EL1.SHA3 == 0b0001 表示有此功能。
+
+HWCAP2_SVESM4
+
+ ID_AA64ZFR0_EL1.SM4 == 0b0001 表示有此功能。
+
+HWCAP2_FLAGM2
+
+ ID_AA64ISAR0_EL1.TS == 0b0010 表示有此功能。
+
+HWCAP2_FRINT
+
+ ID_AA64ISAR1_EL1.FRINTTS == 0b0001 表示有此功能。
+
+HWCAP2_SVEI8MM
+
+ ID_AA64ZFR0_EL1.I8MM == 0b0001 表示有此功能。
+
+HWCAP2_SVEF32MM
+
+ ID_AA64ZFR0_EL1.F32MM == 0b0001 表示有此功能。
+
+HWCAP2_SVEF64MM
+
+ ID_AA64ZFR0_EL1.F64MM == 0b0001 表示有此功能。
+
+HWCAP2_SVEBF16
+
+ ID_AA64ZFR0_EL1.BF16 == 0b0001 表示有此功能。
+
+HWCAP2_I8MM
+
+ ID_AA64ISAR1_EL1.I8MM == 0b0001 表示有此功能。
+
+HWCAP2_BF16
+
+ ID_AA64ISAR1_EL1.BF16 == 0b0001 表示有此功能。
+
+HWCAP2_DGH
+
+ ID_AA64ISAR1_EL1.DGH == 0b0001 表示有此功能。
+
+HWCAP2_RNG
+
+ ID_AA64ISAR0_EL1.RNDR == 0b0001 表示有此功能。
+
+HWCAP2_BTI
+
+ ID_AA64PFR0_EL1.BT == 0b0001 表示有此功能。
+
+
+4. 未使用的 AT_HWCAP 位
+-----------------------
+
+爲了與用戶空間交互,內核保證 AT_HWCAP 的第62、63位將始終返回0。
+
diff --git a/Documentation/translations/zh_TW/arm64/hugetlbpage.rst b/Documentation/translations/zh_TW/arm64/hugetlbpage.rst
new file mode 100644
index 000000000000..846b500dae97
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/hugetlbpage.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/arm64/hugetlbpage.rst <hugetlbpage_index>`
+
+Translator: Bailu Lin <bailu.lin@vivo.com>
+ Hu Haowen <src.res@email.cn>
+
+=====================
+ARM64中的 HugeTLBpage
+=====================
+
+大頁依靠有效利用 TLBs 來提高地址翻譯的性能。這取決於以下
+兩點 -
+
+ - 大頁的大小
+ - TLBs 支持的條目大小
+
+ARM64 接口支持2種大頁方式。
+
+1) pud/pmd 級別的塊映射
+-----------------------
+
+這是常規大頁,他們的 pmd 或 pud 頁面表條目指向一個內存塊。
+不管 TLB 中支持的條目大小如何,塊映射可以減少翻譯大頁地址
+所需遍歷的頁表深度。
+
+2) 使用連續位
+-------------
+
+架構中轉換頁表條目(D4.5.3, ARM DDI 0487C.a)中提供一個連續
+位告訴 MMU 這個條目是一個連續條目集的一員,它可以被緩存在單
+個 TLB 條目中。
+
+在 Linux 中連續位用來增加 pmd 和 pte(最後一級)級別映射的大
+小。受支持的連續頁表條目數量因頁面大小和頁表級別而異。
+
+
+支持以下大頁尺寸配置 -
+
+ ====== ======== ==== ======== ===
+ - CONT PTE PMD CONT PMD PUD
+ ====== ======== ==== ======== ===
+ 4K: 64K 2M 32M 1G
+ 16K: 2M 32M 1G
+ 64K: 2M 512M 16G
+ ====== ======== ==== ======== ===
+
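+在用戶空間,這些大頁通常經由 hugetlbfs 或帶 MAP_HUGETLB 標誌的 mmap
+來使用。下面是一個示意(僅爲示意;假設系統已預留了 2MB 大頁,且已包含
+<sys/mman.h>)::
+
+    void *addr = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+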
diff --git a/Documentation/translations/zh_TW/arm64/index.rst b/Documentation/translations/zh_TW/arm64/index.rst
new file mode 100644
index 000000000000..2322783f3881
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/index.rst
@@ -0,0 +1,23 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/arm64/index.rst <arm64_index>`
+:Translator: Bailu Lin <bailu.lin@vivo.com>
+ Hu Haowen <src.res@email.cn>
+
+.. _tw_arm64_index:
+
+
+==========
+ARM64 架構
+==========
+
+.. toctree::
+ :maxdepth: 2
+
+ amu
+ hugetlbpage
+ perf
+ elf_hwcaps
+
diff --git a/Documentation/translations/zh_TW/arm64/legacy_instructions.txt b/Documentation/translations/zh_TW/arm64/legacy_instructions.txt
new file mode 100644
index 000000000000..6d4454f77b9e
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/legacy_instructions.txt
@@ -0,0 +1,77 @@
+SPDX-License-Identifier: GPL-2.0
+
+Chinese translated version of Documentation/arm64/legacy_instructions.rst
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Punit Agrawal <punit.agrawal@arm.com>
+ Suzuki K. Poulose <suzuki.poulose@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+Traditional Chinese maintainer: Hu Haowen <src.res@email.cn>
+---------------------------------------------------------------------
+Documentation/arm64/legacy_instructions.rst 的中文翻譯
+
+如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
+交流有困難的話,也可以向中文版維護者求助。如果本翻譯更新不及時或者翻
+譯存在問題,請聯繫中文版維護者。
+
+本文翻譯提交時的 Git 檢出點爲: bc465aa9d045feb0e13b4a8f32cc33c1943f62d6
+
+英文版維護者: Punit Agrawal <punit.agrawal@arm.com>
+ Suzuki K. Poulose <suzuki.poulose@arm.com>
+中文版維護者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版翻譯者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版校譯者: 傅煒 Fu Wei <wefu@redhat.com>
+繁體中文版校譯者:胡皓文 Hu Haowen <src.res@email.cn>
+
+以下爲正文
+---------------------------------------------------------------------
+Linux 內核在 arm64 上的移植提供了一個基礎框架,以支持構架中正在被淘汰或已廢棄指令的模擬執行。
+這個基礎框架的代碼使用未定義指令鉤子(hooks)來支持模擬。如果指令存在,它也允許在硬體中啓用該指令。
+
+模擬模式可通過寫 sysctl 節點(/proc/sys/abi)來控制。
+不同的執行方式及 sysctl 節點的相應值,解釋如下:
+
+* Undef(未定義)
+ 值: 0
+ 產生未定義指令終止異常。它是那些構架中已廢棄的指令,如 SWP,的默認處理方式。
+
+* Emulate(模擬)
+ 值: 1
+ 使用軟體模擬方式。爲解決軟體遷移問題,這種模擬指令模式的使用是被跟蹤的,並會發出速率限制警告。
+ 它是那些構架中正在被淘汰的指令,如 CP15 barriers(隔離指令),的默認處理方式。
+
+* Hardware Execution(硬體執行)
+ 值: 2
+ 雖然標記爲正在被淘汰,但一些實現可能提供硬體執行這些指令的使能/禁用操作。
+ 使用硬體執行一般會有更好的性能,但將無法收集運行時對正被淘汰指令的使用統計數據。
+
+默認執行模式依賴於指令在構架中狀態。正在被淘汰的指令應該以模擬(Emulate)作爲默認模式,
+而已廢棄的指令必須默認使用未定義(Undef)模式。
+
+注意:指令模擬可能無法應對所有情況。更多詳情請參考單獨的指令注釋。
+
+受支持的遺留指令
+-------------
+* SWP{B}
+節點: /proc/sys/abi/swp
+狀態: 已廢棄
+默認執行方式: Undef (0)
+
+* CP15 Barriers
+節點: /proc/sys/abi/cp15_barrier
+狀態: 正被淘汰,不推薦使用
+默認執行方式: Emulate (1)
+
+* SETEND
+節點: /proc/sys/abi/setend
+狀態: 正被淘汰,不推薦使用
+默認執行方式: Emulate (1)*
+註:爲了使能這個特性,系統中的所有 CPU 必須在 EL0 支持混合字節序。
+如果一個新的 CPU (不支持混合字節序) 在使能這個特性後被熱插入系統,
+在應用中可能會出現不可預期的結果。
+
diff --git a/Documentation/translations/zh_TW/arm64/memory.txt b/Documentation/translations/zh_TW/arm64/memory.txt
new file mode 100644
index 000000000000..99c2b78b5674
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/memory.txt
@@ -0,0 +1,119 @@
+SPDX-License-Identifier: GPL-2.0
+
+Chinese translated version of Documentation/arm64/memory.rst
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Catalin Marinas <catalin.marinas@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+Traditional Chinese maintainer: Hu Haowen <src.res@email.cn>
+---------------------------------------------------------------------
+Documentation/arm64/memory.rst 的中文翻譯
+
+如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
+交流有困難的話,也可以向中文版維護者求助。如果本翻譯更新不及時或者翻
+譯存在問題,請聯繫中文版維護者。
+
+本文翻譯提交時的 Git 檢出點爲: bc465aa9d045feb0e13b4a8f32cc33c1943f62d6
+
+英文版維護者: Catalin Marinas <catalin.marinas@arm.com>
+中文版維護者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版翻譯者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版校譯者: 傅煒 Fu Wei <wefu@redhat.com>
+繁體中文版校譯者: 胡皓文 Hu Haowen <src.res@email.cn>
+
+以下爲正文
+---------------------------------------------------------------------
+ Linux 在 AArch64 中的內存布局
+ ===========================
+
+作者: Catalin Marinas <catalin.marinas@arm.com>
+
+本文檔描述 AArch64 Linux 內核所使用的虛擬內存布局。此構架可以實現
+頁大小爲 4KB 的 4 級轉換表和頁大小爲 64KB 的 3 級轉換表。
+
+AArch64 Linux 使用 3 級或 4 級轉換表,其頁大小配置爲 4KB,對於用戶和內核
+分別都有 39-bit (512GB) 或 48-bit (256TB) 的虛擬地址空間。
+對於頁大小爲 64KB的配置,僅使用 2 級轉換表,有 42-bit (4TB) 的虛擬地址空間,但內存布局相同。
+
+用戶地址空間的 63:48 位爲 0,而內核地址空間的相應位爲 1。TTBRx 的
+選擇由虛擬地址的 63 位給出。swapper_pg_dir 僅包含內核(全局)映射,
+而用戶 pgd 僅包含用戶(非全局)映射。swapper_pg_dir 地址被寫入
+TTBR1 中,且從不寫入 TTBR0。
+
+
+AArch64 Linux 在頁大小爲 4KB,並使用 3 級轉換表時的內存布局:
+
+起始地址 結束地址 大小 用途
+-----------------------------------------------------------------------
+0000000000000000 0000007fffffffff 512GB 用戶空間
+ffffff8000000000 ffffffffffffffff 512GB 內核空間
+
+
+AArch64 Linux 在頁大小爲 4KB,並使用 4 級轉換表時的內存布局:
+
+起始地址 結束地址 大小 用途
+-----------------------------------------------------------------------
+0000000000000000 0000ffffffffffff 256TB 用戶空間
+ffff000000000000 ffffffffffffffff 256TB 內核空間
+
+
+AArch64 Linux 在頁大小爲 64KB,並使用 2 級轉換表時的內存布局:
+
+起始地址 結束地址 大小 用途
+-----------------------------------------------------------------------
+0000000000000000 000003ffffffffff 4TB 用戶空間
+fffffc0000000000 ffffffffffffffff 4TB 內核空間
+
+
+AArch64 Linux 在頁大小爲 64KB,並使用 3 級轉換表時的內存布局:
+
+起始地址 結束地址 大小 用途
+-----------------------------------------------------------------------
+0000000000000000 0000ffffffffffff 256TB 用戶空間
+ffff000000000000 ffffffffffffffff 256TB 內核空間
+
+
+更詳細的內核虛擬內存布局,請參閱內核啓動信息。
+
+
+4KB 頁大小的轉換表查找:
+
++--------+--------+--------+--------+--------+--------+--------+--------+
+|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
++--------+--------+--------+--------+--------+--------+--------+--------+
+ | | | | | |
+ | | | | | v
+ | | | | | [11:0] 頁內偏移
+ | | | | +-> [20:12] L3 索引
+ | | | +-----------> [29:21] L2 索引
+ | | +---------------------> [38:30] L1 索引
+ | +-------------------------------> [47:39] L0 索引
+ +-------------------------------------------------> [63] TTBR0/1
+
+
+64KB 頁大小的轉換表查找:
+
++--------+--------+--------+--------+--------+--------+--------+--------+
+|63 56|55 48|47 40|39 32|31 24|23 16|15 8|7 0|
++--------+--------+--------+--------+--------+--------+--------+--------+
+ | | | | |
+ | | | | v
+ | | | | [15:0] 頁內偏移
+ | | | +----------> [28:16] L3 索引
+ | | +--------------------------> [41:29] L2 索引
+ | +-------------------------------> [47:42] L1 索引
+ +-------------------------------------------------> [63] TTBR0/1
+
+
+當使用 KVM 時, 管理程序(hypervisor)在 EL2 中通過相對內核虛擬地址的
+一個固定偏移來映射內核頁(內核虛擬地址的高 24 位設爲零):
+
+起始地址 結束地址 大小 用途
+-----------------------------------------------------------------------
+0000004000000000 0000007fffffffff 256GB 在 HYP 中映射的內核對象
+
diff --git a/Documentation/translations/zh_TW/arm64/perf.rst b/Documentation/translations/zh_TW/arm64/perf.rst
new file mode 100644
index 000000000000..f1ffd55dfe50
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/perf.rst
@@ -0,0 +1,88 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/arm64/perf.rst <perf_index>`
+
+Translator: Bailu Lin <bailu.lin@vivo.com>
+ Hu Haowen <src.res@email.cn>
+
+=============
+Perf 事件屬性
+=============
+
+:作者: Andrew Murray <andrew.murray@arm.com>
+:日期: 2019-03-06
+
+exclude_user
+------------
+
+該屬性排除用戶空間。
+
+用戶空間始終運行在 EL0,因此該屬性將排除 EL0。
+
+
+exclude_kernel
+--------------
+
+該屬性排除內核空間。
+
+打開 VHE 時內核運行在 EL2,不打開 VHE 時內核運行在 EL1。客戶機
+內核總是運行在 EL1。
+
+對於宿主機,該屬性排除 EL1 和 VHE 上的 EL2。
+
+對於客戶機,該屬性排除 EL1。請注意客戶機從來不會運行在 EL2。
+
+
+exclude_hv
+----------
+
+該屬性排除虛擬機監控器。
+
+對於 VHE 宿主機該屬性將被忽略,此時我們認爲宿主機內核是虛擬機監
+控器。
+
+對於 non-VHE 宿主機該屬性將排除 EL2,因爲虛擬機監控器運行在 EL2
+的任何代碼主要用於客戶機和宿主機的切換。
+
+對於客戶機該屬性無效。請注意客戶機從來不會運行在 EL2。
+
+
+exclude_host / exclude_guest
+----------------------------
+
+這些屬性分別排除了 KVM 宿主機和客戶機。
+
+KVM 宿主機可能運行在 EL0(用戶空間),EL1(non-VHE 內核)和
+EL2(VHE 內核 或 non-VHE 虛擬機監控器)。
+
+KVM 客戶機可能運行在 EL0(用戶空間)和 EL1(內核)。
+
+由於宿主機和客戶機之間重疊的異常級別,我們不能僅僅依靠 PMU 的硬體異
+常過濾機制-因此我們必須啓用/禁用對於客戶機進入和退出的計數。而這在
+VHE 和 non-VHE 系統上表現不同。
+
+對於 non-VHE 系統的 exclude_host 屬性排除 EL2 - 在進入和退出客戶
+機時,我們會根據 exclude_host 和 exclude_guest 屬性在適當的情況下
+禁用/啓用該事件。
+
+對於 VHE 系統的 exclude_guest 屬性排除 EL1,而對其中的 exclude_host
+屬性同時排除 EL0,EL2。在進入和退出客戶機時,我們會適當地根據
+exclude_host 和 exclude_guest 屬性包括/排除 EL0。
+
+以上聲明也適用於在 non-VHE 客戶機使用這些屬性時,但是請注意客戶機從
+來不會運行在 EL2。
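+
+例如,一個只在 EL0(用戶空間)計數 CPU 周期的事件屬性大致可以這樣配置
+(僅爲示意,省略了錯誤處理;假設已包含 <linux/perf_event.h> 與
+<sys/syscall.h>)::
+
+    struct perf_event_attr attr = {
+        .type = PERF_TYPE_HARDWARE,
+        .config = PERF_COUNT_HW_CPU_CYCLES,
+        .size = sizeof(attr),
+        .exclude_kernel = 1,    /* 排除內核(EL1,VHE 時含 EL2) */
+        .exclude_hv = 1,        /* non-VHE 時排除 EL2 */
+    };
+
+    int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);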
+
+
+準確性
+------
+
+在 non-VHE 宿主機上,我們在 EL2 進入/退出宿主機/客戶機的切換時啓用/
+關閉計數器 -但是在啓用/禁用計數器和進入/退出客戶機之間存在一段延時。
+對於 exclude_host, 我們可以通過過濾 EL2 消除在客戶機進入/退出邊界
+上用於計數客戶機事件的宿主機事件計數器。但是當使用 !exclude_hv 時,
+在客戶機進入/退出有一個小的停電窗口無法捕獲到宿主機的事件。
+
+在 VHE 系統沒有停電窗口。
+
diff --git a/Documentation/translations/zh_TW/arm64/silicon-errata.txt b/Documentation/translations/zh_TW/arm64/silicon-errata.txt
new file mode 100644
index 000000000000..bf2077197504
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/silicon-errata.txt
@@ -0,0 +1,79 @@
+SPDX-License-Identifier: GPL-2.0
+
+Chinese translated version of Documentation/arm64/silicon-errata.rst
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+M: Will Deacon <will.deacon@arm.com>
+zh_CN: Fu Wei <wefu@redhat.com>
+zh_TW: Hu Haowen <src.res@email.cn>
+C: 1926e54f115725a9248d0c4c65c22acaf94de4c4
+---------------------------------------------------------------------
+Documentation/arm64/silicon-errata.rst 的中文翻譯
+
+如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
+交流有困難的話,也可以向中文版維護者求助。如果本翻譯更新不及時或者翻
+譯存在問題,請聯繫中文版維護者。
+
+英文版維護者: Will Deacon <will.deacon@arm.com>
+中文版維護者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版翻譯者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版校譯者: 傅煒 Fu Wei <wefu@redhat.com>
+繁體中文版校譯者: 胡皓文 Hu Haowen <src.res@email.cn>
+本文翻譯提交時的 Git 檢出點爲: 1926e54f115725a9248d0c4c65c22acaf94de4c4
+
+以下爲正文
+---------------------------------------------------------------------
+ 晶片勘誤和軟體補救措施
+ ==================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2015年11月27日
+
+一個不幸的現實:硬體經常帶有一些所謂的「瑕疵(errata)」,導致其在
+某些特定情況下會違背構架定義的行爲。就基於 ARM 的硬體而言,這些瑕疵
+大體可分爲以下幾類:
+
+ A 類:無可行補救措施的嚴重缺陷。
+ B 類:有可接受的補救措施的重大或嚴重缺陷。
+ C 類:在正常操作中不會顯現的小瑕疵。
+
+更多資訊,請在 infocenter.arm.com (需註冊)中查閱「軟體開發者勘誤
+筆記」(「Software Developers Errata Notice」)文檔。
+
+對於 Linux 而言,B 類缺陷可能需要作業系統的某些特別處理。例如,避免
+一個特殊的代碼序列,或是以一種特定的方式配置處理器。在某種不太常見的
+情況下,爲將 A 類缺陷當作 C 類處理,可能需要用類似的手段。這些手段被
+統稱爲「軟體補救措施」,且僅在少數情況需要(例如,那些需要一個運行在
+非安全異常級的補救措施 *並且* 能被 Linux 觸發的情況)。
+
+對於尚在討論中的可能對未受瑕疵影響的系統產生干擾的軟體補救措施,有一個
+相應的內核配置(Kconfig)選項被加在 「內核特性(Kernel Features)」->
+「基於可選方法框架的 ARM 瑕疵補救措施(ARM errata workarounds via
+the alternatives framework)"。這些選項被默認開啓,若探測到受影響的CPU,
+補丁將在運行時被使用。至於對系統運行影響較小的補救措施,內核配置選項
+並不存在,且代碼以某種規避瑕疵的方式被構造(帶注釋爲宜)。
+
+這種做法對於在任意內核原始碼樹中準確地判斷出哪個瑕疵已被軟體方法所補救
+稍微有點麻煩,所以在 Linux 內核中此文件作爲軟體補救措施的註冊表,
+並將在新的軟體補救措施被提交和向後移植(backported)到穩定內核時被更新。
+
+| 實現者 | 受影響的組件 | 勘誤編號 | 內核配置 |
++----------------+-----------------+-----------------+-------------------------+
+| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
+| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
+| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
+| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
+| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
+| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
+| ARM | Cortex-A57 | #852523 | N/A |
+| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+| | | | |
+| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
+
diff --git a/Documentation/translations/zh_TW/arm64/tagged-pointers.txt b/Documentation/translations/zh_TW/arm64/tagged-pointers.txt
new file mode 100644
index 000000000000..87f88628401a
--- /dev/null
+++ b/Documentation/translations/zh_TW/arm64/tagged-pointers.txt
@@ -0,0 +1,57 @@
+SPDX-License-Identifier: GPL-2.0
+
+Chinese translated version of Documentation/arm64/tagged-pointers.rst
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+Traditional Chinese maintainer: Hu Haowen <src.res@email.cn>
+---------------------------------------------------------------------
+Documentation/arm64/tagged-pointers.rst 的中文翻譯
+
+如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
+交流有困難的話,也可以向中文版維護者求助。如果本翻譯更新不及時或者翻
+譯存在問題,請聯繫中文版維護者。
+
+英文版維護者: Will Deacon <will.deacon@arm.com>
+中文版維護者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版翻譯者: 傅煒 Fu Wei <wefu@redhat.com>
+中文版校譯者: 傅煒 Fu Wei <wefu@redhat.com>
+繁體中文版校譯者: 胡皓文 Hu Haowen <src.res@email.cn>
+
+以下爲正文
+---------------------------------------------------------------------
+ Linux 在 AArch64 中帶標記的虛擬地址
+ =================================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2013 年 06 月 12 日
+
+本文檔簡述了在 AArch64 地址轉換系統中提供的帶標記的虛擬地址及其在
+AArch64 Linux 中的潛在用途。
+
+內核提供的地址轉換表配置使通過 TTBR0 完成的虛擬地址轉換(即用戶空間
+映射),其虛擬地址的最高 8 位(63:56)會被轉換硬體所忽略。這種機制
+讓這些位可供應用程式自由使用,其注意事項如下:
+
+ (1) 內核要求所有傳遞到 EL1 的用戶空間地址帶有 0x00 標記。
+ 這意味著任何攜帶用戶空間虛擬地址的系統調用(syscall)
+     參數 *必須* 在陷入內核前使它們的最高字節被清零(清零方法可參考下文的示意)。
+
+ (2) 非零標記在傳遞信號時不被保存。這意味著在應用程式中利用了
+ 標記的信號處理函數無法依賴 siginfo_t 的用戶空間虛擬
+ 地址所攜帶的包含其內部域信息的標記。此規則的一個例外是
+ 當信號是在調試觀察點的異常處理程序中產生的,此時標記的
+ 信息將被保存。
+
+ (3) 當使用帶標記的指針時需特別留心,因爲僅對兩個虛擬地址
+ 的高字節,C 編譯器很可能無法判斷它們是不同的。
+
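+例如,在發起系統調用前,大致可以這樣清除一個帶標記指針的最高字節
+(僅爲示意,tagged 爲假設的變量名):
+
+	void *untagged = (void *)((unsigned long)tagged &
+				  ~(0xffUL << 56));
+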
+此構架會阻止對帶標記的 PC 指針的利用,因此在異常返回時,其高字節
+將被設置成位 55 的符號擴展值。
+
diff --git a/Documentation/translations/zh_TW/cpu-freq/core.rst b/Documentation/translations/zh_TW/cpu-freq/core.rst
new file mode 100644
index 000000000000..3d890c2f2a61
--- /dev/null
+++ b/Documentation/translations/zh_TW/cpu-freq/core.rst
@@ -0,0 +1,108 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :doc:`../../../cpu-freq/core`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+ Hu Haowen <src.res@email.cn>
+
+.. _tw_core.rst:
+
+
+====================================
+CPUFreq核心和CPUFreq通知器的通用說明
+====================================
+
+作者:
+ - Dominik Brodowski <linux@brodo.de>
+ - David Kimdon <dwhedon@debian.org>
+ - Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+.. 目錄:
+
+ 1. CPUFreq核心和接口
+ 2. CPUFreq通知器
+ 3. 含有Operating Performance Point (OPP)的CPUFreq表的生成
+
+1. CPUFreq核心和接口
+======================
+
+cpufreq核心代碼位於drivers/cpufreq/cpufreq.c中。這些cpufreq代碼爲CPUFreq架構的驅
+動程序(那些操作硬體切換頻率的代碼)以及 "通知器 "提供了一個標準化的接口。
+這些是設備驅動程序,或需要了解策略變化的其它內核部分(如 ACPI 熱量管理),或需要了解所有
+頻率更改的部分(如計時代碼),甚至是需要強制確定速度限制的通知器(如 ARM 架構上的 LCD 驅動程序)。
+此外, 內核 "常數" loops_per_jiffy會根據頻率變化而更新。
+
+cpufreq策略的引用計數由 cpufreq_cpu_get 和 cpufreq_cpu_put 來完成,以確保 cpufreq 驅
+動程序被正確地註冊到核心中,並且驅動程序在 cpufreq_put_cpu 被調用之前不會被卸載。這也保證
+了每個CPU核的cpufreq 策略在使用期間不會被釋放。
+
+2. CPUFreq 通知器
+====================
+
+CPUFreq通知器符合標準的內核通知器接口。
+關於通知器的細節請參閱 linux/include/linux/notifier.h。
+
+這裡有兩個不同的CPUfreq通知器 - 策略通知器和轉換通知器。
+
+
+2.1 CPUFreq策略通知器
+----------------------------
+
+當創建或移除策略時,這些都會被通知。
+
+階段是在通知器的第二個參數中指定的。當第一次創建策略時,階段是CPUFREQ_CREATE_POLICY,當
+策略被移除時,階段是CPUFREQ_REMOVE_POLICY。
+
+第三個參數 ``void *pointer`` 指向一個結構體cpufreq_policy,其包括min,max(新策略的下限和
+上限(單位爲kHz))這幾個值。
+
+
+2.2 CPUFreq轉換通知器
+--------------------------------
+
+當CPUfreq驅動切換CPU核心頻率時,策略中的每個在線CPU都會收到兩次通知,這些變化沒有任何外部干
+預。
+
+第二個參數指定階段 - CPUFREQ_PRECHANGE 或 CPUFREQ_POSTCHANGE。
+
+第三個參數是一個包含如下值的結構體cpufreq_freqs:
+
+===== ====================
+cpu 受影響cpu的編號
+old 舊頻率
+new 新頻率
+flags cpufreq驅動的標誌
+===== ====================
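+
+例如,一個示意性的轉換通知器大致可以這樣註冊(my_notifier_call 與
+my_nb 均爲假設的名稱,僅爲簡單示意)::
+
+    static int my_notifier_call(struct notifier_block *nb,
+                                unsigned long action, void *data)
+    {
+        struct cpufreq_freqs *freqs = data;
+
+        if (action == CPUFREQ_POSTCHANGE)
+            pr_info("cpufreq: %u kHz -> %u kHz\n",
+                    freqs->old, freqs->new);
+        return NOTIFY_OK;
+    }
+
+    static struct notifier_block my_nb = {
+        .notifier_call = my_notifier_call,
+    };
+
+    /* 在初始化代碼中: */
+    cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);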
+
+3. 含有Operating Performance Point (OPP)的CPUFreq表的生成
+==================================================================
+關於OPP的細節請參閱 Documentation/power/opp.rst
+
+dev_pm_opp_init_cpufreq_table -
+ 這個功能提供了一個隨時可用的轉換程序,用來將OPP層關於可用頻率的內部信息翻譯成一種容易提供給
+ cpufreq的格式。
+
+ .. Warning::
+
+ 不要在中斷上下文中使用此函數。
+
+ 例如::
+
+ soc_pm_init()
+ {
+ /* Do things */
+ r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
+ if (!r)
+ policy->freq_table = freq_table;
+ /* Do other things */
+ }
+
+ .. note::
+
+ 該函數只有在CONFIG_PM_OPP之外還啓用了CONFIG_CPU_FREQ時才可用。
+
+dev_pm_opp_free_cpufreq_table
+ 釋放dev_pm_opp_init_cpufreq_table分配的表。
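+
+ 例如,與上面 soc_pm_init() 對應的清理過程(僅爲示意)::
+
+	soc_pm_exit()
+	{
+		dev_pm_opp_free_cpufreq_table(dev, &freq_table);
+	}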
+
diff --git a/Documentation/translations/zh_TW/cpu-freq/cpu-drivers.rst b/Documentation/translations/zh_TW/cpu-freq/cpu-drivers.rst
new file mode 100644
index 000000000000..2bb8197cd320
--- /dev/null
+++ b/Documentation/translations/zh_TW/cpu-freq/cpu-drivers.rst
@@ -0,0 +1,256 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :doc:`../../../cpu-freq/cpu-drivers`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+ Hu Haowen <src.res@email.cn>
+
+.. _tw_cpu-drivers.rst:
+
+
+=======================================
+如何實現一個新的CPUFreq處理器驅動程序?
+=======================================
+
+作者:
+
+
+ - Dominik Brodowski <linux@brodo.de>
+ - Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ - Viresh Kumar <viresh.kumar@linaro.org>
+
+.. Contents
+
+ 1. 怎麼做?
+ 1.1 初始化
+ 1.2 Per-CPU 初始化
+ 1.3 驗證
+ 1.4 target/target_index 或 setpolicy 或 fast_switch?
+ 1.5 target/target_index
+ 1.6 fast_switch
+ 1.7 setpolicy
+ 1.8 get_intermediate 與 target_intermediate
+ 2. 頻率表助手
+
+
+
+1. 怎麼做?
+===========
+
+如此,你剛剛得到了一個全新的CPU/晶片組及其數據手冊,並希望爲這個CPU/晶片組添加cpufreq
+支持?很好,這裡有一些至關重要的提示:
+
+
+1.1 初始化
+----------
+
+首先,在__initcall_level_7 (module_init())或更靠後的函數中檢查這個內核是否
+運行在正確的CPU和正確的晶片組上。如果是,則使用cpufreq_register_driver()向
+CPUfreq核心層註冊一個cpufreq_driver結構體。
+
+結構體cpufreq_driver應該包含什麼成員?
+
+ .name - 驅動的名字。
+
+ .init - 一個指向per-policy初始化函數的指針。
+
+ .verify - 一個指向"verification"函數的指針。
+
+ .setpolicy 或 .fast_switch 或 .target 或 .target_index - 差異見
+ 下文。
+
+並且可選擇
+
+ .flags - cpufreq核的提示。
+
+ .driver_data - cpufreq驅動程序的特定數據。
+
+ .get_intermediate 和 target_intermediate - 用於在改變CPU頻率時切換到穩定
+ 的頻率。
+
+ .get - 返回CPU的當前頻率。
+
+ .bios_limit - 返回HW/BIOS對CPU的最大頻率限制值。
+
+ .exit - 一個指向per-policy清理函數的指針,該函數在cpu熱插拔過程的CPU_POST_DEAD
+ 階段被調用。
+
+ .suspend - 一個指向per-policy暫停函數的指針,該函數在關中斷且在該策略的調節器停止
+ 後被調用。
+
+ .resume - 一個指向per-policy恢復函數的指針,該函數在關中斷且在調節器再一次開始前被
+ 調用。
+
+ .ready - 一個指向per-policy準備函數的指針,該函數在策略完全初始化之後被調用。
+
+ .attr - 一個指向NULL結尾的"struct freq_attr"列表的指針,該函數允許導出值到
+ sysfs。
+
+ .boost_enabled - 如果設置,則啓用提升(boost)頻率。
+
+ .set_boost - 一個指向per-policy函數的指針,該函數用來開啓/關閉提升(boost)頻率功能。
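+
+一個最小的註冊示意如下(my_cpufreq_driver、my_cpu_init、my_target_index
+均爲假設的名稱,僅爲示意)::
+
+    static struct cpufreq_driver my_cpufreq_driver = {
+        .name = "my-cpufreq",
+        .init = my_cpu_init,
+        .verify = cpufreq_generic_frequency_table_verify,
+        .target_index = my_target_index,
+        .attr = cpufreq_generic_attr,
+    };
+
+    /* 在驅動初始化時: */
+    ret = cpufreq_register_driver(&my_cpufreq_driver);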
+
+
+1.2 Per-CPU 初始化
+------------------
+
+每當一個新的CPU被註冊到設備模型中,或者在cpufreq驅動註冊自己之後,如果此CPU的cpufreq策
+略不存在,則會調用per-policy的初始化函數cpufreq_driver.init。請注意,.init()和.exit()程序
+只對策略調用一次,而不是對策略管理的每個CPU調用一次。它需要一個 ``struct cpufreq_policy
+*policy`` 作爲參數。現在該怎麼做呢?
+
+如果有必要,請在你的CPU上激活CPUfreq功能支持。
+
+然後,驅動程序必須填寫以下數值:
+
++-----------------------------------+--------------------------------------+
+|policy->cpuinfo.min_freq 和 | |
+|policy->cpuinfo.max_freq | 該CPU支持的最低和最高頻率(kHz) |
+| | |
+| | |
++-----------------------------------+--------------------------------------+
+|policy->cpuinfo.transition_latency | |
+| | CPU在兩個頻率之間切換所需的時間,以 |
+| | 納秒爲單位(如適用,否則指定 |
+| | CPUFREQ_ETERNAL) |
++-----------------------------------+--------------------------------------+
+|policy->cur | 該CPU當前的工作頻率(如適用) |
+| | |
++-----------------------------------+--------------------------------------+
+|policy->min, | |
+|policy->max, | |
+|policy->policy and, if necessary, | |
+|policy->governor | 必須包含該cpu的 「默認策略」。稍後 |
+| | 會用這些值調用 |
+| | cpufreq_driver.verify and either |
+| | cpufreq_driver.setpolicy or |
+| | cpufreq_driver.target/target_index |
+| | |
++-----------------------------------+--------------------------------------+
+|policy->cpus | 用與這個CPU一起做DVFS的(在線+離線) |
+| | CPU(即與它共享時鐘/電壓軌)的掩碼更新 |
+| | 這個 |
+| | |
++-----------------------------------+--------------------------------------+
+
+對於設置其中的一些值(cpuinfo.min[max]_freq, policy->min[max]),頻率表助手可能會有幫
+助。關於它們的更多信息,請參見第2節。
+
+
+1.3 驗證
+--------
+
+當用戶決定設置一個新的策略(由 「policy,governor,min,max組成」)時,必須對這個策略進行驗證,
+以便糾正不兼容的值。爲了驗證這些值,cpufreq_verify_within_limits(``struct cpufreq_policy
+*policy``, ``unsigned int min_freq``, ``unsigned int max_freq``)函數可能會有幫助。
+關於頻率表助手的詳細內容請參見第2節。
+
+您需要確保至少有一個有效頻率(或工作範圍)在 policy->min 和 policy->max 範圍內。如果有必
+要,先增加policy->max,只有在沒有辦法的情況下,才減少policy->min。
+
+
+1.4 target 或 target_index 或 setpolicy 或 fast_switch?
+-------------------------------------------------------
+
+大多數cpufreq驅動甚至大多數cpu頻率升降算法只允許將CPU頻率設置爲預定義的固定值。對於這些,你
+可以使用->target(),->target_index()或->fast_switch()回調。
+
+有些cpufreq功能的處理器可以自己在某些限制之間切換頻率。這些應使用->setpolicy()回調。
+
+
+1.5. target/target_index
+------------------------
+
+target_index調用有兩個參數:``struct cpufreq_policy * policy``和``unsigned int``
+索引(於列出的頻率表)。
+
+當調用這裡時,CPUfreq驅動必須設置新的頻率。實際頻率必須由freq_table[index].frequency決定。
+
+它應該總是在錯誤的情況下恢復到之前的頻率(即policy->restore_freq),即使我們之前切換到中間頻率。
+
+已棄用
+----------
+目標調用有三個參數。``struct cpufreq_policy * policy``, unsigned int target_frequency,
+unsigned int relation.
+
+CPUfreq驅動在調用這裡時必須設置新的頻率。實際的頻率必須使用以下規則來確定。
+
+- 緊跟 "目標頻率"。
+- policy->min <= new_freq <= policy->max (這必須是有效的!!!)
+- 如果 relation==CPUFREQ_REL_L,嘗試選擇一個高於或等於 target_freq 的 new_freq。("L代表
+ 最低,但不能低於")
+- 如果 relation==CPUFREQ_REL_H,嘗試選擇一個低於或等於 target_freq 的 new_freq。("H代表
+ 最高,但不能高於")
+
+這裡,頻率表助手可能會幫助你--詳見第2節。
+
+1.6. fast_switch
+----------------
+
+這個函數用於從調度器的上下文進行頻率切換。並非所有的驅動都要實現它,因爲不允許在這個回調中睡眠。這
+個回調必須經過高度優化,以儘可能快地進行切換。
+
+這個函數有兩個參數: ``struct cpufreq_policy *policy`` 和 ``unsigned int target_frequency``。
+
+
+1.7 setpolicy
+-------------
+
+setpolicy調用只需要一個``struct cpufreq_policy * policy``作爲參數。需要將處理器內或晶片組內動態頻
+率切換的下限設置爲policy->min,上限設置爲policy->max,如果支持的話,當policy->policy爲
+CPUFREQ_POLICY_PERFORMANCE時選擇面向性能的設置,當CPUFREQ_POLICY_POWERSAVE時選擇面向省電的設置。
+也可以查看drivers/cpufreq/longrun.c中的參考實現。
+
+1.8 get_intermediate 和 target_intermediate
+--------------------------------------------
+
+僅適用於 target_index() 和 CPUFREQ_ASYNC_NOTIFICATION 未設置的驅動。
+
+get_intermediate應該返回一個平台想要切換到的穩定的中間頻率,target_intermediate()應該將CPU設置爲
+該頻率,然後再跳轉到'index'對應的頻率。核心會負責發送通知,驅動不必在target_intermediate()或
+target_index()中處理。
+
+在驅動程序不想因爲某個目標頻率切換到中間頻率的情況下,它們可以從get_intermediate()中返回'0'。在這種情況
+下,核心將直接調用->target_index()。
+
+注意:->target_index()應該在失敗的情況下恢復到policy->restore_freq,因爲core會爲此發送通知。
+
+
+2. 頻率表助手
+=============
+
+由於大多數cpufreq處理器只允許被設置爲幾個特定的頻率,因此,一個帶有一些函數的 「頻率表」可能會輔助處理器驅動
+程序的一些工作。這樣的 "頻率表" 由一個cpufreq_frequency_table條目構成的數組組成,"driver_data" 中包
+含了驅動程序的具體數值,"frequency" 中包含了相應的頻率,並設置了標誌。在表的最後,需要添加一個
+cpufreq_frequency_table條目,頻率設置爲CPUFREQ_TABLE_END。而如果想跳過表中的一個條目,則將頻率設置爲
+CPUFREQ_ENTRY_INVALID。這些條目不需要按照任何特定的順序排序,但如果它們是有序的,cpufreq核心
+進行DVFS時會稍快一些,因爲搜索最佳匹配會更快。
+
+如果策略在其policy->freq_table欄位中包含一個有效的指針,cpufreq表就會被核心自動驗證。
+
+cpufreq_frequency_table_verify()保證至少有一個有效的頻率在policy->min和policy->max範圍內,並且所有其他
+標準都被滿足。這對->verify調用很有幫助。
+
+cpufreq_frequency_table_target()是對應於->target階段的頻率表助手。只要把數值傳遞給這個函數,這個函數就會返
+回包含CPU要設置的頻率的頻率表條目。
+
+以下宏可以作爲cpufreq_frequency_table的疊代器。
+
+cpufreq_for_each_entry(pos, table) - 遍歷頻率表的所有條目。
+
+cpufreq_for_each_valid_entry(pos, table) - 該函數遍歷所有條目,不包括CPUFREQ_ENTRY_INVALID頻率。
+使用參數 "pos"-一個``cpufreq_frequency_table * `` 作爲循環變量,使用參數 "table"-作爲你想疊代
+的``cpufreq_frequency_table * `` 。
+
+例如::
+
+ struct cpufreq_frequency_table *pos, *driver_freq_table;
+
+ cpufreq_for_each_entry(pos, driver_freq_table) {
+ /* Do something with pos */
+ pos->frequency = ...
+ }
+
+如果你需要在driver_freq_table中處理pos的位置,不要減去指針,因爲它的代價相當高。相反,使用宏
+cpufreq_for_each_entry_idx() 和 cpufreq_for_each_valid_entry_idx() 。
+
diff --git a/Documentation/translations/zh_TW/cpu-freq/cpufreq-stats.rst b/Documentation/translations/zh_TW/cpu-freq/cpufreq-stats.rst
new file mode 100644
index 000000000000..d80bfed50e8c
--- /dev/null
+++ b/Documentation/translations/zh_TW/cpu-freq/cpufreq-stats.rst
@@ -0,0 +1,132 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :doc:`../../../cpu-freq/cpufreq-stats`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+ Hu Haowen <src.res@email.cn>
+
+.. _tw_cpufreq-stats.rst:
+
+
+==========================================
+sysfs CPUFreq Stats的一般說明
+==========================================
+
+用戶信息
+
+
+作者: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+
+.. Contents
+
+ 1. 簡介
+ 2. 提供的統計數據(舉例說明)
+ 3. 配置cpufreq-stats
+
+
+1. 簡介
+===============
+
+cpufreq-stats是一個爲每個CPU提供CPU頻率統計的驅動。
+這些統計數據在/sysfs中以一堆只讀接口的形式提供。這個接口(在配置好後)將出現在
+/sysfs(<sysfs root>/devices/system/cpu/cpuX/cpufreq/stats/)中cpufreq下的一個單
+獨的目錄中,提供給每個CPU。
+各種統計數據將在此目錄下形成只讀文件。
+
+此驅動是獨立於任何可能運行在你所用CPU上的特定cpufreq_driver而設計的。因此,它將與所有
+cpufreq_driver一起工作。
+
+
+2. 提供的統計數據(舉例說明)
+=====================================
+
+cpufreq stats提供了以下統計數據(在下面詳細解釋)。
+
+- time_in_state
+- total_trans
+- trans_table
+
+所有的統計數據將從統計驅動被載入的時間(或統計被重置的時間)開始,到某一統計數據被讀取的時間爲止。
+顯然,統計驅動不會有任何關於統計驅動載入之前的頻率轉換信息。
+
+::
+
+ <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # ls -l
+ total 0
+ drwxr-xr-x 2 root root 0 May 14 16:06 .
+ drwxr-xr-x 3 root root 0 May 14 15:58 ..
+ --w------- 1 root root 4096 May 14 16:06 reset
+ -r--r--r-- 1 root root 4096 May 14 16:06 time_in_state
+ -r--r--r-- 1 root root 4096 May 14 16:06 total_trans
+ -r--r--r-- 1 root root 4096 May 14 16:06 trans_table
+
+- **reset**
+
+只寫屬性,可用於重置統計計數器。這對於評估不同調節器下的系統行爲非常有用,且無需重啓。
+
+
+- **time_in_state**
+
+此項給出了這個CPU所支持的每個頻率所花費的時間。cat輸出的每一行都會有"<frequency>
+<time>"對,表示這個CPU在<frequency>上花費了<time>個usertime單位的時間。這裡的
+usertime單位是10mS(類似於/proc中輸出的其他時間)。
+
+::
+
+ <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat time_in_state
+ 3600000 2089
+ 3400000 136
+ 3200000 34
+ 3000000 67
+ 2800000 172488
+
+
+- **total_trans**
+
+給出了這個CPU上頻率轉換的總次數。cat的輸出將有一個單一的計數,這就是頻率轉換的總數。
+
+::
+
+ <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat total_trans
+ 20
+
+- **trans_table**
+
+這將提供所有CPU頻率轉換的細粒度信息。這裡的cat輸出是一個二維矩陣,其中一個條目<i, j>(第
+i行,第j列)代表從Freq_i到Freq_j的轉換次數。Freq_i行和Freq_j列遵循驅動最初提供給cpufreq
+核的頻率表的排序順序,因此可以排序(升序或降序)或不排序。 這裡的輸出也包含了每行每列的實際
+頻率值,以便更好地閱讀。
+
+如果轉換表大於PAGE_SIZE,讀取時將返回一個-EFBIG錯誤。
+
+::
+
+ <mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat trans_table
+ From : To
+ : 3600000 3400000 3200000 3000000 2800000
+ 3600000: 0 5 0 0 0
+ 3400000: 4 0 2 0 0
+ 3200000: 0 1 0 2 0
+ 3000000: 0 0 1 0 3
+ 2800000: 0 0 0 2 0
+
+3. 配置cpufreq-stats
+============================
+
+要在你的內核中配置cpufreq-stats::
+
+ Config Main Menu
+ Power management options (ACPI, APM) --->
+ CPU Frequency scaling --->
+ [*] CPU Frequency scaling
+ [*] CPU frequency translation statistics
+
+
+"CPU Frequency scaling" (CONFIG_CPU_FREQ) 應該被啓用以配置cpufreq-stats。
+
+"CPU frequency translation statistics" (CONFIG_CPU_FREQ_STAT)提供了包括
+time_in_state、total_trans和trans_table的統計數據。
+
+一旦啓用了這個選項,並且你的CPU支持cpufrequency,你就可以在/sysfs中看到CPU頻率統計。
+
diff --git a/Documentation/translations/zh_TW/cpu-freq/index.rst b/Documentation/translations/zh_TW/cpu-freq/index.rst
new file mode 100644
index 000000000000..1a8e680f95ed
--- /dev/null
+++ b/Documentation/translations/zh_TW/cpu-freq/index.rst
@@ -0,0 +1,47 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :doc:`../../../cpu-freq/index`
+:Translator: Yanteng Si <siyanteng@loongson.cn>
+ Hu Haowen <src.res@email.cn>
+
+.. _tw_index.rst:
+
+
+=======================================================
+Linux CPUFreq - Linux(TM)內核中的CPU頻率和電壓升降代碼
+=======================================================
+
+Author: Dominik Brodowski <linux@brodo.de>
+
+ 時鐘升降允許你在運行中改變CPU的時鐘速度。這是一個很好的節省電池電量的方法,因爲時
+ 鐘速度越低,CPU消耗的電量越少。
+
+
+.. toctree::
+ :maxdepth: 1
+
+ core
+ cpu-drivers
+ cpufreq-stats
+
+郵件列表
+------------
+這裡有一個 CPU 頻率變化的 CVS 提交和通用列表,您可以在這裡報告bug、問題或提交補丁。要發
+布消息,請發送電子郵件到 linux-pm@vger.kernel.org。
+
+連結
+-----
+FTP檔案:
+* ftp://ftp.linux.org.uk/pub/linux/cpufreq/
+
+如何訪問CVS倉庫:
+* http://cvs.arm.linux.org.uk/
+
+CPUFreq郵件列表:
+* http://vger.kernel.org/vger-lists.html#linux-pm
+
+SA-1100的時鐘和電壓標度:
+* http://www.lartmaker.nl/projects/scaling
+
diff --git a/Documentation/translations/zh_TW/filesystems/debugfs.rst b/Documentation/translations/zh_TW/filesystems/debugfs.rst
new file mode 100644
index 000000000000..270dd94fddf1
--- /dev/null
+++ b/Documentation/translations/zh_TW/filesystems/debugfs.rst
@@ -0,0 +1,224 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :doc:`../../../filesystems/debugfs`
+
+=======
+Debugfs
+=======
+
+譯者
+::
+
+ 中文版維護者:羅楚成 Chucheng Luo <luochucheng@vivo.com>
+ 中文版翻譯者:羅楚成 Chucheng Luo <luochucheng@vivo.com>
+ 中文版校譯者: 羅楚成 Chucheng Luo <luochucheng@vivo.com>
+ 繁體中文版校譯者: 胡皓文 Hu Haowen <src.res@email.cn>
+
+
+
+版權所有2020 羅楚成 <luochucheng@vivo.com>
+版權所有2021 胡皓文 Hu Haowen <src.res@email.cn>
+
+
+Debugfs是內核開發人員在用戶空間獲取信息的簡單方法。與/proc不同,proc只提供進程
+信息。也不像sysfs,具有嚴格的「每個文件一個值「的規則。debugfs根本沒有規則,開發
+人員可以在這裡放置他們想要的任何信息。debugfs文件系統也不能用作穩定的ABI接口。
+從理論上講,debugfs導出文件的時候沒有任何約束。但是[1]實際情況並不總是那麼
+簡單。即使是debugfs接口,也最好根據需要進行設計,並儘量保持接口不變。
+
+
+Debugfs通常使用以下命令安裝::
+
+ mount -t debugfs none /sys/kernel/debug
+
+(或等效的/etc/fstab行)。
+debugfs根目錄默認僅可由root用戶訪問。要更改對文件樹的訪問,請使用「 uid」,「 gid」
+和「 mode」掛載選項。請注意,debugfs API僅按照GPL協議導出到模塊。
+
+使用debugfs的代碼應包含<linux/debugfs.h>。然後,首先是創建至少一個目錄來保存
+一組debugfs文件::
+
+ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
+
+如果成功,此調用將在指定的父目錄下創建一個名爲name的目錄。如果parent參數爲空,
+則會在debugfs根目錄中創建。創建目錄成功時,返回值是一個指向dentry結構體的指針。
+該dentry結構體的指針可用於在目錄中創建文件(以及最後將其清理乾淨)。ERR_PTR
+(-ERROR)返回值表明出錯。如果返回ERR_PTR(-ENODEV),則表明內核是在沒有debugfs
+支持的情況下構建的,並且下述函數都不會起作用。
+
+在debugfs目錄中創建文件的最通用方法是::
+
+ struct dentry *debugfs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+在這裡,name是要創建的文件的名稱,mode描述了訪問文件應具有的權限,parent指向
+應該保存文件的目錄,data將存儲在產生的inode結構體的i_private欄位中,而fops是
+一組文件操作函數,這些函數中實現文件操作的具體行爲。至少,read()和/或
+write()操作應提供;其他可以根據需要包括在內。同樣的,返回值將是指向創建文件
+的dentry指針,錯誤時返回ERR_PTR(-ERROR),系統不支持debugfs時返回值爲ERR_PTR
+(-ENODEV)。創建一個初始大小的文件,可以使用以下函數代替::
+
+ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
+
+file_size是初始文件大小。其他參數跟函數debugfs_create_file的相同。
+
+在許多情況下,沒必要自己去創建一組文件操作;對於一些簡單的情況,debugfs代碼提供
+了許多幫助函數。包含單個整數值的文件可以使用以下任何一項創建::
+
+ void debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+ void debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
+ struct dentry *debugfs_create_u32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value);
+ void debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value);
+
+這些文件支持讀取和寫入給定值。如果某個文件不支持寫入,只需根據需要設置mode
+參數位。這些文件中的值以十進位表示;如果需要使用十六進位,可以使用以下函數
+替代::
+
+ void debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+ void debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
+ void debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value);
+ void debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value);
+
+這些功能只有在開發人員知道導出值的大小的時候才有用。某些數據類型在不同的架構上
+有不同的寬度,這樣會使情況變得有些複雜。在這種特殊情況下可以使用以下函數::
+
+ void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
+
+不出所料,此函數將創建一個debugfs文件來表示類型爲size_t的變量。
+
+同樣地,也有導出無符號長整型變量的函數,分別以十進位和十六進位表示如下::
+
+ struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value);
+ void debugfs_create_xul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value);
+
+布爾值可以通過以下方式放置在debugfs中::
+
+ struct dentry *debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value);
+
+
+讀取結果文件將產生Y(對於非零值)或N,後跟換行符。寫入的時候,它只接受大寫或小寫
+的Y或N,或者1或0。任何其他輸入將被忽略。
+
+同樣,atomic_t類型的值也可以放置在debugfs中::
+
+ void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
+
+讀取此文件將獲得atomic_t值,寫入此文件將設置atomic_t值。
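+
+下面把這幾個輔助函數串起來做一個簡單示意(目錄名"example"和各變量均
+爲假設)::
+
+    static struct dentry *dir;
+    static u32 counter;
+    static bool enabled;
+
+    /* 在模塊初始化時: */
+    dir = debugfs_create_dir("example", NULL);
+    debugfs_create_u32("counter", 0644, dir, &counter);
+    debugfs_create_bool("enabled", 0644, dir, &enabled);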
+
+另一個選擇是通過以下結構體和函數導出一個任意二進位數據塊::
+
+ struct debugfs_blob_wrapper {
+ void *data;
+ unsigned long size;
+ };
+
+ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob);
+
+讀取此文件將返回由指針指向debugfs_blob_wrapper結構體的數據。一些驅動使用「blobs」
+作爲一種返回幾行(靜態)格式化文本的簡單方法。這個函數可用於導出二進位信息,但
+似乎在主線中沒有任何代碼這樣做。請注意,使用debugfs_create_blob()命令創建的
+所有文件是只讀的。
+
+如果您要轉儲一個寄存器塊(在開發過程中經常會這麼做,但是這樣的調試代碼很少上傳
+到主線中),debugfs提供兩個函數:一個用於創建僅寄存器文件,另一個把一個寄存器塊
+插入一個順序文件中::
+
+ struct debugfs_reg32 {
+ char *name;
+ unsigned long offset;
+ };
+
+ struct debugfs_regset32 {
+ struct debugfs_reg32 *regs;
+ int nregs;
+ void __iomem *base;
+ };
+
+ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
+
+ void debugfs_print_regs32(struct seq_file *s, struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix);
+
+「base」參數可能爲0,但您可能需要使用__stringify構建reg32數組,實際上有許多寄存器
+名稱(宏)是寄存器塊在基址上的字節偏移量。
+
+如果要在debugfs中轉儲u32數組,可以使用以下函數創建文件::
+
+ void debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements);
+
+「array」參數提供數據,而「elements」參數爲數組中元素的數量。注意:數組創建後,數組
+大小無法更改。
+
+有一個函數來創建與設備相關的seq_file::
+
+ struct dentry *debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data));
+
+「dev」參數是與此debugfs文件相關的設備,並且「read_fn」是一個函數指針,這個函數在
+列印seq_file內容的時候被回調。
+
+還有一些其他的面向目錄的函數::
+
+ struct dentry *debugfs_rename(struct dentry *old_dir,
+ struct dentry *old_dentry,
+ struct dentry *new_dir,
+ const char *new_name);
+
+ struct dentry *debugfs_create_symlink(const char *name,
+ struct dentry *parent,
+ const char *target);
+
+調用debugfs_rename()將爲現有的debugfs文件重命名,可能同時切換目錄。新名稱 new_name
+在函數調用之前不能存在;返回值爲old_dentry,其中包含更新的信息。可以使用
+debugfs_create_symlink()創建符號連結。
+
+所有debugfs用戶必須考慮的一件事是:
+
+debugfs不會自動清除在其中創建的任何目錄。如果一個模塊在不顯式刪除debugfs目錄的
+情況下卸載模塊,結果將會遺留很多野指針,從而導致系統不穩定。因此,所有debugfs
+用戶-至少是那些可以作爲模塊構建的用戶-必須做模塊卸載的時候準備刪除在此創建的
+所有文件和目錄。一份文件可以通過以下方式刪除::
+
+ void debugfs_remove(struct dentry *dentry);
+
+dentry值可以爲NULL或錯誤值,在這種情況下,不會有任何文件被刪除。
+
+很久以前,內核開發者使用debugfs時需要記錄他們創建的每個dentry指針,以便最後所有
+文件都可以被清理掉。但是,現在debugfs用戶能調用以下函數遞歸清除之前創建的文件::
+
+ void debugfs_remove_recursive(struct dentry *dentry);
+
+如果將對應頂層目錄的dentry傳遞給以上函數,則該目錄下的整個層次結構將會被刪除。
+
+注釋:
+[1] http://lwn.net/Articles/309298/
+
diff --git a/Documentation/translations/zh_TW/filesystems/index.rst b/Documentation/translations/zh_TW/filesystems/index.rst
new file mode 100644
index 000000000000..4e5dde0dca3c
--- /dev/null
+++ b/Documentation/translations/zh_TW/filesystems/index.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/filesystems/index.rst <filesystems_index>`
+:Translator: Wang Wenhu <wenhu.wang@vivo.com>
+ Hu Haowen <src.res@email.cn>
+
+.. _tw_filesystems_index:
+
+========================
+Linux Kernel中的文件系統
+========================
+
+這份正在開發的手冊或許在未來某個輝煌的日子裡以易懂的形式將Linux虛擬\
+文件系統(VFS)層以及基於其上的各種文件系統如何工作呈現給大家。當前\
+可以看到下面的內容。
+
+文件系統
+========
+
+文件系統實現文檔。
+
+.. toctree::
+ :maxdepth: 2
+
+ virtiofs
+ debugfs
+ tmpfs
+
+
diff --git a/Documentation/translations/zh_TW/filesystems/sysfs.txt b/Documentation/translations/zh_TW/filesystems/sysfs.txt
new file mode 100644
index 000000000000..acd677f19d4f
--- /dev/null
+++ b/Documentation/translations/zh_TW/filesystems/sysfs.txt
@@ -0,0 +1,377 @@
+SPDX-License-Identifier: GPL-2.0
+
+Chinese translated version of Documentation/filesystems/sysfs.rst
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Patrick Mochel <mochel@osdl.org>
+ Mike Murphy <mamurph@cs.clemson.edu>
+Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+---------------------------------------------------------------------
+Chinese translation of Documentation/filesystems/sysfs.rst
+
+If you want to comment on or update the content of this document, please
+contact the maintainer of the original document directly. If you have
+difficulty communicating in English, you can also ask the Chinese
+maintainer for help. Please contact the Chinese maintainer if this
+translation is out of date or if there is a problem with it.
+English maintainers: Patrick Mochel <mochel@osdl.org>
+                     Mike Murphy <mamurph@cs.clemson.edu>
+Chinese maintainer:  Fu Wei <tekkamanninja@gmail.com>
+Chinese translator:  Fu Wei <tekkamanninja@gmail.com>
+Chinese proofreader: Fu Wei <tekkamanninja@gmail.com>
+Traditional Chinese proofreader: Hu Haowen <src.res@email.cn>
+
+
+The main text follows
+---------------------------------------------------------------------
+sysfs - The filesystem for exporting kernel objects (kobjects)
+
+Patrick Mochel <mochel@osdl.org>
+Mike Murphy <mamurph@cs.clemson.edu>
+
+Revised:  16 August 2011
+Original: 10 January 2003
+
+
+Introduction to sysfs:
+~~~~~~~~~~~~~~~~~~~~~~
+
+sysfs is an in-memory filesystem that was initially based on ramfs. It
+provides a means to export kernel data structures, their attributes, and
+the linkages between them to userspace.
+
+sysfs is inherently tied to the kobject infrastructure. Please read
+Documentation/core-api/kobject.rst for more information concerning the
+kobject interface.
+
+
+Using sysfs
+~~~~~~~~~~~
+
+sysfs is always compiled in if CONFIG_SYSFS is defined in the kernel
+configuration. You can mount it with the following command:
+
+ mount -t sysfs sysfs /sys
+
+
+Directory Creation
+~~~~~~~~~~~~~~~~~~
+
+For every kobject that is registered with the system, a directory is
+created for it in sysfs. That directory is created as a subdirectory of
+the kobject's parent, expressing the kernel's object hierarchies to
+userspace. Top-level directories in sysfs represent the common ancestors
+of object hierarchies, i.e. the subsystems the objects belong to.
+
+Sysfs internally stores a pointer to the kobject that implements a
+directory in the kernfs_node object associated with the directory. In the
+past, this kobject pointer was used directly by sysfs for reference
+counting whenever a kobject file was opened or closed. With the current
+sysfs implementation, the kobject reference count can only be modified
+directly through the sysfs_schedule_callback() function.
+
+
+Attributes
+~~~~~~~~~~
+
+Attributes of kobjects can be exported as regular files in the filesystem.
+Sysfs defines file I/O operation methods for attributes, providing a means
+to read and write kernel attributes.
+
+
+Attributes should be ASCII text files, preferably with only one value per
+file. Since having only one value per file may be inefficient, it is also
+widely accepted for a file to contain an array of values of the same type.
+
+Mixing types, expressing multiple lines of data, and doing fancy
+formatting of data is heavily frowned upon. Doing these things is
+disgraceful, and your code may be rewritten without notice to its author.
+
+
+A simple attribute structure is defined as follows:
+
+struct attribute {
+ char * name;
+ struct module *owner;
+ umode_t mode;
+};
+
+
+int sysfs_create_file(struct kobject * kobj, const struct attribute * attr);
+void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr);
+
+
+An individual attribute structure does not contain the means to read or
+write the attribute value. Subsystems are encouraged to define their own
+attribute structures and wrapper functions for adding and removing
+attributes of a specific object type.
+
+For example, the driver model defines the device_attribute structure as
+follows:
+
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+};
+
+int device_create_file(struct device *, const struct device_attribute *);
+void device_remove_file(struct device *, const struct device_attribute *);
+
+For defining device attributes, the following helper macro is also
+defined:
+
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+For example, the declaration:
+
+static DEVICE_ATTR(foo, S_IWUSR | S_IRUGO, show_foo, store_foo);
+
+is equivalent to the following code:
+
+static struct device_attribute dev_attr_foo = {
+ .attr = {
+ .name = "foo",
+ .mode = S_IWUSR | S_IRUGO,
+ .show = show_foo,
+ .store = store_foo,
+ },
+};
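+
+For illustration, a driver would then typically create the file on its
+device at probe time, using the dev_attr_foo defined above, and remove it
+on unbind. A minimal sketch; the platform driver callbacks are assumptions
+of this example:
+
+static int my_probe(struct platform_device *pdev)
+{
+	/* Creates the "foo" attribute file for this device. */
+	return device_create_file(&pdev->dev, &dev_attr_foo);
+}
+
+static int my_remove(struct platform_device *pdev)
+{
+	device_remove_file(&pdev->dev, &dev_attr_foo);
+	return 0;
+}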
+
+
+Subsystem-Specific Callbacks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a subsystem defines a new attribute type, it must implement a set of
+sysfs operations to forward read and write calls to the show and store
+methods of the attribute owners:
+
+struct sysfs_ops {
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
+};
+
+[Subsystems should already have defined a struct kobj_type as a descriptor
+for this type, which is where the sysfs_ops pointer is stored. See the
+kobject documentation for more information.]
+
+When a file is read or written, sysfs calls the appropriate method for the
+type. The method then translates the generic struct kobject and struct
+attribute pointers to the appropriate pointer types, and calls the
+associated function.
+
+
+An example:
+
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
+static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = kobj_to_dev(kobj);
+ ssize_t ret = -EIO;
+
+ if (dev_attr->show)
+ ret = dev_attr->show(dev, dev_attr, buf);
+ if (ret >= (ssize_t)PAGE_SIZE) {
+ printk("dev_attr_show: %pS returned bad count\n",
+ dev_attr->show);
+ }
+ return ret;
+}
+
+
+
+Reading/Writing Attribute Data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To read or write an attribute, a show() or store() method must be
+specified when declaring the attribute. The method types should be as
+simple as these, defined for device attributes:
+
+ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf);
+ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+
+In other words, they should take only an object, an attribute, and a
+buffer pointer as parameters.
+
+sysfs allocates a buffer of size (PAGE_SIZE) and passes it to the method.
+sysfs will call the method exactly once for each read or write, which
+forces the following behavior of the methods:
+
+- On read(2), the show() method should fill the entire buffer. Recall that
+  an attribute should only export one value, or an array of values of the
+  same type, so this should not be very expensive.
+
+  This allows userspace to do partial reads and seek forward arbitrarily
+  over the entire file. If userspace seeks back to zero, or does a
+  pread(2) with an offset of '0', the show() method will be called again
+  to refill the buffer.
+
+- On write(2), sysfs expects the entire buffer to be passed during the
+  first write. sysfs then passes the entire buffer to the store() method.
+
+  When writing sysfs files, a userspace process should first read the
+  entire file, modify the values it wishes to change, and then write the
+  entire buffer back.
+
+  Attribute method implementations should operate on an identical buffer
+  when reading and writing values.
+
+Notes:
+
+- Writing causes the show() method to be rearmed, regardless of the
+  current file position.
+
+- The buffer will always be PAGE_SIZE bytes in length. On i386 this is
+  4096.
+
+- show() methods should return the number of bytes printed into the
+  buffer; that is, the return value of scnprintf().
+
+- show() must not use snprintf() when formatting the value to be returned
+  to user space. You may use sprintf() if you can guarantee that an
+  overflow will never happen; otherwise you must use scnprintf().
+
+- store() should return the number of bytes used from the buffer. If the
+  entire buffer has been used, just return the count argument.
+
+- show() or store() can always return an error. If a bad value comes in,
+  be sure to return an error.
+
+- An object passed to the methods is pinned in memory via sysfs reference
+  counting on its embedded kobject. However, the physical entity the
+  object represents (such as a device) may no longer be present. Implement
+  a way to check for this, if necessary.
+
+A very simple (and naive) implementation of a device attribute is:
+
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", dev->name);
+}
+
+static ssize_t store_name(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ snprintf(dev->name, sizeof(dev->name), "%.*s",
+ (int)min(count, sizeof(dev->name) - 1), buf);
+ return count;
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_name, store_name);
+
+
+(Note that a real implementation must not allow userspace to set the name
+of a device.)
+
+Top Level Directory Layout
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The sysfs directory arrangement exposes the relationships between kernel
+data structures.
+
+The top level sysfs directories are:
+
+block/
+bus/
+class/
+dev/
+devices/
+firmware/
+net/
+fs/
+
+devices/ contains a filesystem representation of the device tree. It maps
+directly to the internal kernel device tree, reflecting the hierarchy of
+devices.
+
+bus/ contains flat directory layouts of the various bus types in the
+kernel. Each bus directory contains two subdirectories:
+
+ devices/
+ drivers/
+
+devices/ contains symlinks, one for each device discovered in the system,
+pointing to the device's directory under root/.
+
+drivers/ contains a directory for each device driver that is bound to
+devices on that particular bus (assuming that drivers do not span multiple
+bus types).
+
+fs/ contains a directory for some filesystems. Currently, each filesystem
+that wants to export attributes must create its own hierarchy below fs/
+(see Documentation/filesystems/fuse.rst).
+
+dev/ contains two subdirectories: char/ and block/. Inside these two
+directories are symlinks named <major>:<minor>. These symlinks point to
+the sysfs directories of the corresponding devices. /sys/dev provides a
+quick way to look up the sysfs interface of a device from the result of a
+stat(2) operation.
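+
+For example, the sysfs directory for the device behind an open file
+descriptor can be located as follows (a sketch; error handling omitted):
+
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+
+static void print_sysfs_path(int fd)
+{
+	struct stat st;
+
+	fstat(fd, &st);
+	/* Use "block" instead of "char" for a block device. */
+	printf("/sys/dev/char/%u:%u\n",
+	       major(st.st_rdev), minor(st.st_rdev));
+}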
+
+More information on driver-model specific features can be found in
+Documentation/driver-api/driver-model/.
+
+
+TODO: Finish this section.
+
+
+Current Interfaces
+~~~~~~~~~~~~~~~~~~
+
+The following interface layers currently exist in sysfs:
+
+- devices (include/linux/device.h)
+----------------------------------
+Structure:
+
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+};
+
+Declaring:
+
+DEVICE_ATTR(_name, _mode, _show, _store);
+
+Creation/Removal:
+
+int device_create_file(struct device *dev, const struct device_attribute * attr);
+void device_remove_file(struct device *dev, const struct device_attribute * attr);
+
+
+- bus drivers (include/linux/device.h)
+---------------------------------------
+Structure:
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct bus_type *, char * buf);
+ ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
+};
+
+Declaring:
+
+BUS_ATTR(_name, _mode, _show, _store)
+
+Creation/Removal:
+
+int bus_create_file(struct bus_type *, struct bus_attribute *);
+void bus_remove_file(struct bus_type *, struct bus_attribute *);
+
+
+- device drivers (include/linux/device.h)
+------------------------------------------
+
+Structure:
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *, char * buf);
+ ssize_t (*store)(struct device_driver *, const char * buf,
+ size_t count);
+};
+
+Declaring:
+
+DRIVER_ATTR(_name, _mode, _show, _store)
+
+Creation/Removal:
+
+int driver_create_file(struct device_driver *, const struct driver_attribute *);
+void driver_remove_file(struct device_driver *, const struct driver_attribute *);
+
+
+Documentation
+~~~~~~~~~~~~~
+
+The sysfs directory structure and the attributes in each directory define
+an ABI between the kernel and user space. As with any ABI, it is important
+that this ABI be stable and properly documented. All new sysfs attributes
+must be documented in Documentation/ABI. See Documentation/ABI/README for
+more information.
+
diff --git a/Documentation/translations/zh_TW/filesystems/tmpfs.rst b/Documentation/translations/zh_TW/filesystems/tmpfs.rst
new file mode 100644
index 000000000000..8d753a34785b
--- /dev/null
+++ b/Documentation/translations/zh_TW/filesystems/tmpfs.rst
@@ -0,0 +1,148 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: Documentation/filesystems/tmpfs.rst
+
+Translated by Wang Qing <wangqing@vivo.com>
+and Hu Haowen <src.res@email.cn>
+
+=====
+Tmpfs
+=====
+
+Tmpfs is a file system which keeps all of its files in virtual memory.
+
+Everything in tmpfs is temporary in the sense that no files will be
+created on your hard drive. If you unmount a tmpfs instance, everything
+stored therein is lost.
+
+tmpfs keeps all files in the kernel's internal caches, and is able to swap
+unneeded pages out as the file contents grow or shrink. It has a maximum
+size limit, which can be adjusted via "mount -o remount ...".
+
+Compared with ramfs (the template from which tmpfs was created), tmpfs
+adds swapping and limit checking. Another thing similar to tmpfs is the
+RAM disk (/dev/ram*), which simulates a fixed-size hard disk in physical
+RAM on top of which an ordinary filesystem can be created. Ramdisks cannot
+swap, and their size cannot be adjusted.
+
+Since tmpfs lives entirely in the page cache and in swap, all tmpfs pages
+are shown as "Shmem" in /proc/meminfo and as "Shared" in free(1). Note
+that these counters also include shared memory (shmem, see ipcs(1)). The
+most reliable way to get the numbers is to use df(1) and du(1).
+
+tmpfs has the following uses:
+
+1) The kernel always has an internal mount which you will not see at all.
+   It is used for shared anonymous mappings and SYSV shared memory.
+
+   This mount does not depend on CONFIG_TMPFS. If CONFIG_TMPFS is not set,
+   tmpfs is not visible to the user, but the internal mechanism is always
+   present.
+
+2) glibc 2.2 and above expects tmpfs to be mounted at /dev/shm for POSIX
+   shared memory (shm_open, shm_unlink). Adding the following line to
+   /etc/fstab should take care of this:
+
+	tmpfs	/dev/shm	tmpfs	defaults	0 0
+
+   Remember to create the directory that you intend to mount tmpfs on,
+   if necessary.
+
+   This mount is not needed for SYSV shared memory; the internal mount is
+   used for that by default. (In 2.3 kernel versions it was necessary to
+   mount the predecessor of tmpfs (shm fs) in order to use SYSV shared
+   memory.)
+
+3) Many people (including me) find it very convenient to mount tmpfs on
+   /tmp and /var/tmp and have a big swap partition. Loop mounts of tmpfs
+   files now work, so mkinitrd as shipped by most distributions should
+   succeed with a tmpfs /tmp.
+
+4) And probably a lot more I do not know about :-)
+
+
+tmpfs has three mount options for sizing:
+
+========= ===========================================================
+size      The limit of allocated bytes for this tmpfs instance. The
+          default is half of your physical RAM without swap. If you
+          oversize your tmpfs instances, the machine will deadlock,
+          since the OOM handler will not be able to free that memory.
+nr_blocks The same as size, but in blocks of PAGE_SIZE.
+nr_inodes The maximum number of inodes for this instance. The default
+          is half of the number of your physical RAM pages, or (on a
+          machine with highmem) the number of lowmem RAM pages,
+          whichever is smaller.
+========= ===========================================================
+
+These parameters accept a suffix k, m or g for kilo, mega and giga, and
+can be changed on remount. The size parameter also accepts a suffix % to
+limit this tmpfs instance to that percentage of your physical RAM:
+the default, when neither size nor nr_blocks is specified, is size=50%.
+
+If nr_blocks=0 (or size=0), blocks will not be limited in that instance;
+if nr_inodes=0, inodes will not be limited. It is generally unwise to
+mount with such options, since it allows any user with write access to use
+up all the memory on the machine simply by writing to the tmpfs; but it
+also improves the scalability of that instance when accessed by many CPUs
+at once.
+
+tmpfs has a mount option to set the NUMA memory allocation policy for all
+files in that instance (if CONFIG_NUMA is enabled), which can be adjusted
+via "mount -o remount ...":
+
+======================== =============================================
+mpol=default             use the process allocation policy
+                         (see set_mempolicy(2))
+mpol=prefer:Node         prefer to allocate memory from the given Node
+mpol=bind:NodeList       allocate memory only from nodes in NodeList
+mpol=interleave          prefer to allocate from each node in turn
+mpol=interleave:NodeList allocate from each node of NodeList in turn
+mpol=local               prefer to allocate memory from the local node
+======================== =============================================
+
+NodeList format is a comma-separated list of decimal numbers and ranges; a
+range is two hyphen-separated decimal numbers, the smallest and largest
+node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
+
+A memory policy with a valid NodeList will be saved, as specified, for use
+at file creation time. When a task allocates a file in the file system,
+the mount option memory policy will be applied with a NodeList, if any,
+modified by the calling task's cpuset constraints
+[see Documentation/admin-guide/cgroup-v1/cpusets.rst] and any optional
+flags, listed below. If the resulting NodeList is the empty set, the
+effective memory policy for the file will revert to "default" policy.
+
+NUMA memory allocation policies have optional flags that can be used in
+conjunction with their modes. Specifying these optional flags when tmpfs
+is mounted makes them take effect before the NodeList.
+Documentation/admin-guide/mm/numa_memory_policy.rst lists all available
+memory allocation policy mode flags and their effect on memory policy.
+
+::
+
+	=static		is equivalent to MPOL_F_STATIC_NODES
+	=relative	is equivalent to MPOL_F_RELATIVE_NODES
+
+For example, mpol=bind=static:NodeList is equivalent to an allocation
+policy of MPOL_BIND|MPOL_F_STATIC_NODES.
+
+Note that trying to mount a tmpfs with an mpol option will fail if the
+running kernel does not support NUMA, and will also fail if its NodeList
+specifies a node which is not online. If your system relies on that mount,
+but the kernel is sometimes run without NUMA capability (perhaps a safe
+recovery kernel), or with fewer nodes online, then it is advisable to omit
+the mpol option from the automatic mount options. It can be added later,
+via "mount -o remount,mpol=Policy:NodeList MountPoint".
+
+To specify the initial root directory, you can use the following mount
+options:
+
+==== ==================================
+mode The permissions as an octal number
+uid  The user id
+gid  The group id
+==== ==================================
+
+These options do not have any effect on remount. You can change these
+parameters on a mounted filesystem with chmod(1), chown(1) and chgrp(1).
+
+tmpfs has a mount option to select whether 32-bit or 64-bit inode numbers
+are used:
+
+======= =========================
+inode64 Use 64-bit inode numbers
+inode32 Use 32-bit inode numbers
+======= =========================
+
+On a 32-bit kernel, inode32 is implicit, and inode64 is refused at mount
+time. On a 64-bit kernel, CONFIG_TMPFS_INODE64 sets the default. inode64
+avoids the possibility of multiple files with the same inode number on a
+single device; but risks glibc failing with EOVERFLOW once 33-bit inode
+numbers are reached - if a long-lived tmpfs is accessed by 32-bit
+applications so ancient that opening a file larger than 2GiB fails with
+EINVAL.
+
+So 'mount -t tmpfs -o size=10G,nr_inodes=10k,mode=700 tmpfs /mytmpfs' will
+give you a tmpfs instance on /mytmpfs that can allocate 10GB of RAM/SWAP
+in 10240 inodes, and it is only accessible by root.
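+
+The same mount can be performed programmatically with mount(2); a minimal
+sketch, assuming the mount point already exists and the caller has
+CAP_SYS_ADMIN::
+
+    #include <sys/mount.h>
+
+    int mount_mytmpfs(void)
+    {
+        return mount("tmpfs", "/mytmpfs", "tmpfs", 0,
+                     "size=10G,nr_inodes=10k,mode=700");
+    }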
+
+
+:Author:
+   Christoph Rohland <cr@sap.com>, 1.12.01
+:Updated:
+   Hugh Dickins, 4 June 2007
+:Updated:
+   KOSAKI Motohiro, 16 Mar 2010
+:Updated:
+   Chris Down, 13 July 2020
+
diff --git a/Documentation/translations/zh_TW/filesystems/virtiofs.rst b/Documentation/translations/zh_TW/filesystems/virtiofs.rst
new file mode 100644
index 000000000000..2b05e84375dd
--- /dev/null
+++ b/Documentation/translations/zh_TW/filesystems/virtiofs.rst
@@ -0,0 +1,61 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_TW.rst
+
+:Original: :ref:`Documentation/filesystems/virtiofs.rst <virtiofs_index>`
+
+Translators
+::
+
+ Chinese maintainer:  Wang Wenhu <wenhu.wang@vivo.com>
+ Chinese translator:  Wang Wenhu <wenhu.wang@vivo.com>
+ Chinese proofreader: Wang Wenhu <wenhu.wang@vivo.com>
+ Traditional Chinese proofreader: Hu Haowen <src.res@email.cn>
+
+====================================================
+virtiofs: virtio-fs host<->guest shared file system
+====================================================
+
+- Copyright (C) 2020 Vivo Communication Technology Co. Ltd.
+
+Introduction
+============
+The virtiofs file system for Linux implements a driver for the
+paravirtualized VIRTIO "virtio-fs" device type, which enables guest<->host
+file system sharing. It allows a guest to mount a directory that has been
+exported on the host.
+
+Guests often require access to files residing on the host or on remote
+systems. Use cases include making files available to a new guest during
+installation, booting from a root file system located on the host,
+persistent storage for stateless or ephemeral guests, and sharing a
+directory between guests.
+
+While some of these tasks could be accomplished using existing network
+file systems, they require configuration steps that are hard to automate,
+and they expose the storage network to the guest. The virtio-fs device was
+designed to solve these problems by providing file system access without
+networking.
+
+Furthermore, the virtio-fs device takes advantage of the co-location of
+the guest and host to increase performance, and provides semantics that
+are not possible with network file systems.
+
+Usage
+=====
+Mount the file system with tag ``myfs`` on ``/mnt``:
+
+.. code-block:: sh
+
+ guest# mount -t virtiofs myfs /mnt
+
+Please see https://virtio-fs.gitlab.io/ for details on how to configure
+QEMU and the virtiofsd daemon.
+
+Internals
+=========
+Since the virtio-fs device uses the FUSE protocol for file system
+requests, the virtiofs file system for Linux is integrated closely with
+the FUSE file system client. The guest acts as the FUSE client while the
+host acts as the FUSE server. The /dev/fuse interface between the kernel
+and userspace is replaced with the virtio-fs device interface.
+
+FUSE requests are placed into a virtqueue and processed by the host. The
+host fills in the response portion of the buffer, and the guest handles
+the completion of the request.
+
+Mapping /dev/fuse to virtqueues requires solving the differences in
+semantics between /dev/fuse and virtqueues. Each time the /dev/fuse device
+is read, the FUSE client may choose which request to transfer, making it
+possible to prioritize certain requests over others. Virtqueues have queue
+semantics, and it is not possible to change the order of requests that
+have been enqueued. This is especially problematic when the virtqueue is
+full, since it is then impossible to add a high-priority request. To solve
+this difference, the virtio-fs device employs a "hiprio" (high priority)
+virtqueue dedicated to high-priority requests, separate from normal
+requests.
+
+
diff --git a/Documentation/translations/zh_TW/index.rst b/Documentation/translations/zh_TW/index.rst
index c02c4b5281e6..2a281036c406 100644
--- a/Documentation/translations/zh_TW/index.rst
+++ b/Documentation/translations/zh_TW/index.rst
@@ -89,6 +89,12 @@ TODOList:
Most of this information is taken directly from the kernel source, with
supplemental material added as needed (or at least as we manage to add
it - probably not everything that is needed).
+.. toctree::
+ :maxdepth: 2
+
+ cpu-freq/index
+ filesystems/index
+
TODOList:
* driver-api/index
@@ -97,7 +103,6 @@ TODOList:
* accounting/index
* block/index
* cdrom/index
-* cpu-freq/index
* ide/index
* fb/index
* fpga/index
@@ -123,7 +128,6 @@ TODOList:
* security/index
* sound/index
* crypto/index
-* filesystems/index
* vm/index
* bpf/index
* usb/index
@@ -136,6 +140,11 @@ TODOList:
Architecture-agnostic documentation
-----------------------------------
+.. toctree::
+ :maxdepth: 2
+
+ arm64/index
+
TODOList:
* asm-annotations
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 0b5eefed027e..c432be070f67 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -27,6 +27,7 @@ place where this information is gathered.
iommu
media/index
sysfs-platform_profile
+ vduse
.. only:: subproject and html
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index b7070d76f076..2e8134059c87 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -299,6 +299,7 @@ Code Seq# Include File Comments
'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
'|' 00-7F linux/media.h
0x80 00-1F linux/fb.h
+0x81 00-1F linux/vduse.h
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
diff --git a/Documentation/userspace-api/vduse.rst b/Documentation/userspace-api/vduse.rst
new file mode 100644
index 000000000000..42ef59ea5314
--- /dev/null
+++ b/Documentation/userspace-api/vduse.rst
@@ -0,0 +1,233 @@
+==================================
+VDUSE - "vDPA Device in Userspace"
+==================================
+
+A vDPA (virtio data path acceleration) device is a device whose datapath
+complies with the virtio specification, but whose control path is vendor
+specific. vDPA devices can be physically located on the hardware or
+emulated by software. VDUSE is a framework that makes it possible to
+implement software-emulated vDPA devices in userspace. To make the device
+emulation more secure, the emulated vDPA device's control path is handled
+in the kernel and only the data path is implemented in userspace.
+
+Note that only the virtio block device is supported by the VDUSE framework
+now, which can reduce security risks when the userspace process that
+implements the data path is run by an unprivileged user. Support for other
+device types can be added in the future, once the security issues of the
+corresponding device drivers have been clarified or fixed.
+
+Create/Destroy VDUSE devices
+----------------------------
+
+VDUSE devices are created as follows:
+
+1. Create a new VDUSE instance with ioctl(VDUSE_CREATE_DEV) on
+ /dev/vduse/control.
+
+2. Setup each virtqueue with ioctl(VDUSE_VQ_SETUP) on /dev/vduse/$NAME.
+
+3. Begin processing VDUSE messages from /dev/vduse/$NAME. The first
+ messages will arrive while attaching the VDUSE instance to vDPA bus.
+
+4. Send the VDPA_CMD_DEV_NEW netlink message to attach the VDUSE
+ instance to vDPA bus.
+
+VDUSE devices are destroyed as follows:
+
+1. Send the VDPA_CMD_DEV_DEL netlink message to detach the VDUSE
+ instance from vDPA bus.
+
+2. Close the file descriptor referring to /dev/vduse/$NAME.
+
+3. Destroy the VDUSE instance with ioctl(VDUSE_DESTROY_DEV) on
+ /dev/vduse/control.
+
+The netlink messages can be sent via the vdpa tool in iproute2, or by
+using code like the sample below:
+
+.. code-block:: c
+
+ static int netlink_add_vduse(const char *name, enum vdpa_command cmd)
+ {
+ struct nl_sock *nlsock;
+ struct nl_msg *msg;
+ int famid;
+
+ nlsock = nl_socket_alloc();
+ if (!nlsock)
+ return -ENOMEM;
+
+ if (genl_connect(nlsock))
+ goto free_sock;
+
+ famid = genl_ctrl_resolve(nlsock, VDPA_GENL_NAME);
+ if (famid < 0)
+ goto close_sock;
+
+ msg = nlmsg_alloc();
+ if (!msg)
+ goto close_sock;
+
+ if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, famid, 0, 0, cmd, 0))
+ goto nla_put_failure;
+
+ NLA_PUT_STRING(msg, VDPA_ATTR_DEV_NAME, name);
+ if (cmd == VDPA_CMD_DEV_NEW)
+ NLA_PUT_STRING(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, "vduse");
+
+ if (nl_send_sync(nlsock, msg))
+ goto close_sock;
+
+ nl_close(nlsock);
+ nl_socket_free(nlsock);
+
+ return 0;
+ nla_put_failure:
+ nlmsg_free(msg);
+ close_sock:
+ nl_close(nlsock);
+ free_sock:
+ nl_socket_free(nlsock);
+ return -1;
+ }
+
+How VDUSE works
+---------------
+
+As mentioned above, a VDUSE device is created by ioctl(VDUSE_CREATE_DEV) on
+/dev/vduse/control. With this ioctl, userspace can specify some basic
+configuration for the emulated device, such as the device name (which
+uniquely identifies a VDUSE device), the virtio features, the virtio
+configuration space and the number of virtqueues. Then a char device
+interface (/dev/vduse/$NAME) is exported to userspace for device
+emulation. Userspace can use the VDUSE_VQ_SETUP ioctl on /dev/vduse/$NAME
+to add per-virtqueue configuration, such as the maximum size of the
+virtqueue, to the device.
+
+After initialization, the VDUSE device can be attached to the vDPA bus via
+the VDPA_CMD_DEV_NEW netlink message. Userspace needs to read()/write() on
+/dev/vduse/$NAME to receive control messages from, and send replies to,
+the VDUSE kernel module, as follows:
+
+.. code-block:: c
+
+ static int vduse_message_handler(int dev_fd)
+ {
+ int len;
+ struct vduse_dev_request req;
+ struct vduse_dev_response resp;
+
+ len = read(dev_fd, &req, sizeof(req));
+ if (len != sizeof(req))
+ return -1;
+
+ resp.request_id = req.request_id;
+
+ switch (req.type) {
+
+ /* handle different types of messages */
+
+ }
+
+ len = write(dev_fd, &resp, sizeof(resp));
+ if (len != sizeof(resp))
+ return -1;
+
+ return 0;
+ }
+
+Three types of messages are currently introduced by the VDUSE framework:
+
+- VDUSE_GET_VQ_STATE: Get the state of a virtqueue. Userspace should
+  return the avail index for a split virtqueue, or the device/driver ring
+  wrap counters and the avail and used indexes for a packed virtqueue.
+
+- VDUSE_SET_STATUS: Set the device status. Userspace should follow
+  the virtio spec: https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.html
+  to process this message. For example, fail to set the FEATURES_OK device
+  status bit if the device cannot accept the negotiated virtio features
+  obtained via the VDUSE_DEV_GET_FEATURES ioctl.
+
+- VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for
+  the specified IOVA range. Userspace should first remove the old mapping,
+  then set up the new mapping via the VDUSE_IOTLB_GET_FD ioctl, as
+  sketched below.
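+
+As an illustration, handling this message might look like the sketch
+below. The ``iova`` field is assumed to follow the struct
+vduse_dev_request layout in include/uapi/linux/vduse.h, and
+invalidate_cached_mappings() is a hypothetical helper that munmap()s the
+entries cached by the iova_to_va() routine shown later:
+
+.. code-block:: c
+
+    static void handle_update_iotlb(struct vduse_dev_request *req,
+                                    struct vduse_dev_response *resp)
+    {
+        /* Drop any cached mapping overlapping the invalidated range;
+         * req->iova is assumed to carry the [start, last] IOVA range. */
+        invalidate_cached_mappings(req->iova.start, req->iova.last);
+
+        /* New mappings are re-created lazily via VDUSE_IOTLB_GET_FD. */
+        resp->result = VDUSE_REQ_RESULT_OK;
+    }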
+
+After the DRIVER_OK status bit is set via the VDUSE_SET_STATUS message,
+userspace is able to start the dataplane processing as follows:
+
+1. Get the specified virtqueue's information with the VDUSE_VQ_GET_INFO
+   ioctl, including the size, the IOVAs of the descriptor table, the
+   available ring and the used ring, the state and the ready status.
+
+2. Pass the above IOVAs to the VDUSE_IOTLB_GET_FD ioctl so that those IOVA
+   regions can be mapped into userspace. Some sample code is shown below:
+
+.. code-block:: c
+
+ static int perm_to_prot(uint8_t perm)
+ {
+ int prot = 0;
+
+ switch (perm) {
+ case VDUSE_ACCESS_WO:
+ prot |= PROT_WRITE;
+ break;
+ case VDUSE_ACCESS_RO:
+ prot |= PROT_READ;
+ break;
+ case VDUSE_ACCESS_RW:
+ prot |= PROT_READ | PROT_WRITE;
+ break;
+ }
+
+ return prot;
+ }
+
+ static void *iova_to_va(int dev_fd, uint64_t iova, uint64_t *len)
+ {
+ int fd;
+ void *addr;
+ size_t size;
+ struct vduse_iotlb_entry entry;
+
+ entry.start = iova;
+ entry.last = iova;
+
+ /*
+ * Find the first IOVA region that overlaps with the specified
+ * range [start, last] and return the corresponding file descriptor.
+ */
+ fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, &entry);
+ if (fd < 0)
+ return NULL;
+
+ size = entry.last - entry.start + 1;
+ *len = entry.last - iova + 1;
+ addr = mmap(0, size, perm_to_prot(entry.perm), MAP_SHARED,
+ fd, entry.offset);
+ close(fd);
+ if (addr == MAP_FAILED)
+ return NULL;
+
+ /*
+ * Using some data structures such as linked list to store
+ * the iotlb mapping. The munmap(2) should be called for the
+ * cached mapping when the corresponding VDUSE_UPDATE_IOTLB
+ * message is received or the device is reset.
+ */
+
+ return addr + iova - entry.start;
+ }
+
+3. Setup the kick eventfd for the specified virtqueues with the
+   VDUSE_VQ_SETUP_KICKFD ioctl. The kick eventfd is used by the VDUSE
+   kernel module to notify userspace to consume the available ring. This
+   is optional since userspace can choose to poll the available ring
+   instead.
+
+4. Listen to the kick eventfd (optional) and consume the available ring.
+   The buffers described by the descriptors in the descriptor table should
+   also be mapped into userspace via the VDUSE_IOTLB_GET_FD ioctl before
+   being accessed.
+
+5. Inject an interrupt for a specific virtqueue with the
+   VDUSE_INJECT_VQ_IRQ ioctl after the used ring is filled. A sketch of
+   steps 4 and 5 follows.
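+
+A sketch of steps 4 and 5 for one virtqueue: block on the kick eventfd,
+process the ring (elided here), then inject the interrupt. The helper name
+and the surrounding event loop are assumptions of this example; the ioctl
+is assumed to take a pointer to the virtqueue index, per
+include/uapi/linux/vduse.h:
+
+.. code-block:: c
+
+    static int process_vq(int dev_fd, int kick_fd, uint32_t vq_index)
+    {
+        uint64_t kicks;
+
+        /* Block until the driver kicks this virtqueue. */
+        if (read(kick_fd, &kicks, sizeof(kicks)) != sizeof(kicks))
+            return -1;
+
+        /* ... consume the available ring, fill the used ring ... */
+
+        /* Notify the driver that used entries are ready. */
+        return ioctl(dev_fd, VDUSE_INJECT_VQ_IRQ, &vq_index);
+    }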
+
+For more details on the uAPI, please see include/uapi/linux/vduse.h.
diff --git a/Documentation/vm/damon/api.rst b/Documentation/vm/damon/api.rst
new file mode 100644
index 000000000000..08f34df45523
--- /dev/null
+++ b/Documentation/vm/damon/api.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+API Reference
+=============
+
+Kernel space programs can use every feature of DAMON via the APIs
+described below. All you need to do is include ``damon.h``, which is
+located in ``include/linux/`` of the source tree.
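+
+For a flavor of the interface, below is a rough sketch of starting virtual
+address space monitoring of one process. The attribute values are
+arbitrary, and the signatures are assumptions based on ``damon.h`` at the
+time of writing; the kernel-doc below is authoritative::
+
+    #include <linux/damon.h>
+
+    static struct damon_ctx *ctx;
+
+    static int start_monitoring(unsigned long pid)
+    {
+        ctx = damon_new_ctx();
+        if (!ctx)
+            return -ENOMEM;
+        /* Monitor the virtual address space of the given process. */
+        damon_va_set_primitives(ctx);
+        damon_set_targets(ctx, &pid, 1);
+        /* 5ms sampling, 100ms aggregation, 1s regions update,
+         * 10 to 1000 regions. */
+        damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
+        return damon_start(&ctx, 1);
+    }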
+
+Structures
+==========
+
+.. kernel-doc:: include/linux/damon.h
+
+
+Functions
+=========
+
+.. kernel-doc:: mm/damon/core.c
diff --git a/Documentation/vm/damon/design.rst b/Documentation/vm/damon/design.rst
new file mode 100644
index 000000000000..b05159c295f4
--- /dev/null
+++ b/Documentation/vm/damon/design.rst
@@ -0,0 +1,166 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======
+Design
+======
+
+Configurable Layers
+===================
+
+DAMON provides data access monitoring functionality while making the
+accuracy and the overhead controllable. The fundamental access monitoring
+requires primitives that depend on, and are optimized for, the target
+address space. On the other hand, the accuracy/overhead tradeoff
+mechanism, which is the core of DAMON, lives in pure logic space. DAMON
+separates the two parts into different layers and defines an interface
+between them, so that various implementations of the low level primitives
+can be configured with the core logic.
+
+Due to this separated design and the configurable interface, users can
+extend DAMON to any address space by configuring the core logic with
+appropriate implementations of the low level primitives. If an appropriate
+one is not provided, users can implement the primitives on their own.
+
+For example, physical memory, virtual memory, swap space, those for specific
+processes, NUMA nodes, files, and backing memory devices would be supportable.
+Also, if some architectures or devices support special optimized access check
+primitives, those will be easily configurable.
+
+
+Reference Implementations of Address Space Specific Primitives
+==============================================================
+
+The low level primitives for the fundamental access monitoring are defined in
+two parts:
+
+1. Identification of the monitoring target address range for the address space.
+2. Access check of specific address range in the target space.
+
+DAMON currently provides implementations of the primitives for the virtual
+address space only. The two subsections below describe how they work.
+
+
+VMA-based Target Address Range Construction
+-------------------------------------------
+
+Only small parts of the super-huge virtual address space of a process are
+mapped to physical memory and accessed. Thus, tracking the unmapped
+address regions is just wasteful. However, because DAMON can deal with
+some level of noise using the adaptive regions adjustment mechanism,
+tracking every mapping is not strictly required, and could even incur a
+high overhead in some cases. That said, too-huge unmapped areas inside the
+monitoring target should be removed so that the adaptive mechanism does
+not waste time on them.
+
+For this reason, this implementation converts the complex mappings into
+three distinct regions that cover every mapped area of the address space.
+The two gaps between the three regions are the two biggest unmapped areas
+in the given address space. In most cases, these are the gap between the
+heap and the uppermost mmap()-ed region, and the gap between the lowermost
+mmap()-ed region and the stack. Because these gaps are exceptionally huge
+in usual address spaces, excluding them is sufficient to make a reasonable
+trade-off. Below shows this in detail::
+
+ <heap>
+ <BIG UNMAPPED REGION 1>
+ <uppermost mmap()-ed region>
+ (small mmap()-ed regions and munmap()-ed regions)
+ <lowermost mmap()-ed region>
+ <BIG UNMAPPED REGION 2>
+ <stack>
+
+
+PTE Accessed-bit Based Access Check
+-----------------------------------
+
+The implementation for the virtual address space uses the PTE Accessed bit
+for basic access checks. It finds the relevant PTE Accessed bit for an
+address by walking the page table of the target task. In this way, the
+implementation finds and clears the bit for the next sampling target
+address, and checks whether the bit has been set again after one sampling
+period. This could disturb other kernel subsystems that use the Accessed
+bits, namely Idle page tracking and the reclaim logic. To avoid such
+disturbances, DAMON makes itself mutually exclusive with Idle page
+tracking, and uses the ``PG_idle`` and ``PG_young`` page flags to solve
+the conflict with the reclaim logic, as Idle page tracking does.
+
+
+Address Space Independent Core Mechanisms
+=========================================
+
+The four sections below describe each of the DAMON core mechanisms and the
+five monitoring attributes: ``sampling interval``, ``aggregation
+interval``, ``regions update interval``, ``minimum number of regions``,
+and ``maximum number of regions``.
+
+
+Access Frequency Monitoring
+---------------------------
+
+The output of DAMON says which pages are how frequently accessed over a
+given duration. The resolution of the access frequency is controlled by
+setting the ``sampling interval`` and the ``aggregation interval``. In
+detail, DAMON checks access to each page once per ``sampling interval``
+and aggregates the results; in other words, it counts the number of
+accesses to each page. After each ``aggregation interval`` passes, DAMON
+calls the callback functions that were previously registered by users, so
+that users can read the aggregated results, and then clears the results.
+This can be described by the simple pseudo-code below::
+
+ while monitoring_on:
+ for page in monitoring_target:
+ if accessed(page):
+ nr_accesses[page] += 1
+ if time() % aggregation_interval == 0:
+ for callback in user_registered_callbacks:
+ callback(monitoring_target, nr_accesses)
+ for page in monitoring_target:
+ nr_accesses[page] = 0
+ sleep(sampling interval)
+
+The monitoring overhead of this mechanism grows arbitrarily as the size of
+the target workload grows.
+
+
+Region Based Sampling
+---------------------
+
+To avoid the unbounded increase of the overhead, DAMON groups adjacent
+pages that are assumed to have the same access frequencies into a region.
+As long as the assumption (pages in a region have the same access
+frequencies) holds, only one page in the region needs to be checked. Thus,
+for each ``sampling interval``, DAMON randomly picks one page in each
+region, waits for one ``sampling interval``, checks whether the page was
+accessed in the meantime, and increases the access frequency of the region
+if so. Therefore, the monitoring overhead is controllable by setting the
+number of regions. DAMON allows users to set the minimum and maximum
+number of regions for the trade-off; the mechanism is sketched in
+pseudo-code below.
+
+This scheme, however, cannot preserve the quality of the output if the
+assumption is not guaranteed.
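+
+Extending the pseudo-code above, region based sampling works roughly as
+follows::
+
+    while monitoring_on:
+        for region in monitoring_target:
+            region.sampled_page = pick_random_page(region)
+            clear_accessed_bit(region.sampled_page)
+        sleep(sampling_interval)
+        for region in monitoring_target:
+            if accessed_bit_set(region.sampled_page):
+                region.nr_accesses += 1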
+
+
+Adaptive Regions Adjustment
+---------------------------
+
+Even if the initial monitoring target regions are well constructed to
+fulfill the assumption (pages in the same region have similar access
+frequencies), the data access pattern can change dynamically. This will
+result in low monitoring quality. To keep the assumption valid as much as
+possible, DAMON adaptively merges and splits each region based on its
+access frequency.
+
+For each ``aggregation interval``, it compares the access frequencies of
+adjacent regions and merges them if the frequency difference is small.
+Then, after it reports and clears the aggregated access frequency of each
+region, it splits each region into two or three regions if the total
+number of regions will not exceed the user-specified maximum number of
+regions after the split.
+
+In this way, DAMON provides its best-effort quality and minimal overhead while
+keeping the bounds users set for their trade-off.
+
+
+Dynamic Target Space Updates Handling
+-------------------------------------
+
+The monitoring target address range can change dynamically. For example,
+virtual memory can be dynamically mapped and unmapped. Physical memory can
+be hot-plugged.
+
+As such changes can be quite frequent in some cases, DAMON checks for
+dynamic memory mapping changes and applies them to the abstracted target
+areas only once per user-specified time interval (the ``regions update
+interval``).
diff --git a/Documentation/vm/damon/faq.rst b/Documentation/vm/damon/faq.rst
new file mode 100644
index 000000000000..cb3d8b585a8b
--- /dev/null
+++ b/Documentation/vm/damon/faq.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+Frequently Asked Questions
+==========================
+
+Why a new subsystem, instead of extending perf or other user space tools?
+=========================================================================
+
+First, because it needs to be as lightweight as possible so that it can be
+used online, any unnecessary overhead, such as the kernel - user space
+context switching cost, should be avoided. Second, DAMON aims to be used
+by other programs, including the kernel itself. Therefore, having a
+dependency on specific tools like perf is not desirable. These are the two
+biggest reasons why DAMON is implemented in the kernel space.
+
+
+Can 'idle pages tracking' or 'perf mem' substitute DAMON?
+=========================================================
+
+Idle page tracking is a low level primitive for access checks of the
+physical address space. 'perf mem' is similar, though it can use sampling
+to minimize the overhead. On the other hand, DAMON is a higher-level
+framework for the monitoring of various address spaces. It is focused on
+memory management optimization and provides sophisticated
+accuracy/overhead handling mechanisms. Therefore, 'idle pages tracking'
+and 'perf mem' could provide a subset of DAMON's output, but cannot
+substitute for DAMON.
+
+
+Does DAMON support virtual memory only?
+=======================================
+
+No. The core of DAMON is address space independent. The address space
+specific low level primitive parts, including the construction of the
+monitoring target regions and the actual access checks, can be implemented
+and configured on the DAMON core by the users. In this way, DAMON users
+can monitor any address space with any access check technique.
+
+Nonetheless, DAMON provides implementations of the address space dependent
+functions for virtual memory by default, based on VMA tracking and PTE
+Accessed bit checks, for reference and for convenient use. In the near
+future, we will provide implementations for the physical memory address
+space.
+
+
+Can I simply monitor page granularity?
+======================================
+
+Yes. You can do so by setting the ``min_nr_regions`` attribute to a value
+higher than the working set size divided by the page size. For example,
+with 4 KiB pages and a 1 GiB working set, set it above 262144. Because the
+size of each monitoring target region is forced to be ``>=page size``, the
+region split will then have no effect.
diff --git a/Documentation/vm/damon/index.rst b/Documentation/vm/damon/index.rst
new file mode 100644
index 000000000000..a2858baf3bf1
--- /dev/null
+++ b/Documentation/vm/damon/index.rst
@@ -0,0 +1,30 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+DAMON: Data Access MONitor
+==========================
+
+DAMON is a data access monitoring framework subsystem for the Linux kernel.
+The core mechanisms of DAMON (refer to :doc:`design` for the detail) make it
+
+ - *accurate* (the monitoring output is useful enough for DRAM level memory
+   management; it might not be appropriate for CPU cache levels, though),
+ - *light-weight* (the monitoring overhead is low enough to be applied online),
+ and
+ - *scalable* (the upper-bound of the overhead is in constant range regardless
+ of the size of target workloads).
+
+Using this framework, therefore, the kernel's memory management mechanisms
+can make advanced decisions. Experimental memory management optimizations
+that would incur too much data access monitoring overhead could be
+implemented again. In user space, meanwhile, users who have some special
+workloads can write personalized applications for a better understanding
+of their workloads and systems, and for optimizing them.
+
+.. toctree::
+ :maxdepth: 2
+
+ faq
+ design
+ api
+ plans
diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
index eff5fbd492d0..b51f0d8992f8 100644
--- a/Documentation/vm/index.rst
+++ b/Documentation/vm/index.rst
@@ -32,6 +32,7 @@ descriptions of data structures and algorithms.
arch_pgtable_helpers
balance
cleancache
+ damon/index
free_page_reporting
frontswap
highmem
diff --git a/Documentation/x86/x86_64/mm.rst b/Documentation/x86/x86_64/mm.rst
index ede1875719fb..9798676bb0bf 100644
--- a/Documentation/x86/x86_64/mm.rst
+++ b/Documentation/x86/x86_64/mm.rst
@@ -140,10 +140,6 @@ The direct mapping covers all memory in the system up to the highest
memory address (this means in some cases it can also include PCI memory
holes).
-vmalloc space is lazily synchronized into the different PML4/PML5 pages of
-the processes using the page fault handler, with init_top_pgt as
-reference.
-
We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
memory window (this size is arbitrary, it can be raised later if needed).
The mappings are not part of any other kernel PGD and are only available
diff --git a/MAINTAINERS b/MAINTAINERS
index a594d5d7edcc..eeb4c70b3d5b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -333,7 +333,7 @@ S: Maintained
F: drivers/platform/x86/acer-wmi.c
ACPI
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <lenb@kernel.org>
L: linux-acpi@vger.kernel.org
S: Supported
@@ -354,7 +354,7 @@ F: include/linux/fwnode.h
F: tools/power/acpi/
ACPI APEI
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <lenb@kernel.org>
R: James Morse <james.morse@arm.com>
R: Tony Luck <tony.luck@intel.com>
@@ -364,7 +364,6 @@ F: drivers/acpi/apei/
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
-M: Erik Kaneda <erik.kaneda@intel.com>
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
L: linux-acpi@vger.kernel.org
L: devel@acpica.org
@@ -403,7 +402,7 @@ S: Maintained
F: drivers/platform/x86/i2c-multi-instantiate.c
ACPI PMIC DRIVERS
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <lenb@kernel.org>
R: Andy Shevchenko <andy@kernel.org>
R: Mika Westerberg <mika.westerberg@linux.intel.com>
@@ -985,6 +984,12 @@ S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/pm/powerplay/
+AMD PTDMA DRIVER
+M: Sanjay R Mehta <sanju.mehta@amd.com>
+L: dmaengine@vger.kernel.org
+S: Maintained
+F: drivers/dma/ptdma/
+
AMD SEATTLE DEVICE TREE SUPPORT
M: Brijesh Singh <brijeshkumar.singh@amd.com>
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
@@ -2328,14 +2333,14 @@ N: oxnas
ARM/PALM TREO SUPPORT
M: Tomas Cech <sleep_walker@suse.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://hackndev.com
F: arch/arm/mach-pxa/palmtreo.*
ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://hackndev.com
F: arch/arm/mach-pxa/include/mach/palmld.h
@@ -2349,7 +2354,7 @@ F: arch/arm/mach-pxa/palmtx.c
ARM/PALMZ72 SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://hackndev.com
F: arch/arm/mach-pxa/palmz72.*
@@ -2519,7 +2524,7 @@ N: s5pv210
ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
M: Andrzej Hajda <a.hajda@samsung.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-g2d/
@@ -2536,14 +2541,14 @@ ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
M: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-jpeg/
ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
M: Andrzej Hajda <a.hajda@samsung.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-mfc/
@@ -2733,11 +2738,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwamatsu/linux-visconti.git
F: Documentation/devicetree/bindings/arm/toshiba.yaml
F: Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
F: Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
+F: Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
F: Documentation/devicetree/bindings/pinctrl/toshiba,tmpv7700-pinctrl.yaml
F: Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml
F: arch/arm64/boot/dts/toshiba/
F: drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
F: drivers/gpio/gpio-visconti.c
+F: drivers/pci/controller/dwc/pcie-visconti.c
F: drivers/pinctrl/visconti/
F: drivers/watchdog/visconti_wdt.c
N: visconti
@@ -3306,7 +3313,6 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: block/
F: drivers/block/
-F: fs/block_dev.c
F: include/linux/blk*
F: kernel/trace/blktrace.c
F: lib/sbitmap.c
@@ -3560,7 +3566,7 @@ BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
M: bcm-kernel-feedback-list@broadcom.com
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/bcm470*
F: arch/arm/boot/dts/bcm5301*
@@ -3570,7 +3576,7 @@ F: arch/arm/mach-bcm/bcm_5301x.c
BROADCOM BCM53573 ARM ARCHITECTURE
M: Rafał Miłecki <rafal@milecki.pl>
L: bcm-kernel-feedback-list@broadcom.com
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/bcm47189*
F: arch/arm/boot/dts/bcm53573*
@@ -4524,7 +4530,7 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
-L: clang-built-linux@googlegroups.com
+L: llvm@lists.linux.dev
S: Supported
W: https://clangbuiltlinux.github.io/
B: https://github.com/ClangBuiltLinux/linux/issues
@@ -4540,7 +4546,7 @@ M: Sami Tolvanen <samitolvanen@google.com>
M: Kees Cook <keescook@chromium.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
-L: clang-built-linux@googlegroups.com
+L: llvm@lists.linux.dev
S: Supported
B: https://github.com/ClangBuiltLinux/linux/issues
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/clang/features
@@ -4678,6 +4684,7 @@ F: drivers/platform/x86/compal-laptop.c
COMPILER ATTRIBUTES
M: Miguel Ojeda <ojeda@kernel.org>
+R: Nick Desaulniers <ndesaulniers@google.com>
S: Maintained
F: include/linux/compiler_attributes.h
@@ -4819,7 +4826,7 @@ W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
F: drivers/cpufreq/vexpress-spc-cpufreq.c
CPU FREQUENCY SCALING FRAMEWORK
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Viresh Kumar <viresh.kumar@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
@@ -4837,7 +4844,7 @@ F: kernel/sched/cpufreq*.c
F: tools/testing/selftests/cpufreq/
CPU IDLE TIME MANAGEMENT FRAMEWORK
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
@@ -4866,7 +4873,7 @@ CPUIDLE DRIVER - ARM BIG LITTLE
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: drivers/cpuidle/cpuidle-big_little.c
@@ -4886,14 +4893,14 @@ CPUIDLE DRIVER - ARM PSCI
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/cpuidle/cpuidle-psci.c
CPUIDLE DRIVER - ARM PSCI PM DOMAIN
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/cpuidle/cpuidle-psci.h
F: drivers/cpuidle/cpuidle-psci-domain.c
@@ -5147,6 +5154,17 @@ F: net/ax25/ax25_out.c
F: net/ax25/ax25_timer.c
F: net/ax25/sysctl_net_ax25.c
+DATA ACCESS MONITOR
+M: SeongJae Park <sjpark@amazon.de>
+L: linux-mm@kvack.org
+S: Maintained
+F: Documentation/admin-guide/mm/damon/
+F: Documentation/vm/damon/
+F: include/linux/damon.h
+F: include/trace/events/damon.h
+F: mm/damon/
+F: tools/testing/selftests/damon/
+
DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
L: netdev@vger.kernel.org
S: Orphan
@@ -7253,7 +7271,7 @@ F: tools/firewire/
FIRMWARE FRAMEWORK FOR ARMV8-A
M: Sudeep Holla <sudeep.holla@arm.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/firmware/arm_ffa/
F: include/linux/arm_ffa.h
@@ -7432,7 +7450,7 @@ F: include/linux/platform_data/video-imxfb.h
FREESCALE IMX DDR PMU DRIVER
M: Frank Li <Frank.li@nxp.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/admin-guide/perf/imx-ddr.rst
F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml
@@ -7524,7 +7542,7 @@ F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC DRIVERS
M: Li Yang <leoyang.li@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
F: Documentation/devicetree/bindings/soc/fsl/
@@ -7572,7 +7590,7 @@ W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
F: fs/freevxfs/
FREEZER
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
S: Supported
@@ -7825,7 +7843,7 @@ S: Supported
F: drivers/i2c/muxes/i2c-demux-pinctrl.c
GENERIC PM DOMAINS
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Kevin Hilman <khilman@kernel.org>
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
@@ -8291,7 +8309,7 @@ W: http://drama.obuda.kando.hu/~fero/cgi-bin/hgafb.shtml
F: drivers/video/fbdev/hgafb.c
HIBERNATION (aka Software Suspend, aka swsusp)
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
S: Supported
@@ -10604,10 +10622,10 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: drivers/ata/sata_promise.*
LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
-M: Jens Axboe <axboe@kernel.dk>
+M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
L: linux-ide@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git
F: Documentation/devicetree/bindings/ata/
F: drivers/ata/
F: include/linux/ata.h
@@ -11172,7 +11190,7 @@ F: drivers/net/wireless/marvell/libertas/
MARVELL MACCHIATOBIN SUPPORT
M: Russell King <linux@armlinux.org.uk>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
@@ -12620,6 +12638,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
+M: Luis Chamberlain <mcgrof@kernel.org>
M: Jessica Yu <jeyu@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -13390,7 +13409,7 @@ F: include/linux/nvme-fc.h
NVM EXPRESS TARGET DRIVER
M: Christoph Hellwig <hch@lst.de>
M: Sagi Grimberg <sagi@grimberg.me>
-M: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+M: Chaitanya Kulkarni <kch@nvidia.com>
L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
@@ -14252,7 +14271,7 @@ F: drivers/pci/controller/pcie-altera.c
PCI DRIVER FOR APPLIEDMICRO XGENE
M: Toan Le <toan@os.amperecomputing.com>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/xgene-pci.txt
F: drivers/pci/controller/pci-xgene.c
@@ -14260,7 +14279,7 @@ F: drivers/pci/controller/pci-xgene.c
PCI DRIVER FOR ARM VERSATILE PLATFORM
M: Rob Herring <robh@kernel.org>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/versatile.yaml
F: drivers/pci/controller/pci-versatile.c
@@ -14268,7 +14287,7 @@ F: drivers/pci/controller/pci-versatile.c
PCI DRIVER FOR ARMADA 8K
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/controller/dwc/pcie-armada8k.c
@@ -14286,7 +14305,7 @@ M: Mingkai Hu <mingkai.hu@nxp.com>
M: Roy Zang <roy.zang@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/controller/dwc/*layerscape*
@@ -14366,7 +14385,7 @@ F: drivers/pci/controller/pci-tegra.c
PCI DRIVER FOR NXP LAYERSCAPE GEN4 CONTROLLER
M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -14401,7 +14420,7 @@ PCI DRIVER FOR TI DRA7XX/J721E
M: Kishon Vijay Abraham I <kishon@ti.com>
L: linux-omap@vger.kernel.org
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/pci/ti-pci.txt
F: drivers/pci/controller/cadence/pci-j721e.c
@@ -14457,7 +14476,7 @@ F: drivers/pci/controller/pcie-altera-msi.c
PCI MSI DRIVER FOR APPLIEDMICRO XGENE
M: Toan Le <toan@os.amperecomputing.com>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
F: drivers/pci/controller/pci-xgene-msi.c
@@ -14541,6 +14560,13 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
F: drivers/pci/controller/dwc/pcie-histb.c
+PCIE DRIVER FOR INTEL KEEM BAY
+M: Srikanth Thokala <srikanth.thokala@intel.com>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/intel,keembay-pcie*
+F: drivers/pci/controller/dwc/pcie-keembay.c
+
PCIE DRIVER FOR INTEL LGM GW SOC
M: Rahul Tanwar <rtanwar@maxlinear.com>
L: linux-pci@vger.kernel.org
@@ -14942,7 +14968,7 @@ F: kernel/time/*timer*
F: kernel/time/namespace.c
POWER MANAGEMENT CORE
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
B: https://bugzilla.kernel.org
@@ -14967,7 +14993,7 @@ F: include/linux/dtpm.h
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/firmware/psci/
F: include/linux/psci.h
@@ -15492,7 +15518,7 @@ F: arch/hexagon/
QUALCOMM HIDMA DRIVER
M: Sinan Kaya <okaya@kernel.org>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-arm-msm@vger.kernel.org
L: dmaengine@vger.kernel.org
S: Supported
@@ -17206,7 +17232,7 @@ SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC)
M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
M: Sudeep Holla <sudeep.holla@arm.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/firmware/smccc/
F: include/linux/arm-smccc.h
@@ -17323,7 +17349,7 @@ F: drivers/media/pci/solo6x10/
SOFTWARE DELEGATED EXCEPTION INTERFACE (SDEI)
M: James Morse <james.morse@arm.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/firmware/sdei.txt
F: drivers/firmware/arm_sdei.c
@@ -17920,7 +17946,7 @@ F: arch/sh/
F: drivers/sh/
SUSPEND TO RAM
-M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <len.brown@intel.com>
M: Pavel Machek <pavel@ucw.cz>
L: linux-pm@vger.kernel.org
@@ -18110,7 +18136,7 @@ F: drivers/mfd/syscon.c
SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers
M: Sudeep Holla <sudeep.holla@arm.com>
R: Cristian Marussi <cristian.marussi@arm.com>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/firmware/arm,sc[mp]i.yaml
F: drivers/clk/clk-sc[mp]i.c
@@ -18483,7 +18509,7 @@ TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
M: Nishanth Menon <nm@ti.com>
M: Tero Kristo <kristo@kernel.org>
M: Santosh Shilimkar <ssantosh@kernel.org>
-L: linux-arm-kernel@lists.infradead.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml
F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
@@ -18540,6 +18566,7 @@ F: drivers/thermal/
F: include/linux/cpu_cooling.h
F: include/linux/thermal.h
F: include/uapi/linux/thermal.h
+F: tools/thermal/
THERMAL DRIVER FOR AMLOGIC SOCS
M: Guillaume La Roque <glaroque@baylibre.com>
diff --git a/Makefile b/Makefile
index d45fc2edf186..34a0afc3a8eb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
-PATCHLEVEL = 14
+PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Opossums on Parade
# *DOCUMENTATION*
@@ -802,6 +802,8 @@ else
# Disabled for clang while comment to attribute conversion happens and
# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,)
+# gcc inanely warns about local variables called 'main'
+KBUILD_CFLAGS += -Wno-main
endif
# These warnings generated too much noise in a regular build.
@@ -847,12 +849,6 @@ endif
DEBUG_CFLAGS :=
-# Workaround for GCC versions < 5.0
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
-ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
-endif
-
ifdef CONFIG_DEBUG_INFO
ifdef CONFIG_DEBUG_INFO_SPLIT
diff --git a/arch/Kconfig b/arch/Kconfig
index 3743174da870..8df1c7102643 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -889,7 +889,7 @@ config HAVE_SOFTIRQ_ON_OWN_STACK
bool
help
Architecture provides a function to run __do_softirq() on a
- seperate stack.
+ separate stack.
config PGTABLE_LEVELS
int
diff --git a/arch/alpha/include/asm/agp.h b/arch/alpha/include/asm/agp.h
index 7173eada1567..7874f063d000 100644
--- a/arch/alpha/include/asm/agp.h
+++ b/arch/alpha/include/asm/agp.h
@@ -6,8 +6,8 @@
/* dummy for now */
-#define map_page_into_agp(page)
-#define unmap_page_from_agp(page)
+#define map_page_into_agp(page) do { } while (0)
+#define unmap_page_from_agp(page) do { } while (0)
#define flush_agp_cache() mb()
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 0fab5ac90775..c9cb554fbe54 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
* Change virtual addresses to physical addresses and vv.
*/
#ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
{
return (unsigned long)address - IDENT_ADDR;
}
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
#else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
{
unsigned long phys = (unsigned long)address;
@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;
-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
{
unsigned long phys = virt_to_phys(address);
unsigned long bus = phys + __direct_map_base;
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h
new file mode 100644
index 000000000000..262aab99e391
--- /dev/null
+++ b/arch/alpha/include/asm/setup.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB 0x20000000
+#define BOOT_ADDR 0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE (16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
+#else
+#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD KERNEL_START
+#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is set up by the secondary bootstrap loader. Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM ZERO_PGE
+#define COMMAND_LINE ((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START (*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
+
+#endif
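The new COMMAND_LINE definition routes the fixed boot address through absolute_pointer(), hiding the constant's provenance from GCC so object-size tracking cannot flag string operations on it. A simplified stand-in for the idea (the kernel's own macro lives in include/linux/compiler.h; treating it as an optimizer barrier is an assumption here, not text from the patch):

```c
#include <linux/compiler.h>

/* Simplified sketch of the absolute_pointer() idea: launder a fixed
 * address through an optimizer barrier so GCC cannot attach an object
 * size to it and warn (-Warray-bounds and friends) when helpers like
 * strlen() are applied to COMMAND_LINE.
 */
static inline void *fixed_addr_pointer(unsigned long addr)
{
	void *p = (void *)addr;

	OPTIMIZER_HIDE_VAR(p);	/* p's origin is now opaque to GCC */
	return p;
}
```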
diff --git a/arch/alpha/include/uapi/asm/setup.h b/arch/alpha/include/uapi/asm/setup.h
index 13b7ee465b0e..f881ea5947cb 100644
--- a/arch/alpha/include/uapi/asm/setup.h
+++ b/arch/alpha/include/uapi/asm/setup.h
@@ -1,43 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ALPHA_SETUP_H
-#define __ALPHA_SETUP_H
+#ifndef _UAPI__ALPHA_SETUP_H
+#define _UAPI__ALPHA_SETUP_H
#define COMMAND_LINE_SIZE 256
-/*
- * We leave one page for the initial stack page, and one page for
- * the initial process structure. Also, the console eats 3 MB for
- * the initial bootloader (one of which we can reclaim later).
- */
-#define BOOT_PCB 0x20000000
-#define BOOT_ADDR 0x20000000
-/* Remove when official MILO sources have ELF support: */
-#define BOOT_SIZE (16*1024)
-
-#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
-#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
-#else
-#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
-#endif
-
-#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
-#define SWAPPER_PGD KERNEL_START
-#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
-#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
-#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
-#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
-
-#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
-
-/*
- * This is setup by the secondary bootstrap loader. Because
- * the zero page is zeroed out as soon as the vm system is
- * initialized, we need to copy things out into a more permanent
- * place.
- */
-#define PARAM ZERO_PGE
-#define COMMAND_LINE ((char*)(PARAM + 0x0000))
-#define INITRD_START (*(unsigned long *) (PARAM+0x100))
-#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
-
-#endif
+#endif /* _UAPI__ALPHA_SETUP_H */
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 0021580d79ad..5808a66e2a81 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -60,6 +60,8 @@ static int __pci_mmap_fits(struct pci_dev *pdev, int num,
* @sparse: address space type
*
* Use the bus mapping routines to map a PCI resource into userspace.
+ *
+ * Return: %0 on success, negative error code otherwise
*/
static int pci_mmap_resource(struct kobject *kobj,
struct bin_attribute *attr,
@@ -106,7 +108,7 @@ static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj,
/**
* pci_remove_resource_files - cleanup resource files
- * @dev: dev to cleanup
+ * @pdev: pci_dev to cleanup
*
* If we created resource files for @dev, remove them from sysfs and
* free their resources.
@@ -221,10 +223,12 @@ static int pci_create_attr(struct pci_dev *pdev, int num)
}
/**
- * pci_create_resource_files - create resource files in sysfs for @dev
- * @dev: dev in question
+ * pci_create_resource_files - create resource files in sysfs for @pdev
+ * @pdev: pci_dev in question
*
* Walk the resources in @dev creating files for each resource available.
+ *
+ * Return: %0 on success, or negative error code
*/
int pci_create_resource_files(struct pci_dev *pdev)
{
@@ -296,7 +300,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
/**
* pci_adjust_legacy_attr - adjustment of legacy file attributes
- * @b: bus to create files under
+ * @bus: bus to create files under
* @mmap_type: I/O port or memory
*
* Adjust file name and size for sparse mappings.
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 57235e5c0cea..6b83e3f2b41c 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -20,11 +20,6 @@
#include <asm/unaligned.h>
#include <asm/kprobes.h>
-void __init trap_init(void)
-{
- return;
-}
-
void die(const char *str, struct pt_regs *regs, unsigned long address)
{
show_kernel_fault_diag(str, regs, address);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 99863022436d..fc196421b2ce 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -124,7 +124,6 @@ config ARM
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC
select RTC_LIB
- select SET_FS
select SYS_SUPPORTS_APM_EMULATION
select TRACE_IRQFLAGS_SUPPORT if !CPU_V7M
# Above selects are sorted alphabetically; please add new ones
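Dropping `select SET_FS` is the headline of the arm changes that follow: the kernel may no longer widen the user address limit to reach kernel memory through user accessors. A minimal sketch of the retired idiom next to its replacement (assuming the generic get_kernel_nofault() helper):

```c
#include <linux/uaccess.h>

/*
 * Old idiom this series deletes (no longer compiles on arm):
 *
 *	mm_segment_t fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	__get_user(val, kernel_ptr);	// user accessor aimed at kernel memory
 *	set_fs(fs);
 *
 * Replacement: a dedicated kernel accessor, no global limit to flip.
 */
static int read_kernel_word(unsigned long *dst, const unsigned long *src)
{
	return get_kernel_nofault(*dst, src);	/* 0 on success, else -EFAULT */
}
```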
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 173da685a52e..847c31e7c368 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -308,7 +308,8 @@ $(BOOT_TARGETS): vmlinux
@$(kecho) ' Kernel: $(boot)/$@ is ready'
$(INSTALL_TARGETS):
- $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
+ $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh "$(KERNELRELEASE)" \
+ $(boot)/$(patsubst %install,%Image,$@) System.map "$(INSTALL_PATH)"
PHONY += vdso_install
vdso_install:
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 0b3cd7a33a26..54a09f9464fb 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -96,23 +96,11 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
-PHONY += initrd install zinstall uinstall
+PHONY += initrd
initrd:
@test "$(INITRD_PHYS)" != "" || \
(echo This machine does not support INITRD; exit -1)
@test "$(INITRD)" != "" || \
(echo You must specify INITRD; exit -1)
-install:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
- $(obj)/Image System.map "$(INSTALL_PATH)"
-
-zinstall:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
- $(obj)/zImage System.map "$(INSTALL_PATH)"
-
-uinstall:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
- $(obj)/uImage System.map "$(INSTALL_PATH)"
-
subdir- := bootp compressed dts
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 9d91ae1091b0..91265e7ff672 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -85,6 +85,8 @@ compress-$(CONFIG_KERNEL_LZ4) = lz4
libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
+CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
+CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
OBJS += $(libfdt_objs) atags_to_fdt.o
endif
ifeq ($(CONFIG_USE_OF),y)
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index feaa43b78535..8b8451399784 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -24,7 +24,6 @@
};
};
- /* see Documentation/devicetree/bindings/opp/opp.txt */
cpu0_opp_table: opp-table {
compatible = "operating-points-v2-ti-cpu";
syscon = <&scm_conf>;
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 20844dbc002e..22b33098b1a2 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -29,7 +29,6 @@
};
};
- /* see Documentation/devicetree/bindings/opp/opp.txt */
cpu0_opp_table: opp-table {
compatible = "operating-points-v2-ti-cpu";
syscon = <&scm_conf>;
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index b935162a8bba..33074fdab2ea 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -56,7 +56,6 @@ CONFIG_ATA=y
CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_MV643XX_ETH=y
-CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 363f1b1b08e3..58f4834289e6 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -284,7 +284,6 @@ CONFIG_RT2800USB=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_INPUT_FF_MEMLESS=m
-CONFIG_INPUT_POLLDEV=y
CONFIG_INPUT_MATRIXKMAP=y
CONFIG_INPUT_MOUSEDEV=m
CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 595e538f5bfb..4b69cf850451 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -52,17 +52,6 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
#else
-/*
- * gcc versions earlier than 4.0 are simply too problematic for the
- * __div64_const32() code in asm-generic/div64.h. First there is
- * gcc PR 15089 that tend to trig on more complex constructs, spurious
- * .global __udivsi3 are inserted even if none of those symbols are
- * referenced in the generated code, and those gcc versions are not able
- * to do constant propagation on long long values anyway.
- */
-
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-
static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
unsigned long long res;
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
index c50e383358c4..f3bb8a2bf788 100644
--- a/arch/arm/include/asm/gpio.h
+++ b/arch/arm/include/asm/gpio.h
@@ -2,10 +2,6 @@
#ifndef _ARCH_ARM_GPIO_H
#define _ARCH_ARM_GPIO_H
-#if CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#endif
-
/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */
#include <asm-generic/gpio.h>
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 91d6b7856be4..93051e2f402c 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -19,7 +19,6 @@ struct pt_regs {
struct svc_pt_regs {
struct pt_regs regs;
u32 dacr;
- u32 addr_limit;
};
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index fd02761ba06c..24c19d63ff0a 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -22,7 +22,21 @@ extern const unsigned long sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- return task_thread_info(task)->syscall;
+ if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
+ return task_thread_info(task)->abi_syscall;
+
+ return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
+}
+
+static inline bool __in_oabi_syscall(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_OABI_COMPAT) &&
+ (task_thread_info(task)->abi_syscall & __NR_OABI_SYSCALL_BASE);
+}
+
+static inline bool in_oabi_syscall(void)
+{
+ return __in_oabi_syscall(current);
}
static inline void syscall_rollback(struct task_struct *task,
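The new thread_info field packs the ABI and the syscall number together: an OABI invocation keeps the 0x900000 base bits, and masking with __NR_SYSCALL_MASK (defined as 0x0fffff later in this patch) recovers the plain number. A self-contained sketch of the encoding:

```c
/* Encoding used by abi_syscall (constants copied from this patch). */
#define OABI_BASE	0x900000	/* __NR_OABI_SYSCALL_BASE */
#define NR_MASK		0x0fffff	/* __NR_SYSCALL_MASK      */

static unsigned int encode_oabi(unsigned int nr) { return OABI_BASE | nr; }
static unsigned int decode_nr(unsigned int abi)  { return abi & NR_MASK; }
static int is_oabi(unsigned int abi)             { return !!(abi & OABI_BASE); }
```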
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index a02799bd0cdf..9a18da3e10cc 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -31,8 +31,6 @@ struct task_struct;
#include <asm/types.h>
-typedef unsigned long mm_segment_t;
-
struct cpu_context_save {
__u32 r4;
__u32 r5;
@@ -54,7 +52,6 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
@@ -62,7 +59,7 @@ struct thread_info {
unsigned long stack_canary;
#endif
struct cpu_context_save cpu_context; /* cpu context */
- __u32 syscall; /* syscall number */
+ __u32 abi_syscall; /* ABI type and syscall nr */
__u8 used_cp[16]; /* thread used copro */
unsigned long tp_value[2]; /* TLS registers */
union fp_state fpstate __attribute__((aligned(8)));
@@ -77,7 +74,6 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
}
/*
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index e6eb7a2aaf1e..6451a433912c 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -84,12 +84,8 @@
* if \disable is set.
*/
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
- ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
- ldr \tmp2, =TASK_SIZE
- str \tmp2, [\tsk, #TI_ADDR_LIMIT]
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])
- str \tmp1, [sp, #SVC_ADDR_LIMIT]
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
/* kernel=client, user=no access */
mov \tmp2, #DACR_UACCESS_DISABLE
@@ -106,9 +102,7 @@
/* Restore the user access state previously saved by uaccess_entry */
.macro uaccess_exit, tsk, tmp0, tmp1
- ldr \tmp1, [sp, #SVC_ADDR_LIMIT]
DACR( ldr \tmp0, [sp, #SVC_DACR])
- str \tmp1, [\tsk, #TI_ADDR_LIMIT]
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
.endm
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a13d90206472..084d1c07c2d0 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -52,32 +52,8 @@ static __always_inline void uaccess_restore(unsigned int flags)
extern int __get_user_bad(void);
extern int __put_user_bad(void);
-/*
- * Note that this is actually 0x1,0000,0000
- */
-#define KERNEL_DS 0x00000000
-
#ifdef CONFIG_MMU
-#define USER_DS TASK_SIZE
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
-{
- current_thread_info()->addr_limit = fs;
-
- /*
- * Prevent a mispredicted conditional call to set_fs from forwarding
- * the wrong address limit to access_ok under speculation.
- */
- dsb(nsh);
- isb();
-
- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
-}
-
-#define uaccess_kernel() (get_fs() == KERNEL_DS)
-
/*
* We use 33-bit arithmetic here. Success returns zero, failure returns
* addr_limit. We take advantage that addr_limit will be zero for KERNEL_DS,
@@ -89,7 +65,7 @@ static inline void set_fs(mm_segment_t fs)
__asm__(".syntax unified\n" \
"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (roksum) \
- : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
+ : "r" (addr), "Ir" (size), "0" (TASK_SIZE) \
: "cc"); \
flag; })
@@ -120,7 +96,7 @@ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
" subshs %1, %1, %2\n"
" movlo %0, #0\n"
: "+r" (safe_ptr), "=&r" (tmp)
- : "r" (size), "r" (current_thread_info()->addr_limit)
+ : "r" (size), "r" (TASK_SIZE)
: "cc");
csdb();
@@ -194,7 +170,7 @@ extern int __get_user_64t_4(void *);
#define __get_user_check(x, p) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ unsigned long __limit = TASK_SIZE - 1; \
register typeof(*(p)) __user *__p asm("r0") = (p); \
register __inttype(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
@@ -245,7 +221,7 @@ extern int __put_user_8(void *, unsigned long long);
#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
- unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ unsigned long __limit = TASK_SIZE - 1; \
register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
@@ -262,19 +238,8 @@ extern int __put_user_8(void *, unsigned long long);
#else /* CONFIG_MMU */
-/*
- * uClinux has only one addr space, so has simplified address limits.
- */
-#define USER_DS KERNEL_DS
-
-#define uaccess_kernel() (true)
#define __addr_ok(addr) ((void)(addr), 1)
#define __range_ok(addr, size) ((void)(addr), 0)
-#define get_fs() (KERNEL_DS)
-
-static inline void set_fs(mm_segment_t fs)
-{
-}
#define get_user(x, p) __get_user(x, p)
#define __put_user_check __put_user_nocheck
@@ -283,9 +248,6 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(addr, size) (__range_ok(addr, size) == 0)
-#define user_addr_max() \
- (uaccess_kernel() ? ~0UL : get_fs())
-
#ifdef CONFIG_CPU_SPECTRE
/*
* When mitigating Spectre variant 1, it is not worth fixing the non-
@@ -308,11 +270,11 @@ static inline void set_fs(mm_segment_t fs)
#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
- __get_user_err((x), (ptr), __gu_err); \
+ __get_user_err((x), (ptr), __gu_err, TUSER()); \
__gu_err; \
})
-#define __get_user_err(x, ptr, err) \
+#define __get_user_err(x, ptr, err, __t) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
@@ -321,18 +283,19 @@ do { \
might_fault(); \
__ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
- case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
- case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
+ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
+ case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
+ case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
default: (__gu_val) = __get_user_bad(); \
} \
uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
+#endif
#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(instr) " %1, [%2], #0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -348,40 +311,38 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-#define __get_user_asm_byte(x, addr, err) \
- __get_user_asm(x, addr, err, ldrb)
+#define __get_user_asm_byte(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrb" __t)
#if __LINUX_ARM_ARCH__ >= 6
-#define __get_user_asm_half(x, addr, err) \
- __get_user_asm(x, addr, err, ldrh)
+#define __get_user_asm_half(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldrh" __t)
#else
#ifndef __ARMEB__
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = __b1 | (__b2 << 8); \
})
#else
-#define __get_user_asm_half(x, __gu_addr, err) \
+#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
unsigned long __b1, __b2; \
- __get_user_asm_byte(__b1, __gu_addr, err); \
- __get_user_asm_byte(__b2, __gu_addr + 1, err); \
+ __get_user_asm_byte(__b1, __gu_addr, err, __t); \
+ __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
(x) = (__b1 << 8) | __b2; \
})
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
-#define __get_user_asm_word(x, addr, err) \
- __get_user_asm(x, addr, err, ldr)
-#endif
-
+#define __get_user_asm_word(x, addr, err, __t) \
+ __get_user_asm(x, addr, err, "ldr" __t)
#define __put_user_switch(x, ptr, __err, __fn) \
do { \
@@ -425,7 +386,7 @@ do { \
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
- __put_user_nocheck_##__size(x, __pu_addr, __err); \
+ __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
} while (0)
#define __put_user_nocheck_1 __put_user_asm_byte
@@ -433,9 +394,11 @@ do { \
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword
+#endif /* !CONFIG_CPU_SPECTRE */
+
#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(instr) " %1, [%2], #0\n" \
+ "1: " instr " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -450,36 +413,36 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
-#define __put_user_asm_byte(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, strb)
+#define __put_user_asm_byte(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strb" __t)
#if __LINUX_ARM_ARCH__ >= 6
-#define __put_user_asm_half(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, strh)
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "strh" __t)
#else
#ifndef __ARMEB__
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
unsigned long __temp = (__force unsigned long)(x); \
- __put_user_asm_byte(__temp, __pu_addr, err); \
- __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
+ __put_user_asm_byte(__temp, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
-#define __put_user_asm_half(x, __pu_addr, err) \
+#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
unsigned long __temp = (__force unsigned long)(x); \
- __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
- __put_user_asm_byte(__temp, __pu_addr + 1, err); \
+ __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
+ __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
})
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
-#define __put_user_asm_word(x, __pu_addr, err) \
- __put_user_asm(x, __pu_addr, err, str)
+#define __put_user_asm_word(x, __pu_addr, err, __t) \
+ __put_user_asm(x, __pu_addr, err, "str" __t)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -489,12 +452,12 @@ do { \
#define __reg_oper1 "%R2"
#endif
-#define __put_user_asm_dword(x, __pu_addr, err) \
+#define __put_user_asm_dword(x, __pu_addr, err, __t) \
__asm__ __volatile__( \
- ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
- ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
- THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
- THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
+ ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -510,7 +473,49 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
-#endif /* !CONFIG_CPU_SPECTRE */
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (src); \
+ unsigned long __src = (unsigned long)(__pk_ptr); \
+ type __val; \
+ int __err = 0; \
+ switch (sizeof(type)) { \
+ case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
+ case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
+ case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
+ case 8: { \
+ u32 *__v32 = (u32*)&__val; \
+ __get_user_asm_word(__v32[0], __src, __err, ""); \
+ if (__err) \
+ break; \
+ __get_user_asm_word(__v32[1], __src+4, __err, ""); \
+ break; \
+ } \
+ default: __err = __get_user_bad(); break; \
+ } \
+ *(type *)(dst) = __val; \
+ if (__err) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ const type *__pk_ptr = (dst); \
+ unsigned long __dst = (unsigned long)__pk_ptr; \
+ int __err = 0; \
+ type __val = *(type *)src; \
+ switch (sizeof(type)) { \
+ case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
+ case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
+ case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
+ case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ if (__err) \
+ goto err_label; \
+} while (0)
#ifdef CONFIG_MMU
extern unsigned long __must_check
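With __get_kernel_nofault()/__put_kernel_nofault() and HAVE_GET_KERNEL_NOFAULT in place, the generic fault-tolerant helpers in mm/maccess.c work on arm without any address-limit games. A usage sketch (the caller is hypothetical):

```c
#include <linux/uaccess.h>

/* Copy from a possibly-unmapped kernel address; returns false instead
 * of oopsing when the source faults.
 */
static bool snapshot_kernel_buf(void *dst, const void *src, size_t len)
{
	return copy_from_kernel_nofault(dst, src, len) == 0;
}
```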
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 1e2c3eb04353..ce9689118dbb 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -24,10 +24,6 @@ __asm__(".syntax unified");
#ifdef CONFIG_THUMB2_KERNEL
-#if __GNUC__ < 4
-#error Thumb-2 kernel requires gcc >= 4
-#endif
-
/* The CPSR bit describing the instruction set (Thumb) */
#define PSR_ISETSTATE PSR_T_BIT
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index ae7749e15726..a1149911464c 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -15,6 +15,7 @@
#define _UAPI__ASM_ARM_UNISTD_H
#define __NR_OABI_SYSCALL_BASE 0x900000
+#define __NR_SYSCALL_MASK 0x0fffff
#if defined(__thumb__) || defined(__ARM_EABI__)
#define __NR_SYSCALL_BASE 0
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 64944701bf6a..a646a3f6440f 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -43,11 +43,11 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_ABI_SYSCALL, offsetof(struct thread_info, abi_syscall));
DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
@@ -88,7 +88,6 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
- DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit));
DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3]));
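TI_ABI_SYSCALL must be emitted here because asm-offsets.c is how assembly learns C struct layouts: DEFINE() plants a marker in the compiler's assembly output, and Kbuild's sed script turns it into a `#define` in generated/asm-offsets.h, which `str scno, [tsk, #TI_ABI_SYSCALL]` below then consumes. The mechanism, as defined in include/linux/kbuild.h:

```c
/* include/linux/kbuild.h: emit "->TI_ABI_SYSCALL <offset>" into the .s
 * file; scripts/Makefile.build greps it back out as a #define.
 */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
```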
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7f0b7aba1498..d9c99db50243 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -49,10 +49,6 @@ __ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
- ldr r2, [tsk, #TI_ADDR_LIMIT]
- ldr r1, =TASK_SIZE
- cmp r2, r1
- blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
movs r1, r1, lsl #16
bne fast_work_pending
@@ -87,10 +83,6 @@ __ret_fast_syscall:
bl do_rseq_syscall
#endif
disable_irq_notrace @ disable interrupts
- ldr r2, [tsk, #TI_ADDR_LIMIT]
- ldr r1, =TASK_SIZE
- cmp r2, r1
- blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
movs r1, r1, lsl #16
beq no_work_pending
@@ -129,10 +121,6 @@ ret_slow_syscall:
#endif
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
- ldr r2, [tsk, #TI_ADDR_LIMIT]
- ldr r1, =TASK_SIZE
- cmp r2, r1
- blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
movs r1, r1, lsl #16
bne slow_work_pending
@@ -226,6 +214,7 @@ ENTRY(vector_swi)
/* saved_psr and saved_pc are now dead */
uaccess_disable tbl
+ get_thread_info tsk
adr tbl, sys_call_table @ load syscall table pointer
@@ -237,13 +226,17 @@ ENTRY(vector_swi)
* get the old ABI syscall table address.
*/
bics r10, r10, #0xff000000
+ strne r10, [tsk, #TI_ABI_SYSCALL]
+ streq scno, [tsk, #TI_ABI_SYSCALL]
eorne scno, r10, #__NR_OABI_SYSCALL_BASE
ldrne tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
bic scno, scno, #0xff000000 @ mask off SWI op-code
+ str scno, [tsk, #TI_ABI_SYSCALL]
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
+#else
+ str scno, [tsk, #TI_ABI_SYSCALL]
#endif
- get_thread_info tsk
/*
* Reload the registers that may have been corrupted on entry to
* the syscall assembly (by tracing or context tracking.)
@@ -288,7 +281,6 @@ ENDPROC(vector_swi)
* context switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov r1, scno
add r0, sp, #S_OFF
bl syscall_trace_enter
mov scno, r0
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index bb5ad8a6a4c3..0e2d3051741e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -106,7 +106,7 @@ void __show_regs(struct pt_regs *regs)
unsigned long flags;
char buf[64];
#ifndef CONFIG_CPU_V7M
- unsigned int domain, fs;
+ unsigned int domain;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
@@ -115,14 +115,11 @@ void __show_regs(struct pt_regs *regs)
*/
if (user_mode(regs)) {
domain = DACR_UACCESS_ENABLE;
- fs = get_fs();
} else {
domain = to_svc_pt_regs(regs)->dacr;
- fs = to_svc_pt_regs(regs)->addr_limit;
}
#else
domain = get_domain();
- fs = get_fs();
#endif
#endif
@@ -158,8 +155,6 @@ void __show_regs(struct pt_regs *regs)
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
- else if (fs == KERNEL_DS)
- segment = "kernel";
else
segment = "user";
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index b008859680bc..43b963ea4a0e 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/tracehook.h>
#include <linux/unistd.h>
+#include <asm/syscall.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
@@ -785,7 +786,8 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case PTRACE_SET_SYSCALL:
- task_thread_info(child)->syscall = data;
+ task_thread_info(child)->abi_syscall = data &
+ __NR_SYSCALL_MASK;
ret = 0;
break;
@@ -844,14 +846,14 @@ static void tracehook_report_syscall(struct pt_regs *regs,
if (dir == PTRACE_SYSCALL_EXIT)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
- current_thread_info()->syscall = -1;
+ current_thread_info()->abi_syscall = -1;
regs->ARM_ip = ip;
}
-asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
- current_thread_info()->syscall = scno;
+ int scno;
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -862,11 +864,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
return -1;
#else
/* XXX: remove this once OABI gets fixed */
- secure_computing_strict(current_thread_info()->syscall);
+ secure_computing_strict(syscall_get_nr(current, regs));
#endif
/* Tracer or seccomp may have changed syscall. */
- scno = current_thread_info()->syscall;
+ scno = syscall_get_nr(current, regs);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, scno);
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 4e0dcff3f5b0..d0a800be0486 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -669,14 +669,6 @@ struct page *get_signal_page(void)
return page;
}
-/* Defer to generic check */
-asmlinkage void addr_limit_check_failed(void)
-{
-#ifdef CONFIG_MMU
- addr_limit_user_check();
-#endif
-}
-
#ifdef CONFIG_DEBUG_RSEQ
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 075a2e0ed2c1..68112c172025 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -80,9 +80,12 @@
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/ipc.h>
+#include <linux/ipc_namespace.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/syscall.h>
+
struct oldabi_stat64 {
unsigned long long st_dev;
unsigned int __pad1;
@@ -191,60 +194,87 @@ struct oabi_flock64 {
pid_t l_pid;
} __attribute__ ((packed,aligned(4)));
-static long do_locks(unsigned int fd, unsigned int cmd,
- unsigned long arg)
+static int get_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg)
{
- struct flock64 kernel;
struct oabi_flock64 user;
- mm_segment_t fs;
- long ret;
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
sizeof(user)))
return -EFAULT;
- kernel.l_type = user.l_type;
- kernel.l_whence = user.l_whence;
- kernel.l_start = user.l_start;
- kernel.l_len = user.l_len;
- kernel.l_pid = user.l_pid;
-
- fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel);
- set_fs(fs);
-
- if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) {
- user.l_type = kernel.l_type;
- user.l_whence = kernel.l_whence;
- user.l_start = kernel.l_start;
- user.l_len = kernel.l_len;
- user.l_pid = kernel.l_pid;
- if (copy_to_user((struct oabi_flock64 __user *)arg,
- &user, sizeof(user)))
- ret = -EFAULT;
- }
- return ret;
+
+ kernel->l_type = user.l_type;
+ kernel->l_whence = user.l_whence;
+ kernel->l_start = user.l_start;
+ kernel->l_len = user.l_len;
+ kernel->l_pid = user.l_pid;
+
+ return 0;
+}
+
+static int put_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg)
+{
+ struct oabi_flock64 user;
+
+ user.l_type = kernel->l_type;
+ user.l_whence = kernel->l_whence;
+ user.l_start = kernel->l_start;
+ user.l_len = kernel->l_len;
+ user.l_pid = kernel->l_pid;
+
+ if (copy_to_user((struct oabi_flock64 __user *)arg,
+ &user, sizeof(user)))
+ return -EFAULT;
+
+ return 0;
}
asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp = (void __user *)arg;
+ struct fd f = fdget_raw(fd);
+ struct flock64 flock;
+ long err = -EBADF;
+
+ if (!f.file)
+ goto out;
+
switch (cmd) {
- case F_OFD_GETLK:
- case F_OFD_SETLK:
- case F_OFD_SETLKW:
case F_GETLK64:
+ case F_OFD_GETLK:
+ err = security_file_fcntl(f.file, cmd, arg);
+ if (err)
+ break;
+ err = get_oabi_flock(&flock, argp);
+ if (err)
+ break;
+ err = fcntl_getlk64(f.file, cmd, &flock);
+ if (!err)
+ err = put_oabi_flock(&flock, argp);
+ break;
case F_SETLK64:
case F_SETLKW64:
- return do_locks(fd, cmd, arg);
-
+ case F_OFD_SETLK:
+ case F_OFD_SETLKW:
+ err = security_file_fcntl(f.file, cmd, arg);
+ if (err)
+ break;
+ err = get_oabi_flock(&flock, argp);
+ if (err)
+ break;
+ err = fcntl_setlk64(fd, f.file, cmd, &flock);
+ break;
default:
- return sys_fcntl64(fd, cmd, arg);
+ err = sys_fcntl64(fd, cmd, arg);
+ break;
}
+ fdput(f);
+out:
+ return err;
}
struct oabi_epoll_event {
- __u32 events;
+ __poll_t events;
__u64 data;
} __attribute__ ((packed,aligned(4)));
@@ -264,55 +294,34 @@ asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
return do_epoll_ctl(epfd, op, fd, &kernel, false);
}
-
-asmlinkage long sys_oabi_epoll_wait(int epfd,
- struct oabi_epoll_event __user *events,
- int maxevents, int timeout)
-{
- struct epoll_event *kbuf;
- struct oabi_epoll_event e;
- mm_segment_t fs;
- long ret, err, i;
-
- if (maxevents <= 0 ||
- maxevents > (INT_MAX/sizeof(*kbuf)) ||
- maxevents > (INT_MAX/sizeof(*events)))
- return -EINVAL;
- if (!access_ok(events, sizeof(*events) * maxevents))
- return -EFAULT;
- kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
- set_fs(fs);
- err = 0;
- for (i = 0; i < ret; i++) {
- e.events = kbuf[i].events;
- e.data = kbuf[i].data;
- err = __copy_to_user(events, &e, sizeof(e));
- if (err)
- break;
- events++;
- }
- kfree(kbuf);
- return err ? -EFAULT : ret;
-}
#else
asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
struct oabi_epoll_event __user *event)
{
return -EINVAL;
}
+#endif
-asmlinkage long sys_oabi_epoll_wait(int epfd,
- struct oabi_epoll_event __user *events,
- int maxevents, int timeout)
+struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
{
- return -EINVAL;
+ if (in_oabi_syscall()) {
+ struct oabi_epoll_event __user *oevent = (void __user *)uevent;
+
+ if (__put_user(revents, &oevent->events) ||
+ __put_user(data, &oevent->data))
+ return NULL;
+
+ return (void __user *)(oevent+1);
+ }
+
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
}
-#endif
struct oabi_sembuf {
unsigned short sem_num;
@@ -321,46 +330,52 @@ struct oabi_sembuf {
unsigned short __pad;
};
+#define sc_semopm sem_ctls[2]
+
+#ifdef CONFIG_SYSVIPC
asmlinkage long sys_oabi_semtimedop(int semid,
struct oabi_sembuf __user *tsops,
unsigned nsops,
const struct old_timespec32 __user *timeout)
{
+ struct ipc_namespace *ns;
struct sembuf *sops;
- struct old_timespec32 local_timeout;
long err;
int i;
+ ns = current->nsproxy->ipc_ns;
+ if (nsops > ns->sc_semopm)
+ return -E2BIG;
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
- if (!access_ok(tsops, sizeof(*tsops) * nsops))
- return -EFAULT;
- sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
+ sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
struct oabi_sembuf osb;
- err |= __copy_from_user(&osb, tsops, sizeof(osb));
+ err |= copy_from_user(&osb, tsops, sizeof(osb));
sops[i].sem_num = osb.sem_num;
sops[i].sem_op = osb.sem_op;
sops[i].sem_flg = osb.sem_flg;
tsops++;
}
- if (timeout) {
- /* copy this as well before changing domain protection */
- err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
- timeout = &local_timeout;
- }
if (err) {
err = -EFAULT;
- } else {
- mm_segment_t fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_semtimedop_time32(semid, sops, nsops, timeout);
- set_fs(fs);
+ goto out;
}
- kfree(sops);
+
+ if (timeout) {
+ struct timespec64 ts;
+ err = get_old_timespec32(&ts, timeout);
+ if (err)
+ goto out;
+ err = __do_semtimedop(semid, sops, nsops, &ts, ns);
+ goto out;
+ }
+ err = __do_semtimedop(semid, sops, nsops, NULL, ns);
+out:
+ kvfree(sops);
return err;
}
@@ -387,6 +402,27 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
return sys_ipc(call, first, second, third, ptr, fifth);
}
}
+#else
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth)
+{
+ return -ENOSYS;
+}
+#endif
asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen)
{
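The epoll conversion earlier in this file replaces the old double-buffering sys_oabi_epoll_wait() with a hook: the core epoll code now calls epoll_put_uevent() once per ready event, and arm overrides it to emit the packed OABI layout when in_oabi_syscall() is true. The generic default is assumed to mirror the non-OABI tail above:

```c
/* Assumed shape of the generic default (include/linux/eventpoll.h):
 * write one event to the user buffer and advance, NULL on fault.
 */
static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
		 struct epoll_event __user *uevent)
{
	if (__put_user(revents, &uevent->events) ||
	    __put_user(data, &uevent->data))
		return NULL;
	return uevent + 1;
}
```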
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 64308e3a5d0c..4a7edc6e848f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -122,17 +122,8 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top)
{
unsigned long first;
- mm_segment_t fs;
int i;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
for (first = bottom & ~31; first < top; first += 32) {
@@ -145,7 +136,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
if (p >= bottom && p < top) {
unsigned long val;
- if (__get_user(val, (unsigned long *)p) == 0)
+ if (!get_kernel_nofault(val, (unsigned long *)p))

sprintf(str + i * 9, " %08lx", val);
else
sprintf(str + i * 9, " ????????");
@@ -153,11 +144,9 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
}
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
}
-
- set_fs(fs);
}
-static void __dump_instr(const char *lvl, struct pt_regs *regs)
+static void dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
@@ -173,10 +162,20 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
- if (thumb)
- bad = get_user(val, &((u16 *)addr)[i]);
- else
- bad = get_user(val, &((u32 *)addr)[i]);
+ if (!user_mode(regs)) {
+ if (thumb) {
+ u16 val16;
+ bad = get_kernel_nofault(val16, &((u16 *)addr)[i]);
+ val = val16;
+ } else {
+ bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
+ }
+ } else {
+ if (thumb)
+ bad = get_user(val, &((u16 *)addr)[i]);
+ else
+ bad = get_user(val, &((u32 *)addr)[i]);
+ }
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -189,20 +188,6 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
printk("%sCode: %s\n", lvl, str);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
-{
- mm_segment_t fs;
-
- if (!user_mode(regs)) {
- fs = get_fs();
- set_fs(KERNEL_DS);
- __dump_instr(lvl, regs);
- set_fs(fs);
- } else {
- __dump_instr(lvl, regs);
- }
-}
-
#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl)
@@ -781,11 +766,6 @@ void abort(void)
panic("Oops failed to kill thread");
}
-void __init trap_init(void)
-{
- return;
-}
-
#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index f8016e3db65d..480a20766137 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -109,8 +109,7 @@
ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
- get_thread_info r3
- ldr r3, [r3, #TI_ADDR_LIMIT]
+ ldr r3, =TASK_SIZE
uaccess_mask_range_ptr r1, r2, r3, ip
#endif
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index ebfe4cb3d912..842ea5ede485 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -109,8 +109,7 @@
ENTRY(__copy_to_user_std)
WEAK(arm_copy_to_user)
#ifdef CONFIG_CPU_SPECTRE
- get_thread_info r3
- ldr r3, [r3, #TI_ADDR_LIMIT]
+ ldr r3, =TASK_SIZE
uaccess_mask_range_ptr r0, r2, r3, ip
#endif
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 7e0a9b692d87..e842209e135d 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -266,7 +266,7 @@
249 common lookup_dcookie sys_lookup_dcookie
250 common epoll_create sys_epoll_create
251 common epoll_ctl sys_epoll_ctl sys_oabi_epoll_ctl
-252 common epoll_wait sys_epoll_wait sys_oabi_epoll_wait
+252 common epoll_wait sys_epoll_wait
253 common remap_file_pages sys_remap_file_pages
# 254 for set_thread_area
# 255 for get_thread_area
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 077f2ec4eeb2..5c7ae4c3954b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -86,7 +86,7 @@ config ARM64
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
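The GCC_VERSION >= 50000 clause disappears because the kernel's floor is now GCC 5.1, so every supported compiler that reports CC_HAS_INT128 has usable __int128. A sketch of the arithmetic this Kconfig symbol unlocks (helper name hypothetical):

```c
#include <linux/types.h>

/* 64x64 -> high 64 bits, the classic __int128 win on arm64. */
static inline u64 mul_u64_u64_hi(u64 a, u64 b)
{
	return (u64)(((unsigned __int128)a * b) >> 64);
}
```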
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 79c1a750e357..eaa6ca062d89 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -107,11 +107,6 @@ struct compat_statfs {
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
#define COMPAT_MINSIGSTKSZ 2048
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- return (void __user *)compat_user_stack_pointer() - len;
-}
-
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index b5f08621fa29..190b494e22ab 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -430,17 +430,6 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
__actu_ret; \
})
-extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-#define raw_copy_in_user(to, from, n) \
-({ \
- unsigned long __aciu_ret; \
- uaccess_ttbr0_enable(); \
- __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
- __uaccess_mask_ptr(from), (n)); \
- uaccess_ttbr0_disable(); \
- __aciu_ret; \
-})
-
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 4e99e4b912ef..844f6ae58662 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -649,11 +649,11 @@ __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 318
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
#define __NR_mbind 319
-__SYSCALL(__NR_mbind, compat_sys_mbind)
+__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 320
-__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy)
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 321
-__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy)
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_openat 322
__SYSCALL(__NR_openat, compat_sys_openat)
#define __NR_mkdirat 323
@@ -699,7 +699,7 @@ __SYSCALL(__NR_tee, sys_tee)
#define __NR_vmsplice 343
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_move_pages 344
-__SYSCALL(__NR_move_pages, compat_sys_move_pages)
+__SYSCALL(__NR_move_pages, sys_move_pages)
#define __NR_getcpu 345
__SYSCALL(__NR_getcpu, sys_getcpu)
#define __NR_epoll_pwait 346
@@ -811,7 +811,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_io_pgetevents 399
__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_migrate_pages 400
-__SYSCALL(__NR_migrate_pages, compat_sys_migrate_pages)
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_kexec_file_load 401
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 402 is unused */
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 7fa6828bb488..587543c6c51c 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -43,7 +43,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->type = type;
}
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
{
unsigned int ctype, level, leaves, fw_level;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -78,7 +78,7 @@ static int __init_cache_level(unsigned int cpu)
return 0;
}
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
{
unsigned int level, idx;
enum cache_type type;
@@ -97,6 +97,3 @@ static int __populate_cache_leaves(unsigned int cpu)
}
return 0;
}
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 1006ed2d7c60..2276689b5411 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -82,14 +82,29 @@ int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
- if (!acpi_disabled) {
- struct pci_config_window *cfg = bridge->bus->sysdata;
- struct acpi_device *adev = to_acpi_device(cfg->parent);
- struct device *bus_dev = &bridge->bus->dev;
+ struct pci_config_window *cfg;
+ struct acpi_device *adev;
+ struct device *bus_dev;
- ACPI_COMPANION_SET(&bridge->dev, adev);
- set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
- }
+ if (acpi_disabled)
+ return 0;
+
+ cfg = bridge->bus->sysdata;
+
+ /*
+ * On Hyper-V there is no corresponding ACPI device for a root bridge,
+ * so the driver leaves ->parent NULL. Set 'adev' to NULL in that case,
+ * since there is no proper ACPI device to reference.
+ */
+ if (!cfg->parent)
+ adev = NULL;
+ else
+ adev = to_acpi_device(cfg->parent);
+
+ bus_dev = &bridge->bus->dev;
+
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
return 0;
}
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 6dd56a49790a..0941180a86d3 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
lib-y := clear_user.o delay.o copy_from_user.o \
- copy_to_user.o copy_in_user.o copy_page.o \
+ copy_to_user.o copy_page.o \
clear_page.o csum.o insn.o memchr.o memcpy.o \
memset.o memcmp.o strcmp.o strncmp.o strlen.o \
strnlen.o strchr.o strrchr.o tishift.o
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
deleted file mode 100644
index dbea3799c3ef..000000000000
--- a/arch/arm64/lib/copy_in_user.S
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copy from user space to user space
- *
- * Copyright (C) 2012 ARM Ltd.
- */
-
-#include <linux/linkage.h>
-
-#include <asm/asm-uaccess.h>
-#include <asm/assembler.h>
-#include <asm/cache.h>
-
-/*
- * Copy from user space to user space (alignment handled by the hardware)
- *
- * Parameters:
- * x0 - to
- * x1 - from
- * x2 - n
- * Returns:
- * x0 - bytes not copied
- */
- .macro ldrb1 reg, ptr, val
- user_ldst 9998f, ldtrb, \reg, \ptr, \val
- .endm
-
- .macro strb1 reg, ptr, val
- user_ldst 9998f, sttrb, \reg, \ptr, \val
- .endm
-
- .macro ldrh1 reg, ptr, val
- user_ldst 9997f, ldtrh, \reg, \ptr, \val
- .endm
-
- .macro strh1 reg, ptr, val
- user_ldst 9997f, sttrh, \reg, \ptr, \val
- .endm
-
- .macro ldr1 reg, ptr, val
- user_ldst 9997f, ldtr, \reg, \ptr, \val
- .endm
-
- .macro str1 reg, ptr, val
- user_ldst 9997f, sttr, \reg, \ptr, \val
- .endm
-
- .macro ldp1 reg1, reg2, ptr, val
- user_ldp 9997f, \reg1, \reg2, \ptr, \val
- .endm
-
- .macro stp1 reg1, reg2, ptr, val
- user_stp 9997f, \reg1, \reg2, \ptr, \val
- .endm
-
-end .req x5
-srcin .req x15
-SYM_FUNC_START(__arch_copy_in_user)
- add end, x0, x2
- mov srcin, x1
-#include "copy_template.S"
- mov x0, #0
- ret
-SYM_FUNC_END(__arch_copy_in_user)
-EXPORT_SYMBOL(__arch_copy_in_user)
-
- .section .fixup,"ax"
- .align 2
-9997: cmp dst, dstin
- b.ne 9998f
- // Before being absolutely sure we couldn't copy anything, try harder
-USER(9998f, ldtrb tmp1w, [srcin])
-USER(9998f, sttrb tmp1w, [dst])
- add dst, dst, #1
-9998: sub x0, end, dst // bytes not copied
- ret
- .previous
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b16be52233c6..37a81754d9b6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,7 @@
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
+#include <linux/kmemleak.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -101,6 +102,11 @@ static void __init reserve_crashkernel(void)
pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
+ /*
+ * The crashkernel memory will be removed from the kernel linear
+ * map. Inform kmemleak so that it won't try to access it.
+ */
+ kmemleak_ignore_phys(crash_base);
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
@@ -222,7 +228,21 @@ early_param("mem", early_mem);
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+ s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+
+ /*
+ * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
+ * be limited in their ability to support a linear map that exceeds 51
+ * bits of VA space, depending on the placement of the ID map. Given
+ * that the placement of the ID map may be randomized, let's simply
+ * limit the kernel's linear map to 51 bits as well if we detect this
+ * configuration.
+ */
+ if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
+ is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+ pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
+ linear_region_size = min_t(u64, linear_region_size, BIT(51));
+ }
/* Remove memory above our supported physical address size */
memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
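kmemleak_ignore_phys() is needed because the crashkernel carveout is later unmapped from the linear map; if kmemleak kept scanning it, it would fault. A sketch of the general reserve-and-hide pattern (function name hypothetical):

```c
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>

/* Reserve a region that will vanish from the linear map and make sure
 * kmemleak never dereferences it.
 */
static phys_addr_t __init reserve_unmapped_region(phys_addr_t size)
{
	phys_addr_t base = memblock_phys_alloc(size, SZ_2M);

	if (base)
		kmemleak_ignore_phys(base);
	return base;
}
```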
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9ff0de1b2b93..cfd9deb347c3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1502,8 +1502,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret;
}
-void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index 5d8b969cd8f3..bdbe988d8dbc 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -39,10 +39,6 @@ void __init base_trap_init(void)
{
}
-void __init trap_init(void)
-{
-}
-
asmlinkage void set_esp0(unsigned long ssp)
{
current->thread.esp0 = ssp;
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 904134b37232..edfc35dafeb1 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -28,10 +28,6 @@
#define TRAP_SYSCALL 1
#define TRAP_DEBUG 0xdb
-void __init trap_init(void)
-{
-}
-
#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
int is_valid_bugaddr(unsigned long addr)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 064a967a7b6e..5c6da8d83c1a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -484,8 +484,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret;
}
-void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
index 911826ea83ce..80eb2396d01e 100644
--- a/arch/m68k/include/asm/raw_io.h
+++ b/arch/m68k/include/asm/raw_io.h
@@ -17,21 +17,21 @@
* two accesses to memory, which may be undesirable for some devices.
*/
#define in_8(addr) \
- ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+ ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
#define in_be16(addr) \
- ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+ ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
#define in_be32(addr) \
- ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+ ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
#define in_le16(addr) \
- ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
#define in_le32(addr) \
- ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
#define raw_inb in_8
#define raw_inw in_be16
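The extra bounce through (unsigned long) lets these accessors take either a real pointer or a plain integer address cookie without cast warnings; that motivation is my reading of the change, not text from the patch. A standalone sketch:

```c
/* Sketch: casting via (unsigned long) first accepts both pointers and
 * integer address cookies cleanly; a direct pointer cast can warn when
 * the argument is an integer type of a different width.
 */
#define READ8(addr)	(*(volatile unsigned char *)(unsigned long)(addr))

static unsigned char probe(volatile unsigned char *mmio, unsigned int cookie)
{
	return READ8(mmio) ^ READ8(cookie);	/* both forms compile quietly */
}
```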
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index e1e90c49a496..dfd6202fd403 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)
int mvme147_hwclk(int op, struct rtc_time *t)
{
-#warning check me!
if (!op) {
m147_rtc->ctrl = RTC_READ;
t->tm_year = bcd2int (m147_rtc->bcd_year);
@@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
m147_rtc->ctrl = 0;
if (t->tm_year < 70)
t->tm_year += 100;
+ } else {
+ /* FIXME Setting the time is not yet supported */
+ return -EOPNOTSUPP;
}
return 0;
}
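This hunk and the mvme16x one below replace a compile-time "#warning check me!" with a real runtime error on the unimplemented write path. A hedged sketch of the resulting handler shape (example_hwclk is a placeholder name, not the driver's):

	#include <errno.h>

	struct rtc_time;	/* fields elided for the sketch */

	static int example_hwclk(int op, struct rtc_time *t)
	{
		if (!op) {
			/* op == 0: read the hardware clock into *t */
			return 0;
		}
		/* op != 0: setting the clock is not supported yet */
		return -EOPNOTSUPP;
	}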
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index b59593c7cfb9..b4422c2dfbbf 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -436,7 +436,6 @@ int bcd2int (unsigned char b)
int mvme16x_hwclk(int op, struct rtc_time *t)
{
-#warning check me!
if (!op) {
rtc->ctrl = RTC_READ;
t->tm_year = bcd2int (rtc->bcd_year);
@@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
rtc->ctrl = 0;
if (t->tm_year < 70)
t->tm_year += 100;
+ } else {
+ /* FIXME Setting the time is not yet supported */
+ return -EOPNOTSUPP;
}
return 0;
}
diff --git a/arch/microblaze/Kbuild b/arch/microblaze/Kbuild
index a4e40e534e6a..a1c597889319 100644
--- a/arch/microblaze/Kbuild
+++ b/arch/microblaze/Kbuild
@@ -1 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-y += kernel/
+obj-y += mm/
+obj-$(CONFIG_PCI) += pci/
+obj-y += boot/dts/
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 6d4af39e3890..9adc6b6434df 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -50,17 +50,12 @@ KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-y) $(CPUFLAGS-1) $(CPUFLAGS-2)
head-y := arch/microblaze/kernel/head.o
libs-y += arch/microblaze/lib/
-core-y += arch/microblaze/kernel/
-core-y += arch/microblaze/mm/
-core-$(CONFIG_PCI) += arch/microblaze/pci/
boot := arch/microblaze/boot
# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
DTB:=$(subst simpleImage.,,$(filter simpleImage.%, $(MAKECMDGOALS)))
-core-y += $(boot)/dts/
-
export DTB
all: linux.bin
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 600d018cf354..0a515cde1c18 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -154,8 +154,6 @@ FEXPORT(__raw_copy_from_user)
EXPORT_SYMBOL(__raw_copy_from_user)
FEXPORT(__raw_copy_to_user)
EXPORT_SYMBOL(__raw_copy_to_user)
-FEXPORT(__raw_copy_in_user)
-EXPORT_SYMBOL(__raw_copy_in_user)
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index aaf9d5e0aa2c..791894c4d8fb 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -116,7 +116,6 @@ CONFIG_8139TOO=y
CONFIG_R8169=y
CONFIG_USB_USBNET=m
CONFIG_USB_NET_CDC_EEM=m
-CONFIG_INPUT_POLLDEV=m
CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
index 63fe2da1b37f..fd567247adc7 100644
--- a/arch/mips/configs/pic32mzda_defconfig
+++ b/arch/mips/configs/pic32mzda_defconfig
@@ -34,7 +34,6 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_INPUT_LEDS=m
-CONFIG_INPUT_POLLDEV=y
CONFIG_INPUT_MOUSEDEV=m
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index fec5851c164b..eb359db15dba 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -90,7 +90,6 @@ CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_ISDN=y
CONFIG_INPUT=m
-CONFIG_INPUT_POLLDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_MISC=y
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index 9abbc0debc2a..eeb689f715cb 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -96,7 +96,6 @@ CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_ISDN=y
CONFIG_INPUT=m
-CONFIG_INPUT_POLLDEV=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_MISC=y
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 53f015a1b0a7..bbb3bc5a42fd 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -96,14 +96,6 @@ struct compat_statfs {
#define COMPAT_OFF_T_MAX 0x7fffffff
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = (struct pt_regs *)
- ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
-
- return (void __user *) (regs->regs[29] - len);
-}
-
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 783fecce65c8..f8f74f9f5883 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -428,7 +428,6 @@ do { \
extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);
-extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n);
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -480,31 +479,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
-static inline unsigned long
-raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- register void __user *__cu_to_r __asm__("$4");
- register const void __user *__cu_from_r __asm__("$5");
- register long __cu_len_r __asm__("$6");
-
- __cu_to_r = to;
- __cu_from_r = from;
- __cu_len_r = n;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- __MODULE_JAL(__raw_copy_in_user)
- ".set\tnoat\n\t"
- __UA_ADDU "\t$1, %1, %2\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
- :
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
- DADDI_SCRATCH, "memory");
- return __cu_len_r;
-}
-
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
/*
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index 53d8ea7d36e6..495dd058231d 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -17,7 +17,7 @@ do { \
leaf++; \
} while (0)
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
{
struct cpuinfo_mips *c = &current_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -74,7 +74,7 @@ static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
cpumask_set_cpu(cpu1, cpu_map);
}
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
{
struct cpuinfo_mips *c = &current_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -114,6 +114,3 @@ static int __populate_cache_leaves(unsigned int cpu)
return 0;
}
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
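Dropping the DEFINE_SMP_CALL_CACHE_FUNCTION() wrappers works because the generic cacheinfo core now calls init_cache_level()/populate_cache_leaves() itself on the right CPU and provides weak defaults an architecture can override. A sketch of the weak-stub arrangement, assuming the generic declarations look roughly like this:

	#include <errno.h>

	/* generic side (sketch): weak defaults meaning "no cache info" */
	int __attribute__((weak)) init_cache_level(unsigned int cpu)
	{
		return -ENOENT;
	}

	int __attribute__((weak)) populate_cache_leaves(unsigned int cpu)
	{
		return -ENOENT;
	}

	/* an architecture then supplies strong, non-static definitions
	 * with the same signatures, as the MIPS patch above now does */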
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 56c8d3cf42ed..70e32de2bcaa 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -239,9 +239,9 @@
228 n32 clock_nanosleep sys_clock_nanosleep_time32
229 n32 tgkill sys_tgkill
230 n32 utimes sys_utimes_time32
-231 n32 mbind compat_sys_mbind
-232 n32 get_mempolicy compat_sys_get_mempolicy
-233 n32 set_mempolicy compat_sys_set_mempolicy
+231 n32 mbind sys_mbind
+232 n32 get_mempolicy sys_get_mempolicy
+233 n32 set_mempolicy sys_set_mempolicy
234 n32 mq_open compat_sys_mq_open
235 n32 mq_unlink sys_mq_unlink
236 n32 mq_timedsend sys_mq_timedsend_time32
@@ -258,7 +258,7 @@
247 n32 inotify_init sys_inotify_init
248 n32 inotify_add_watch sys_inotify_add_watch
249 n32 inotify_rm_watch sys_inotify_rm_watch
-250 n32 migrate_pages compat_sys_migrate_pages
+250 n32 migrate_pages sys_migrate_pages
251 n32 openat sys_openat
252 n32 mkdirat sys_mkdirat
253 n32 mknodat sys_mknodat
@@ -279,7 +279,7 @@
268 n32 sync_file_range sys_sync_file_range
269 n32 tee sys_tee
270 n32 vmsplice sys_vmsplice
-271 n32 move_pages compat_sys_move_pages
+271 n32 move_pages sys_move_pages
272 n32 set_robust_list compat_sys_set_robust_list
273 n32 get_robust_list compat_sys_get_robust_list
274 n32 kexec_load compat_sys_kexec_load
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 201237fd0f43..a61c35edaa74 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -279,9 +279,9 @@
265 o32 clock_nanosleep sys_clock_nanosleep_time32
266 o32 tgkill sys_tgkill
267 o32 utimes sys_utimes_time32
-268 o32 mbind sys_mbind compat_sys_mbind
-269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+268 o32 mbind sys_mbind
+269 o32 get_mempolicy sys_get_mempolicy
+270 o32 set_mempolicy sys_set_mempolicy
271 o32 mq_open sys_mq_open compat_sys_mq_open
272 o32 mq_unlink sys_mq_unlink
273 o32 mq_timedsend sys_mq_timedsend_time32
@@ -298,7 +298,7 @@
284 o32 inotify_init sys_inotify_init
285 o32 inotify_add_watch sys_inotify_add_watch
286 o32 inotify_rm_watch sys_inotify_rm_watch
-287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages
+287 o32 migrate_pages sys_migrate_pages
288 o32 openat sys_openat compat_sys_openat
289 o32 mkdirat sys_mkdirat
290 o32 mknodat sys_mknodat
@@ -319,7 +319,7 @@
305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
306 o32 tee sys_tee
307 o32 vmsplice sys_vmsplice
-308 o32 move_pages sys_move_pages compat_sys_move_pages
+308 o32 move_pages sys_move_pages
309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
311 o32 kexec_load sys_kexec_load compat_sys_kexec_load
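These table entries lose their compat_sys_* column because the native mm syscall implementations now handle 32-bit callers internally, so one entry point serves both ABIs. The usual shape of such a handler is to branch on in_compat_syscall() when decoding user pointers; a hedged sketch, with ksys_example a placeholder name and kernel headers assumed:

	static long ksys_example(void __user *uptr)
	{
		if (in_compat_syscall()) {
			/* decode the 32-bit user layout from uptr */
		} else {
			/* decode the native layout */
		}
		return 0;
	}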
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index e19fb98b5d38..277c32296636 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -666,8 +666,6 @@ FEXPORT(__raw_copy_from_user)
EXPORT_SYMBOL(__raw_copy_from_user)
FEXPORT(__raw_copy_to_user)
EXPORT_SYMBOL(__raw_copy_to_user)
-FEXPORT(__raw_copy_in_user)
-EXPORT_SYMBOL(__raw_copy_in_user)
#endif
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -703,13 +701,4 @@ EXPORT_SYMBOL(__raw_copy_to_user)
__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
END(__raw_copy_to_user)
-/*
- * __copy_in_user (EVA)
- */
-
-LEAF(__raw_copy_in_user)
-EXPORT_SYMBOL(__raw_copy_in_user)
-__BUILD_COPY_USER EVA_MODE USEROP USEROP
-END(__raw_copy_in_user)
-
#endif
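With arch_compat_alloc_user_space() gone (see the compat.h hunk above), nothing calls raw_copy_in_user() any more, so its MIPS assembly can be deleted outright. If a user-to-user copy were ever needed again, it would bounce through a kernel buffer; a hedged sketch of that pattern, assuming the usual uaccess helpers and the kernel's min():

	static long bounce_copy(void __user *dst, const void __user *src,
				size_t n)
	{
		char buf[256];

		while (n) {
			size_t chunk = min(n, sizeof(buf));

			if (copy_from_user(buf, src, chunk) ||
			    copy_to_user(dst, buf, chunk))
				return -EFAULT;
			dst += chunk;
			src += chunk;
			n -= chunk;
		}
		return 0;
	}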
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 41725eaf8bac..b3d34d646652 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -244,7 +244,6 @@ static void __init setup_memory(void)
unsigned long ram_start_pfn;
unsigned long free_ram_start_pfn;
phys_addr_t memory_start, memory_end;
- struct memblock_region *region;
memory_end = memory_start = 0;
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index ee0d9ae192a5..f06421c645af 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -183,11 +183,6 @@ void __pgd_error(const char *file, int line, unsigned long val)
}
extern char *exception_vector, *exception_vector_end;
-void __init trap_init(void)
-{
- return;
-}
-
void __init early_trap_init(void)
{
unsigned long ivb = 0;
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
index b172da4eb1a9..596986a74a26 100644
--- a/arch/nios2/kernel/traps.c
+++ b/arch/nios2/kernel/traps.c
@@ -105,11 +105,6 @@ void show_stack(struct task_struct *task, unsigned long *stack,
printk("%s\n", loglvl);
}
-void __init trap_init(void)
-{
- /* Nothing to do here */
-}
-
/* Breakpoint handler */
asmlinkage void breakpoint_c(struct pt_regs *fp)
{
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 4d61333c2623..aa1e709405ac 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -231,11 +231,6 @@ void unhandled_exception(struct pt_regs *regs, int ea, int vector)
die("Oops", regs, 9);
}
-void __init trap_init(void)
-{
- /* Nothing needs to be done */
-}
-
asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
{
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 3001a7d8fc76..4742b6f169b7 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -10,7 +10,6 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_STRNLEN_USER
select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
@@ -65,7 +64,6 @@ config PARISC
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
- select SET_FS
select TRACE_IRQFLAGS_SUPPORT
help
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index dff453687530..9fe54878167d 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -26,7 +26,7 @@ endif
OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := -X -e startup --as-needed -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC) FORCE
$(call if_changed,ld)
sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\|parisc_kernel_start\)$$/\#define SZ\2 0x\1/p'
@@ -34,7 +34,7 @@ sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\|parisc_kernel_start\
quiet_cmd_sizes = GEN $@
cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
-$(obj)/sizes.h: vmlinux
+$(obj)/sizes.h: vmlinux FORCE
$(call if_changed,sizes)
AFLAGS_head.o += -I$(objtree)/$(obj) -DBOOTLOADER
@@ -70,19 +70,19 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_XZ) := xz
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
$(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
$(call if_changed,ld)
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 7611d48c599e..dd14e3131325 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -111,7 +111,6 @@ CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
# CONFIG_WLAN is not set
-CONFIG_INPUT_POLLDEV=y
CONFIG_KEYBOARD_HIL_OLD=m
CONFIG_KEYBOARD_HIL=m
CONFIG_MOUSE_SERIAL=y
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index b5d90e82b65d..c04f5a637c39 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -163,12 +163,6 @@ struct compat_shmid64_ds {
#define COMPAT_ELF_NGREG 80
typedef compat_ulong_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
-static __inline__ void __user *arch_compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = &current->thread.regs;
- return (void __user *)regs->gr[30];
-}
-
static inline int __is_compat_task(struct task_struct *t)
{
return test_tsk_thread_flag(t, TIF_32BIT);
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index d00313d1274e..0561568f7b48 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -184,7 +184,7 @@ extern int npmem_ranges;
#include <asm-generic/getorder.h>
#include <asm/pdc.h>
-#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
+#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
/* DEFINITION OF THE ZERO-PAGE (PAG0) */
/* based on work by Jason Eckhardt (jason@equator.com) */
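absolute_pointer() keeps the compiler from tracking a hard-coded address back to a (nonexistent) C object and then warning about out-of-bounds accesses through it. In the kernel it is, approximately, a RELOC_HIDE() wrapper:

	/* include/linux/compiler.h, roughly: */
	#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

Because RELOC_HIDE() launders the value through an empty asm, GCC can no longer prove anything about what the pointer refers to, which silences the bogus array-bounds warnings on PAGE0 dereferences.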
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index b5fbcd2c1780..eeb7da064289 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -101,10 +101,6 @@ DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
-typedef struct {
- int seg;
-} mm_segment_t;
-
#define ARCH_MIN_TASKALIGN 8
struct thread_struct {
diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h
index 2b3010ade00e..4b9e3d707571 100644
--- a/arch/parisc/include/asm/rt_sigframe.h
+++ b/arch/parisc/include/asm/rt_sigframe.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_RT_SIGFRAME_H
#define _ASM_PARISC_RT_SIGFRAME_H
-#define SIGRETURN_TRAMP 4
+#define SIGRETURN_TRAMP 3
#define SIGRESTARTBLOCK_TRAMP 5
#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 0bd38a972cea..00ad50fef769 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -11,7 +11,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags; /* thread_info flags (see TIF_*) */
- mm_segment_t addr_limit; /* user-level address space limit */
__u32 cpu; /* current CPU */
int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */
};
@@ -21,7 +20,6 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
- .addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index ed2cd4fb479b..192ad9e11b25 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -11,14 +11,6 @@
#include <linux/bug.h>
#include <linux/string.h>
-#define KERNEL_DS ((mm_segment_t){0})
-#define USER_DS ((mm_segment_t){1})
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
/*
* Note that since kernel addresses are in a separate address space on
* parisc, we don't need to do anything for access_ok().
@@ -33,11 +25,11 @@
#define get_user __get_user
#if !defined(CONFIG_64BIT)
-#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
-#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
+#define LDD_USER(sr, val, ptr) __get_user_asm64(sr, val, ptr)
+#define STD_USER(sr, x, ptr) __put_user_asm64(sr, x, ptr)
#else
-#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
-#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
+#define LDD_USER(sr, val, ptr) __get_user_asm(sr, val, "ldd", ptr)
+#define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr)
#endif
/*
@@ -67,28 +59,15 @@ struct exception_table_entry {
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
-/*
- * load_sr2() preloads the space register %%sr2 - based on the value of
- * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
- * is 0), or with the current value of %%sr3 to access user space (USER_DS)
- * memory. The following __get_user_asm() and __put_user_asm() functions have
- * %%sr2 hard-coded to access the requested memory.
- */
-#define load_sr2() \
- __asm__(" or,= %0,%%r0,%%r0\n\t" \
- " mfsp %%sr3,%0\n\t" \
- " mtsp %0,%%sr2\n\t" \
- : : "r"(get_fs()) : )
-
-#define __get_user_internal(val, ptr) \
+#define __get_user_internal(sr, val, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
\
switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm(val, "ldb", ptr); break; \
- case 2: __get_user_asm(val, "ldh", ptr); break; \
- case 4: __get_user_asm(val, "ldw", ptr); break; \
- case 8: LDD_USER(val, ptr); break; \
+ case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
+ case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
+ case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
+ case 8: LDD_USER(sr, val, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -97,15 +76,14 @@ struct exception_table_entry {
#define __get_user(val, ptr) \
({ \
- load_sr2(); \
- __get_user_internal(val, ptr); \
+ __get_user_internal("%%sr3,", val, ptr); \
})
-#define __get_user_asm(val, ldx, ptr) \
+#define __get_user_asm(sr, val, ldx, ptr) \
{ \
register long __gu_val; \
\
- __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ __asm__("1: " ldx " 0(" sr "%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
@@ -114,9 +92,22 @@ struct exception_table_entry {
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
+#define HAVE_GET_KERNEL_NOFAULT
+#define __get_kernel_nofault(dst, src, type, err_label) \
+{ \
+ type __z; \
+ long __err; \
+ __err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
+ if (unlikely(__err)) \
+ goto err_label; \
+ else \
+ *(type *)(dst) = __z; \
+}
+
+
#if !defined(CONFIG_64BIT)
-#define __get_user_asm64(val, ptr) \
+#define __get_user_asm64(sr, val, ptr) \
{ \
union { \
unsigned long long l; \
@@ -124,8 +115,8 @@ struct exception_table_entry {
} __gu_tmp; \
\
__asm__(" copy %%r0,%R0\n" \
- "1: ldw 0(%%sr2,%2),%0\n" \
- "2: ldw 4(%%sr2,%2),%R0\n" \
+ "1: ldw 0(" sr "%2),%0\n" \
+ "2: ldw 4(" sr "%2),%R0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
@@ -138,16 +129,16 @@ struct exception_table_entry {
#endif /* !defined(CONFIG_64BIT) */
-#define __put_user_internal(x, ptr) \
+#define __put_user_internal(sr, x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm("stb", __x, ptr); break; \
- case 2: __put_user_asm("sth", __x, ptr); break; \
- case 4: __put_user_asm("stw", __x, ptr); break; \
- case 8: STD_USER(__x, ptr); break; \
+ case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
+ case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
+ case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
+ case 8: STD_USER(sr, __x, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -156,10 +147,20 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
({ \
- load_sr2(); \
- __put_user_internal(x, ptr); \
+ __put_user_internal("%%sr3,", x, ptr); \
})
+#define __put_kernel_nofault(dst, src, type, err_label) \
+{ \
+ type __z = *(type *)(src); \
+ long __err; \
+ __err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
+ if (unlikely(__err)) \
+ goto err_label; \
+}
+
+
+
/*
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
@@ -170,26 +171,26 @@ struct exception_table_entry {
* r8 is already listed as err.
*/
-#define __put_user_asm(stx, x, ptr) \
- __asm__ __volatile__ ( \
- "1: " stx " %2,0(%%sr2,%1)\n" \
- "9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__pu_err) \
+#define __put_user_asm(sr, stx, x, ptr) \
+ __asm__ __volatile__ ( \
+ "1: " stx " %2,0(" sr "%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ : "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
-#define __put_user_asm64(__val, ptr) do { \
- __asm__ __volatile__ ( \
- "1: stw %2,0(%%sr2,%1)\n" \
- "2: stw %R2,4(%%sr2,%1)\n" \
- "9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err)); \
+#define __put_user_asm64(sr, __val, ptr) do { \
+ __asm__ __volatile__ ( \
+ "1: stw %2,0(" sr "%1)\n" \
+ "2: stw %R2,4(" sr "%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
@@ -200,14 +201,12 @@ struct exception_table_entry {
*/
extern long strncpy_from_user(char *, const char __user *, long);
-extern unsigned lclear_user(void __user *, unsigned long);
-extern long lstrnlen_user(const char __user *, long);
+extern __must_check unsigned lclear_user(void __user *, unsigned long);
+extern __must_check long strnlen_user(const char __user *src, long n);
/*
* Complex access routines -- macros
*/
-#define user_addr_max() (~0UL)
-#define strnlen_user lstrnlen_user
#define clear_user lclear_user
#define __clear_user lclear_user
@@ -215,8 +214,6 @@ unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
unsigned long len);
-unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
- unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
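The key move in this file is turning the space register into a macro parameter: callers pass the literal string "%%sr3," for user space or "%%sr0," for kernel space, and the string is pasted directly into the load/store template. That is what lets __get_kernel_nofault()/__put_kernel_nofault() reuse the same asm bodies and lets load_sr2()/set_fs() disappear. A minimal sketch of the string-splicing idea, with the exception-table plumbing omitted and the expansion comments illustrative only:

	/* sr is a literal such as "%%sr3," (user) or "%%sr0," (kernel) */
	#define EX_LDW(sr, val, ptr)			\
		__asm__("ldw 0(" sr "%1),%0"		\
			: "=r"(val)			\
			: "r"(ptr))

	/* EX_LDW("%%sr3,", v, up) -> "ldw 0(%sr3,%r26),%r28"  user   */
	/* EX_LDW("%%sr0,", v, kp) -> "ldw 0(%sr0,%r26),%r28"  kernel */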
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 33113ba24054..22924a3f1728 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -230,7 +230,6 @@ int main(void)
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
/* THREAD_SZ_ALGN includes space for a stack frame. */
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index e8a6a751dfd8..00297e8e1c88 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -32,7 +32,6 @@ EXPORT_SYMBOL(__xchg64);
#include <linux/uaccess.h>
EXPORT_SYMBOL(lclear_user);
-EXPORT_SYMBOL(lstrnlen_user);
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 3fb86ee507dd..cceb09855e03 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -150,8 +150,6 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PA11
dma_ops_init();
#endif
-
- clear_sched_clock_stable();
}
/*
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index db1a47cf424d..bbfe23c40c01 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -237,18 +237,22 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
#endif
usp = (regs->gr[30] & ~(0x01UL));
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
if (is_compat_task()) {
/* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
usp = (compat_uint_t)usp;
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
}
#endif
- /*FIXME: frame_size parameter is unused, remove it. */
- frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
+ frame = get_sigframe(&ksig->ka, usp, sigframe_size);
DBG(1,"SETUP_RT_FRAME: START\n");
DBG(1,"setup_rt_frame: frame %p info %p\n", frame, ksig->info);
+ start = (unsigned long) frame;
+ if (start >= user_addr_max() - sigframe_size)
+ return -EFAULT;
#ifdef CONFIG_64BIT
@@ -284,32 +288,21 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
already in userspace. The first words of tramp are used to
save the previous sigrestartblock trampoline that might be
on the stack. We start the sigreturn trampoline at
- SIGRESTARTBLOCK_TRAMP+X. */
+ SIGRESTARTBLOCK_TRAMP. */
err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
- err |= __put_user(INSN_LDI_R20,
- &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
err |= __put_user(INSN_BLE_SR2_R0,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
+ err |= __put_user(INSN_LDI_R20,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
- err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
-
-#if DEBUG_SIG
- /* Assert that we're flushing in the correct space... */
- {
- unsigned long sid;
- asm ("mfsp %%sr3,%0" : "=r" (sid));
- DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
- sid, frame->tramp);
- }
-#endif
- start = (unsigned long) &frame->tramp[0];
- end = (unsigned long) &frame->tramp[TRAMP_SIZE];
+ start = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+0];
+ end = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+3];
flush_user_dcache_range_asm(start, end);
flush_user_icache_range_asm(start, end);
/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
- * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
+ * TRAMP Words 5-7, Length 3 = SIGRETURN_TRAMP
* So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
*/
rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
@@ -353,11 +346,6 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
/* The syscall return path will create IAOQ values from r31.
*/
- sigframe_size = PARISC_RT_SIGFRAME_SIZE;
-#ifdef CONFIG_64BIT
- if (is_compat_task())
- sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
-#endif
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef CONFIG_64BIT
@@ -501,7 +489,6 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
break;
-
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
DBG(1,"ERESTARTSYS: putting -EINTR\n");
@@ -529,6 +516,10 @@ insert_restart_trampoline(struct pt_regs *regs)
unsigned long end = (unsigned long) &usp[5];
long err = 0;
+ /* check that we don't exceed the stack */
+ if (A(&usp[0]) >= user_addr_max() - 5 * sizeof(int))
+ return;
+
/* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*
@@ -569,10 +560,6 @@ insert_restart_trampoline(struct pt_regs *regs)
}
/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- *
* We need to be able to restore the syscall arguments (r21-r26) to
* restart syscalls. Thus, the syscall path should save them in the
* pt_regs structure (it's okay to do so since they are caller-save
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index f166250f2d06..a5bdbb5678b7 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -36,7 +36,7 @@ struct compat_regfile {
compat_int_t rf_sar;
};
-#define COMPAT_SIGRETURN_TRAMP 4
+#define COMPAT_SIGRETURN_TRAMP 3
#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
COMPAT_SIGRESTARTBLOCK_TRAMP)
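Shrinking SIGRETURN_TRAMP from 4 to 3 here matches the signal.c rework above: the ble and the ldi of the syscall number are swapped so the ldi executes in the branch's delay slot, and the trailing nop disappears. A sketch of the resulting trampoline layout (mnemonics paraphrased from the INSN_* constants):

	/*
	 * frame->tramp[] after the change:
	 *   [0..4]  SIGRESTARTBLOCK_TRAMP: saved restart trampoline
	 *   [5]     ldi 0/1, %r25                 in_syscall flag
	 *   [6]     ble 0(%sr2, %r0)              enter the gateway page
	 *   [7]     ldi __NR_rt_sigreturn, %r20   (delay slot)
	 * three SIGRETURN_TRAMP words, no trailing nop
	 */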
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 0bf854b70612..bf751e0732b7 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -292,9 +292,9 @@
258 32 clock_nanosleep sys_clock_nanosleep_time32
258 64 clock_nanosleep sys_clock_nanosleep
259 common tgkill sys_tgkill
-260 common mbind sys_mbind compat_sys_mbind
-261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+260 common mbind sys_mbind
+261 common get_mempolicy sys_get_mempolicy
+262 common set_mempolicy sys_set_mempolicy
# 263 was vserver
264 common add_key sys_add_key
265 common request_key sys_request_key
@@ -331,7 +331,7 @@
292 64 sync_file_range sys_sync_file_range
293 common tee sys_tee
294 common vmsplice sys_vmsplice
-295 common move_pages sys_move_pages compat_sys_move_pages
+295 common move_pages sys_move_pages
296 common getcpu sys_getcpu
297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
298 common statfs64 sys_statfs64 compat_sys_statfs64
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 08e4d480abe1..9fb1e794831b 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -265,6 +265,9 @@ static int __init init_cr16_clocksource(void)
(cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
continue;
+ /* mark sched_clock unstable */
+ clear_sched_clock_stable();
+
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
@@ -272,10 +275,6 @@ static int __init init_cr16_clocksource(void)
}
}
- /* XXX: We may want to mark sched_clock stable here if cr16 clocks are
- * in sync:
- * (clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */
-
/* register at clocksource framework */
clocksource_register_hz(&clocksource_cr16,
100 * PAGE0->mem_10msec);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 8d8441d4562a..747c328fb886 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -859,7 +859,3 @@ void __init early_trap_init(void)
initialize_ivt(&fault_vector_20);
}
-
-void __init trap_init(void)
-{
-}
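This is one of several identical deletions in the series (m68k, hexagon, nds32, nios2, openrisc above; powerpc and riscv below): an empty trap_init() no longer needs to exist per architecture because the generic boot code supplies a weak default. The generic side is just:

	/* init/main.c (sketch of the weak default) */
	void __init __weak trap_init(void)
	{
	}

Architectures with real work to do keep their strong definition; the rest simply define nothing.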
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 36d6a8638ead..b428d29e45fb 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -28,21 +28,6 @@
#include <linux/linkage.h>
/*
- * get_sr gets the appropriate space value into
- * sr1 for kernel/user space access, depending
- * on the flag stored in the task structure.
- */
-
- .macro get_sr
- mfctl %cr30,%r1
- ldw TI_SEGMENT(%r1),%r22
- mfsp %sr3,%r1
- or,<> %r22,%r0,%r0
- copy %r0,%r1
- mtsp %r1,%sr1
- .endm
-
- /*
* unsigned long lclear_user(void *to, unsigned long n)
*
* Returns 0 for success.
@@ -51,10 +36,9 @@
ENTRY_CFI(lclear_user)
comib,=,n 0,%r25,$lclu_done
- get_sr
$lclu_loop:
addib,<> -1,%r25,$lclu_loop
-1: stbs,ma %r0,1(%sr1,%r26)
+1: stbs,ma %r0,1(%sr3,%r26)
$lclu_done:
bv %r0(%r2)
@@ -67,40 +51,6 @@ $lclu_done:
ENDPROC_CFI(lclear_user)
- /*
- * long lstrnlen_user(char *s, long n)
- *
- * Returns 0 if exception before zero byte or reaching N,
- * N+1 if N would be exceeded,
- * else strlen + 1 (i.e. includes zero byte).
- */
-
-ENTRY_CFI(lstrnlen_user)
- comib,= 0,%r25,$lslen_nzero
- copy %r26,%r24
- get_sr
-1: ldbs,ma 1(%sr1,%r26),%r1
-$lslen_loop:
- comib,=,n 0,%r1,$lslen_done
- addib,<> -1,%r25,$lslen_loop
-2: ldbs,ma 1(%sr1,%r26),%r1
-$lslen_done:
- bv %r0(%r2)
- sub %r26,%r24,%r28
-
-$lslen_nzero:
- b $lslen_done
- ldo 1(%r26),%r26 /* special case for N == 0 */
-
-3: b $lslen_done
- copy %r24,%r26 /* reset r26 so 0 is returned on fault */
-
- ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
- ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
-
-ENDPROC_CFI(lstrnlen_user)
-
-
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
*
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 4b75388190b4..ea70a0e08321 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -38,14 +38,6 @@ unsigned long raw_copy_from_user(void *dst, const void __user *src,
}
EXPORT_SYMBOL(raw_copy_from_user);
-unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
-{
- mtsp(get_user_space(), 1);
- mtsp(get_user_space(), 2);
- return pa_memcpy((void __force *)dst, (void __force *)src, len);
-}
-
-
void * memcpy(void * dst,const void *src, size_t count)
{
mtsp(get_kernel_space(), 1);
@@ -54,7 +46,6 @@ void * memcpy(void * dst,const void *src, size_t count)
return dst;
}
-EXPORT_SYMBOL(raw_copy_in_user);
EXPORT_SYMBOL(memcpy);
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6900d0ac2421..089ee3ea55c8 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -35,7 +35,6 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
- -include $(srctree)/include/linux/compiler_attributes.h \
$(LINUXINCLUDE)
ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
BOOTCFLAGS += -fno-stack-protector
endif
+BOOTCFLAGS += -include $(srctree)/include/linux/compiler_attributes.h
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
DTC_FLAGS ?= -p 1024
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
index 0ce2368bd20f..dbfa5e1e3198 100644
--- a/arch/powerpc/include/asm/asm-const.h
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -12,16 +12,6 @@
# define ASM_CONST(x) __ASM_CONST(x)
#endif
-/*
- * Inline assembly memory constraint
- *
- * GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
- *
- */
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define UPD_CONSTR ""
-#else
#define UPD_CONSTR "<>"
-#endif
#endif /* _ASM_POWERPC_ASM_CONST_H */
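Dropping the GCC_VERSION < 50000 fallback makes UPD_CONSTR unconditionally "<>", the pre/post-update memory-constraint modifier that GCC 4.9 mishandled; the kernel's minimum compiler version is now new enough to rely on it. A hedged sketch of the kind of accessor that wants the constraint, with output modifiers in the style of the powerpc MMIO helpers:

	static inline void example_store(unsigned long *p, unsigned long v)
	{
		/* "m<>" lets GCC pick an update-form address; %U0/%X0
		 * append the matching u/x suffix to the mnemonic */
		__asm__ __volatile__("stw%U0%X0 %1,%0"
				     : "=m<>"(*p)
				     : "r"(v));
	}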
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index e33dcf134cdd..7afc96fb6524 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -83,22 +83,6 @@ struct compat_statfs {
#define COMPAT_OFF_T_MAX 0x7fffffff
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = current->thread.regs;
- unsigned long usp = regs->gpr[1];
-
- /*
- * We can't access below the stack pointer in the 32bit ABI and
- * can access 288 bytes in the 64bit big-endian ABI,
- * or 512 bytes with the new ELFv2 little-endian ABI.
- */
- if (!is_32bit_task())
- usp -= USER_REDZONE_SIZE;
-
- return (void __user *) (usp - len);
-}
-
/*
* ipc64_perm is actually 32/64bit clean but since the compat layer refers to
* it we may as well define it.
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 29b55e2e035c..7bef917cc84e 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -330,10 +330,10 @@
256 64 sys_debug_setcontext sys_ni_syscall
256 spu sys_debug_setcontext sys_ni_syscall
# 257 reserved for vserver
-258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
-259 nospu mbind sys_mbind compat_sys_mbind
-260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+258 nospu migrate_pages sys_migrate_pages
+259 nospu mbind sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy
262 nospu mq_open sys_mq_open compat_sys_mq_open
263 nospu mq_unlink sys_mq_unlink
264 32 mq_timedsend sys_mq_timedsend_time32
@@ -381,7 +381,7 @@
298 common faccessat sys_faccessat
299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-301 common move_pages sys_move_pages compat_sys_move_pages
+301 common move_pages sys_move_pages
302 common getcpu sys_getcpu
303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
304 32 utimensat sys_utimensat_time32
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4390f8d72126..aac8c0412ff9 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -2219,11 +2219,6 @@ DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
die("Bad kernel stack pointer", regs, SIGABRT);
}
-void __init trap_init(void)
-{
-}
-
-
#ifdef CONFIG_PPC_EMULATED_STATS
#define WARN_EMULATED_SETUP(type) .type = { .name = #type }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ad198b439222..c3c4e31462ec 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -119,8 +119,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
return rc;
}
-void __ref arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 44246ba59768..91cf23495ccb 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -286,7 +286,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned long memblock_si
{
unsigned long block_sz, start_pfn;
int sections_per_block;
- int i, nid;
+ int i;
start_pfn = base >> PAGE_SHIFT;
@@ -297,10 +297,9 @@ static int pseries_remove_memblock(unsigned long base, unsigned long memblock_si
block_sz = pseries_memory_block_size();
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
- nid = memory_add_physaddr_to_nid(base);
for (i = 0; i < sections_per_block; i++) {
- __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ __remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
base += MIN_MEMORY_BLOCK_SIZE;
}
@@ -387,7 +386,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
block_sz = pseries_memory_block_size();
- __remove_memory(mem_block->nid, lmb->base_addr, block_sz);
+ __remove_memory(lmb->base_addr, block_sz);
put_device(&mem_block->dev);
/* Update memory regions for memory remove */
@@ -660,7 +659,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = dlpar_online_lmb(lmb);
if (rc) {
- __remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
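Together with the ia64 and powerpc arch_remove_memory() hunks above, this completes the removal of the node-id argument from the memory-removal path: the core derives the node from the range itself, so callers no longer look the nid up just to pass it along. Before and after, as a sketch:

	/* before: caller supplied (and had to trust) a node id */
	nid = memory_add_physaddr_to_nid(base);
	__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);

	/* after: the range alone identifies the memory to remove */
	__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);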
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index aac669a6c3d8..301a54233c7e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -41,6 +41,7 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
+ select BUILDTIME_TABLE_SORT if MMU
select CLONE_BACKWARDS
select CLINT_TIMER if !MMU
select COMMON_CLK
@@ -51,7 +52,7 @@ config RISCV
select GENERIC_EARLY_IOREMAP
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
select GENERIC_IDLE_POLL_SETUP
- select GENERIC_IOREMAP
+ select GENERIC_IOREMAP if MMU
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -235,7 +236,7 @@ config ARCH_RV32I
config ARCH_RV64I
bool "RV64I"
select 64BIT
- select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 01906a90c501..0eb4568fbd29 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -132,8 +132,11 @@ $(BOOT_TARGETS): vmlinux
Image.%: Image
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-zinstall install:
- $(Q)$(MAKE) $(build)=$(boot) $@
+install: install-image = Image
+zinstall: install-image = Image.gz
+install zinstall:
+ $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
+ $(boot)/$(install-image) System.map "$(INSTALL_PATH)"
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 6bf299f70c27..becd0621071c 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -58,11 +58,3 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/loader.bin: $(obj)/loader FORCE
$(call if_changed,objcopy)
-
-install:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
- $(obj)/Image System.map "$(INSTALL_PATH)"
-
-zinstall:
- $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
- $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
index baea7d204639..b254c60589a1 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -16,10 +16,14 @@
aliases {
ethernet0 = &emac1;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
};
chosen {
- stdout-path = &serial0;
+ stdout-path = "serial0:115200n8";
};
cpus {
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index bc68231a8fb7..4ebc80315f01 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -39,10 +39,12 @@ CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
+CONFIG_PCIE_FU740=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_VIRTIO=y
@@ -108,6 +110,8 @@ CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=m
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index f4b490cd0e5d..f53c40026c7a 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -42,6 +42,9 @@
*/
#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
+#ifdef CONFIG_64BIT
+#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
+#endif
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index d86781357044..90deabfe63ea 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -113,7 +113,7 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf,
}
}
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct device_node *np = of_cpu_device_node_get(cpu);
@@ -155,7 +155,7 @@ static int __init_cache_level(unsigned int cpu)
return 0;
}
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
@@ -187,6 +187,3 @@ static int __populate_cache_leaves(unsigned int cpu)
return 0;
}
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0a98fd0ddfe9..0daaa3e4630d 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -199,11 +199,6 @@ int is_valid_bugaddr(unsigned long pc)
}
#endif /* CONFIG_GENERIC_BUG */
-/* stvec & scratch is already set from head.S */
-void __init trap_init(void)
-{
-}
-
#ifdef CONFIG_VMAP_STACK
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index af776555ded9..9c9f35091ef0 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -121,7 +121,6 @@ SECTIONS
}
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
- EXCEPTION_TABLE(0x10)
.rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) {
*(.rel.dyn*)
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 502d0826ecb1..5104f3a871e3 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -4,6 +4,8 @@
* Copyright (C) 2017 SiFive
*/
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else
@@ -112,8 +114,6 @@ SECTIONS
*(.srodata*)
}
- EXCEPTION_TABLE(0x10)
-
. = ALIGN(SECTION_ALIGN);
_data = .;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5af51ace0588..2bd90c51efd3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -110,6 +110,7 @@ config S390
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 11ffc7c37ada..37b6115ed80e 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -804,6 +804,7 @@ CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_KFENCE=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index e1642d2cba59..56a1cc85c5d7 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -397,7 +397,6 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-# CONFIG_BLK_DEV_XPRAM is not set
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index d576aaab27c9..aceccf3b9a88 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -35,7 +35,6 @@ CONFIG_NET=y
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 8d49505b4a43..cdc7ae72529d 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -176,16 +176,6 @@ static inline int is_compat_task(void)
return test_thread_flag(TIF_31BIT);
}
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- unsigned long stack;
-
- stack = KSTK_ESP(current);
- if (is_compat_task())
- stack &= 0x7fffffffUL;
- return (void __user *) (stack - len);
-}
-
#endif
struct compat_ipc64_perm {
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
index ca0e0e5ddbc4..f87a4788c19c 100644
--- a/arch/s390/include/asm/cpu_mcf.h
+++ b/arch/s390/include/asm/cpu_mcf.h
@@ -24,13 +24,6 @@ enum cpumf_ctr_set {
#define CPUMF_LCCTL_ENABLE_SHIFT 16
#define CPUMF_LCCTL_ACTCTL_SHIFT 0
-static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
- [CPUMF_CTR_SET_BASIC] = 0x02,
- [CPUMF_CTR_SET_USER] = 0x04,
- [CPUMF_CTR_SET_CRYPTO] = 0x08,
- [CPUMF_CTR_SET_EXT] = 0x01,
- [CPUMF_CTR_SET_MT_DIAG] = 0x20,
-};
static inline void ctr_set_enable(u64 *state, u64 ctrsets)
{
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index e317fd4866c1..f16f4d054ae2 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -18,6 +18,7 @@ extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
extern unsigned int smp_cpu_mtid;
extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
+extern cpumask_t cpu_setup_mask;
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 3d8a4b94c620..dd00d98804ec 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -34,16 +34,6 @@ static inline bool on_stack(struct stack_info *info,
return addr >= info->begin && addr + len <= info->end;
}
-static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
- struct pt_regs *regs)
-{
- if (regs)
- return (unsigned long) kernel_stack_pointer(regs);
- if (task == current)
- return current_stack_pointer();
- return (unsigned long) task->thread.ksp;
-}
-
/*
* Stack layout of a C stack frame.
*/
@@ -74,6 +64,16 @@ struct stack_frame {
((unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain))
+static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if (regs)
+ return (unsigned long)kernel_stack_pointer(regs);
+ if (task == current)
+ return current_frame_address();
+ return (unsigned long)task->thread.ksp;
+}
+
/*
* To keep this simple, mark registers 2-6 as being changed (volatile)
* by the called function, even though register 6 is saved/nonvolatile.
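get_stack_pointer() moves below the struct stack_frame block because it now uses current_frame_address(), the macro defined from __builtin_frame_address(0) a few lines above it, and C only sees a macro after its definition. Simplified outside the kernel (the s390 version additionally subtracts the back-chain offset, as the hunk shows):

	#define current_frame_address() \
		((unsigned long)__builtin_frame_address(0))

	static inline unsigned long stack_pointer_here(void)
	{
		/* frame address of the caller's current frame */
		return current_frame_address();
	}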
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9ed9aa37e836..ce550d06abc3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -227,9 +227,6 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
__get_user(x, ptr); \
})
-unsigned long __must_check
-raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-
/*
* Copy a null terminated string from userspace.
*/
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index de9006b0cfeb..5ebf534ef753 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -55,10 +55,10 @@ static inline bool unwind_error(struct unwind_state *state)
return state->error;
}
-static inline void unwind_start(struct unwind_state *state,
- struct task_struct *task,
- struct pt_regs *regs,
- unsigned long first_frame)
+static __always_inline void unwind_start(struct unwind_state *state,
+ struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long first_frame)
{
task = task ?: current;
first_frame = first_frame ?: get_stack_pointer(task, regs);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b9716a7e326d..4c9b967290ae 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -140,10 +140,10 @@ _LPP_OFFSET = __LC_LPP
TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
jnz \errlabel
TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
- jz oklabel\@
+ jz .Loklabel\@
TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
jnz \errlabel
-oklabel\@:
+.Loklabel\@:
.endm
#if IS_ENABLED(CONFIG_KVM)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 0a464d328467..1d94ffdf347b 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -341,13 +341,13 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
- brcl_disable(__va(ftrace_graph_caller));
+ brcl_disable(ftrace_graph_caller);
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
- brcl_enable(__va(ftrace_graph_caller));
+ brcl_enable(ftrace_graph_caller);
return 0;
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 2e3bb633acf6..4a99154fe651 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -158,6 +158,14 @@ static size_t cfdiag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
return need;
}
+static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
+ [CPUMF_CTR_SET_BASIC] = 0x02,
+ [CPUMF_CTR_SET_USER] = 0x04,
+ [CPUMF_CTR_SET_CRYPTO] = 0x08,
+ [CPUMF_CTR_SET_EXT] = 0x01,
+ [CPUMF_CTR_SET_MT_DIAG] = 0x20,
+};
+
/* Read out all counter sets and save them in the provided data buffer.
* The last 64 bytes hold an artificial trailer entry.
*/
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5a01872f5984..67e5fff96ee0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
+#include <linux/kmemleak.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
@@ -356,9 +357,12 @@ void *restart_stack;
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
- return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
- THREADINFO_GFP, NUMA_NO_NODE,
- __builtin_return_address(0));
+ void *ret;
+
+ ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
+ NUMA_NO_NODE, __builtin_return_address(0));
+ kmemleak_not_leak(ret);
+ return (unsigned long)ret;
#else
return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2a991e43ead3..1a04e5bdf655 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -95,6 +95,7 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif
static unsigned int smp_max_threads __initdata = -1U;
+cpumask_t cpu_setup_mask;
static int __init early_nosmt(char *s)
{
@@ -902,13 +903,14 @@ static void smp_start_secondary(void *cpuvoid)
vtime_init();
vdso_getcpu_init();
pfault_init();
+ cpumask_set_cpu(cpu, &cpu_setup_mask);
+ update_cpu_masks();
notify_cpu_starting(cpu);
if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
set_cpu_online(cpu, true);
- update_cpu_masks();
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -950,10 +952,13 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
unsigned long cregs[16];
+ int cpu;
/* Handle possible pending IPIs */
smp_handle_ext_call();
- set_cpu_online(smp_processor_id(), false);
+ cpu = smp_processor_id();
+ set_cpu_online(cpu, false);
+ cpumask_clear_cpu(cpu, &cpu_setup_mask);
update_cpu_masks();
/* Disable pseudo page faults on this cpu. */
pfault_fini();
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index aa9d68b8ee14..df5261e5cfe1 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -274,9 +274,9 @@
265 common statfs64 sys_statfs64 compat_sys_statfs64
266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
-268 common mbind sys_mbind compat_sys_mbind
-269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+268 common mbind sys_mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy
271 common mq_open sys_mq_open compat_sys_mq_open
272 common mq_unlink sys_mq_unlink sys_mq_unlink
273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
@@ -293,7 +293,7 @@
284 common inotify_init sys_inotify_init sys_inotify_init
285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
-287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+287 common migrate_pages sys_migrate_pages sys_migrate_pages
288 common openat sys_openat compat_sys_openat
289 common mkdirat sys_mkdirat sys_mkdirat
290 common mknodat sys_mknodat sys_mknodat
@@ -317,7 +317,7 @@
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
308 common tee sys_tee sys_tee
309 common vmsplice sys_vmsplice sys_vmsplice
-310 common move_pages sys_move_pages compat_sys_move_pages
+310 common move_pages sys_move_pages sys_move_pages
311 common getcpu sys_getcpu sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
313 common utimes sys_utimes sys_utimes_time32
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index d2458a29618f..58f8291950cb 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -67,7 +67,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
static cpumask_t mask;
cpumask_clear(&mask);
- if (!cpu_online(cpu))
+ if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out;
cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
@@ -88,7 +88,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
case TOPOLOGY_MODE_SINGLE:
break;
}
- cpumask_and(&mask, &mask, cpu_online_mask);
+ cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
cpumask_copy(dst, &mask);
}
@@ -99,16 +99,16 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
int i;
cpumask_clear(&mask);
- if (!cpu_online(cpu))
+ if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out;
cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
- for (i = 0; i <= smp_cpu_mtid; i++)
- if (cpu_present(cpu + i))
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
cpumask_set_cpu(cpu + i, &mask);
- cpumask_and(&mask, &mask, cpu_online_mask);
+ }
out:
cpumask_copy(dst, &mask);
}
@@ -569,6 +569,7 @@ void __init topology_init_early(void)
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
out:
+ cpumask_set_cpu(0, &cpu_setup_mask);
__arch_update_cpu_topology();
__arch_update_dedicated_flag(NULL);
}
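
Read together with the smp.c hunks above: a CPU now joins cpu_setup_mask (and refreshes the topology masks) before it is marked online, and leaves the mask again in __cpu_disable(), so the topology code keys off setup state rather than online state. Condensed from the hunks above, not new behaviour:

    /* smp_start_secondary(): join before going online */
    cpumask_set_cpu(cpu, &cpu_setup_mask);
    update_cpu_masks();
    /* ... */
    set_cpu_online(cpu, true);

    /* __cpu_disable(): leave after going offline */
    set_cpu_online(cpu, false);
    cpumask_clear_cpu(cpu, &cpu_setup_mask);
    update_cpu_masks();
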
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 94ca99bde59d..a596e69d3c47 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -204,69 +204,6 @@ unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long
}
EXPORT_SYMBOL(raw_copy_to_user);
-static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
- unsigned long size)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- /* FIXME: copy with reduced length. */
- asm volatile(
- " lgr 0,%[spec]\n"
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
- " jz 2f\n"
- "1: algr %0,%3\n"
- " slgr %1,%3\n"
- " slgr %2,%3\n"
- " j 0b\n"
- "2:slgr %0,%0\n"
- "3: \n"
- EX_TABLE(0b,3b)
- : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
- : [spec] "d" (0x810081UL)
- : "cc", "memory", "0");
- return size;
-}
-
-static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
- unsigned long size)
-{
- unsigned long tmp1;
-
- asm volatile(
- " sacf 256\n"
- " aghi %0,-1\n"
- " jo 5f\n"
- " bras %3,3f\n"
- "0: aghi %0,257\n"
- "1: mvc 0(1,%1),0(%2)\n"
- " la %1,1(%1)\n"
- " la %2,1(%2)\n"
- " aghi %0,-1\n"
- " jnz 1b\n"
- " j 5f\n"
- "2: mvc 0(256,%1),0(%2)\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "3: aghi %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,1b-0b(%3)\n"
- "5: slgr %0,%0\n"
- "6: sacf 768\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
- : : "cc", "memory");
- return size;
-}
-
-unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- if (copy_with_mvcos())
- return copy_in_user_mvcos(to, from, n);
- return copy_in_user_mvc(to, from, n);
-}
-EXPORT_SYMBOL(raw_copy_in_user);
-
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
unsigned long tmp1, tmp2;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 9bb2c7512cd5..4d3b33ce81c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -27,7 +27,6 @@
/**
* gmap_alloc - allocate and initialize a guest address space
- * @mm: pointer to the parent mm_struct
* @limit: maximum address of the gmap address space
*
* Returns a guest address space structure.
@@ -504,7 +503,7 @@ EXPORT_SYMBOL_GPL(gmap_translate);
/**
* gmap_unlink - disconnect a page table from the gmap shadow tables
- * @gmap: pointer to guest mapping meta data structure
+ * @mm: pointer to the parent mm_struct
* @table: pointer to the host page table
* @vmaddr: vm address associated with the host page table
*/
@@ -527,7 +526,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
unsigned long gaddr);
/**
- * gmap_link - set up shadow page tables to connect a host to a guest address
+ * __gmap_link - set up shadow page tables to connect a host to a guest address
* @gmap: pointer to guest mapping meta data structure
* @gaddr: guest address
* @vmaddr: vm address
@@ -1971,7 +1970,7 @@ out_free:
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
- * gmap_shadow_lookup_pgtable - find a shadow page table
+ * gmap_shadow_pgt_lookup - find a shadow page table
* @sg: pointer to the shadow guest address space structure
* @saddr: the address in the shadow guest address space
* @pgt: parent gmap address of the page table to get shadowed
@@ -2165,7 +2164,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
-/**
+/*
* gmap_shadow_notify - handle notifications for shadow gmap
*
* Called with sg->parent->shadow_lock.
@@ -2225,7 +2224,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
/**
* ptep_notify - call all invalidation callbacks for a specific pte.
* @mm: pointer to the process mm_struct
- * @addr: virtual address in the process address space
+ * @vmaddr: virtual address in the process address space
* @pte: pointer to the page table entry
* @bits: bits from the pgste that caused the notify call
*
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f14e7e61cd8e..a04faf49001a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -307,8 +307,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return rc;
}
-void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index eec3a9d7176e..034721a68d8f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -834,7 +834,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(set_guest_storage_key);
-/**
+/*
* Conditionally set a guest storage key (handling csske).
* oldkey will be updated when either mr or mc is set and a pointer is given.
*
@@ -867,7 +867,7 @@ int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
-/**
+/*
* Reset a guest reference bit (rrbe), returning the reference and changed bit.
*
* Returns < 0 in case of error, otherwise the cc to be reported to the guest.
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 51dc2215a2b7..be077b39da33 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -383,8 +383,8 @@ static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
rc = clp_list_pci_req(rrb, &resume_token, &nentries);
if (rc)
return rc;
+ fh_list = rrb->response.fh_list;
for (i = 0; i < nentries; i++) {
- fh_list = rrb->response.fh_list;
if (fh_list[i].fid == fid) {
*entry = fh_list[i];
return 0;
@@ -449,14 +449,17 @@ int clp_get_state(u32 fid, enum zpci_state *state)
struct clp_fh_list_entry entry;
int rc;
- *state = ZPCI_FN_STATE_RESERVED;
rrb = clp_alloc_block(GFP_ATOMIC);
if (!rrb)
return -ENOMEM;
rc = clp_find_pci(rrb, fid, &entry);
- if (!rc)
+ if (!rc) {
*state = entry.config_state;
+ } else if (rc == -ENODEV) {
+ *state = ZPCI_FN_STATE_RESERVED;
+ rc = 0;
+ }
clp_free_block(rrb);
return rc;
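
The reworked clp_get_state() only reports ZPCI_FN_STATE_RESERVED when the function is genuinely absent from the CLP list, instead of preloading that state and possibly returning it alongside a real error. A caller-side sketch of the resulting contract (the caller shown is hypothetical):

    /* Hypothetical caller: rc != 0 now always means the query itself
     * failed; a missing function comes back as rc == 0 plus RESERVED. */
    enum zpci_state state;
    int rc = clp_get_state(fid, &state);

    if (rc)
            return rc;                      /* CLP request failed */
    if (state == ZPCI_FN_STATE_RESERVED)
            zpci_device_reserved(zdev);     /* assumed follow-up */
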
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ce26c7f8950a..506784702430 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -414,8 +414,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return ret;
}
-void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 8b63410e830f..bd949fcf9d63 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -116,25 +116,6 @@ struct compat_statfs {
#define COMPAT_OFF_T_MAX 0x7fffffff
-#ifdef CONFIG_COMPAT
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = current_thread_info()->kregs;
- unsigned long usp = regs->u_regs[UREG_I6];
-
- if (test_thread_64bit_stack(usp))
- usp += STACK_BIAS;
-
- if (test_thread_flag(TIF_32BIT))
- usp &= 0xffffffffUL;
-
- usp -= len;
- usp &= ~0x7UL;
-
- return (void __user *) usp;
-}
-#endif
-
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 8e645ddac58e..30f171b7b00c 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -39,6 +39,7 @@ struct mdesc_hdr {
u32 node_sz; /* node block size */
u32 name_sz; /* name block size */
u32 data_sz; /* data block size */
+ char data[];
} __attribute__((aligned(16)));
struct mdesc_elem {
@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
- return (struct mdesc_elem *) (mdesc + 1);
+ return (struct mdesc_elem *) mdesc->data;
}
static void *name_block(struct mdesc_hdr *mdesc)
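
Replacing the `(mdesc + 1)` arithmetic with a flexible array member gives the compiler (and FORTIFY/W=1 bounds checks) a named object to reason about. The general pattern, as a standalone sketch:

    /* General pattern: a header immediately followed by payload. */
    struct hdr {
            u32 len;
            char data[];            /* flexible array member */
    };

    static void *payload(struct hdr *h)
    {
            return h->data;         /* was: (void *)(h + 1) */
    }
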
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 093849bfda50..d1cc410d2f64 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -455,7 +455,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
distance = fp - psp;
rval = (csp - distance);
- if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
+ if (raw_copy_in_user((void __user *)rval, (void __user *)psp, distance))
rval = 0;
else if (!stack_64bit) {
if (put_user(((u32)csp),
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 4276b9e003ca..6cc124a3bb98 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -435,9 +435,9 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
if (!wsaved) {
- err |= copy_in_user((u32 __user *)sf,
- (u32 __user *)(regs->u_regs[UREG_FP]),
- sizeof(struct reg_window32));
+ err |= raw_copy_in_user((u32 __user *)sf,
+ (u32 __user *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
} else {
struct reg_window *rp;
@@ -567,9 +567,9 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
err |= put_compat_sigset(&sf->mask, oldset, sizeof(compat_sigset_t));
if (!wsaved) {
- err |= copy_in_user((u32 __user *)sf,
- (u32 __user *)(regs->u_regs[UREG_FP]),
- sizeof(struct reg_window32));
+ err |= raw_copy_in_user((u32 __user *)sf,
+ (u32 __user *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
} else {
struct reg_window *rp;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index cea23cf95600..2a78d2af1265 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -406,10 +406,10 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t));
if (!wsaved) {
- err |= copy_in_user((u64 __user *)sf,
- (u64 __user *)(regs->u_regs[UREG_FP] +
- STACK_BIAS),
- sizeof(struct reg_window));
+ err |= raw_copy_in_user((u64 __user *)sf,
+ (u64 __user *)(regs->u_regs[UREG_FP] +
+ STACK_BIAS),
+ sizeof(struct reg_window));
} else {
struct reg_window *rp;
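
With copy_in_user() removed from the kernel (see the uaccess hunks in this patch), these signal paths call raw_copy_in_user() directly. The raw variant skips access_ok(); a hedged sketch of a checked wrapper, in case a call site on an architecture that still provides the raw variant cannot rely on prior validation:

    /* Sketch only: re-adding the checks that the raw variant omits. */
    static inline unsigned long
    checked_copy_in_user(void __user *to, const void __user *from,
                         unsigned long n)
    {
            if (!access_ok(to, n) || !access_ok(from, n))
                    return n;               /* nothing copied */
            return raw_copy_in_user(to, from, n);
    }
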
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 7893104718c2..c37764dc764d 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -365,12 +365,12 @@
299 common unshare sys_unshare
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
-302 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
-303 common mbind sys_mbind compat_sys_mbind
-304 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-305 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+302 common migrate_pages sys_migrate_pages
+303 common mbind sys_mbind
+304 common get_mempolicy sys_get_mempolicy
+305 common set_mempolicy sys_set_mempolicy
306 common kexec_load sys_kexec_load compat_sys_kexec_load
-307 common move_pages sys_move_pages compat_sys_move_pages
+307 common move_pages sys_move_pages
308 common getcpu sys_getcpu
309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
310 32 utimensat sys_utimensat_time32
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 8de2646fbdb1..c18b45f75d41 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -24,6 +24,7 @@ config UML
select SET_FS
select TRACE_IRQFLAGS_SUPPORT
select TTY # Needed for line.c
+ select HAVE_ARCH_VMAP_STACK
config MMU
bool
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index 0b802834f40a..c08066633023 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -56,6 +56,13 @@ static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
#define UM_VIRT_PCI_MAXDELAY 40000
+struct um_pci_message_buffer {
+ struct virtio_pcidev_msg hdr;
+ u8 data[8];
+};
+
+static struct um_pci_message_buffer __percpu *um_pci_msg_bufs;
+
static int um_pci_send_cmd(struct um_pci_device *dev,
struct virtio_pcidev_msg *cmd,
unsigned int cmd_size,
@@ -68,11 +75,12 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
[1] = extra ? &extra_sg : &in_sg,
[2] = extra ? &in_sg : NULL,
};
+ struct um_pci_message_buffer *buf;
int delay_count = 0;
int ret, len;
bool posted;
- if (WARN_ON(cmd_size < sizeof(*cmd)))
+ if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
return -EINVAL;
switch (cmd->op) {
@@ -88,6 +96,9 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
break;
}
+ buf = get_cpu_var(um_pci_msg_bufs);
+ memcpy(buf, cmd, cmd_size);
+
if (posted) {
u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);
@@ -102,7 +113,10 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
} else {
/* try without allocating memory */
posted = false;
+ cmd = (void *)buf;
}
+ } else {
+ cmd = (void *)buf;
}
sg_init_one(&out_sg, cmd, cmd_size);
@@ -118,11 +132,12 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
posted ? cmd : HANDLE_NO_FREE(cmd),
GFP_ATOMIC);
if (ret)
- return ret;
+ goto out;
if (posted) {
virtqueue_kick(dev->cmd_vq);
- return 0;
+ ret = 0;
+ goto out;
}
/* kick and poll for getting a response on the queue */
@@ -148,6 +163,8 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
}
clear_bit(UM_PCI_STAT_WAITING, &dev->status);
+out:
+ put_cpu_var(um_pci_msg_bufs);
return ret;
}
@@ -161,12 +178,17 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
.size = size,
.addr = offset,
};
- /* maximum size - we may only use parts of it */
- u8 data[8];
+ /* buf->data is maximum size - we may only use parts of it */
+ struct um_pci_message_buffer *buf;
+ u8 *data;
+ unsigned long ret = ~0ULL;
if (!dev)
return ~0ULL;
+ buf = get_cpu_var(um_pci_msg_bufs);
+ data = buf->data;
+
memset(data, 0xff, sizeof(data));
switch (size) {
@@ -179,27 +201,34 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
break;
default:
WARN(1, "invalid config space read size %d\n", size);
- return ~0ULL;
+ goto out;
}
- if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0,
- data, sizeof(data)))
- return ~0ULL;
+ if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, 8))
+ goto out;
switch (size) {
case 1:
- return data[0];
+ ret = data[0];
+ break;
case 2:
- return le16_to_cpup((void *)data);
+ ret = le16_to_cpup((void *)data);
+ break;
case 4:
- return le32_to_cpup((void *)data);
+ ret = le32_to_cpup((void *)data);
+ break;
#ifdef CONFIG_64BIT
case 8:
- return le64_to_cpup((void *)data);
+ ret = le64_to_cpup((void *)data);
+ break;
#endif
default:
- return ~0ULL;
+ break;
}
+
+out:
+ put_cpu_var(um_pci_msg_bufs);
+ return ret;
}
static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
@@ -272,8 +301,13 @@ static void um_pci_bar_copy_from(void *priv, void *buffer,
static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
int size)
{
- /* maximum size - we may only use parts of it */
- u8 data[8];
+ /* buf->data is maximum size - we may only use parts of it */
+ struct um_pci_message_buffer *buf;
+ u8 *data;
+ unsigned long ret = ~0ULL;
+
+ buf = get_cpu_var(um_pci_msg_bufs);
+ data = buf->data;
switch (size) {
case 1:
@@ -285,25 +319,33 @@ static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
break;
default:
WARN(1, "invalid config space read size %d\n", size);
- return ~0ULL;
+ goto out;
}
um_pci_bar_copy_from(priv, data, offset, size);
switch (size) {
case 1:
- return data[0];
+ ret = data[0];
+ break;
case 2:
- return le16_to_cpup((void *)data);
+ ret = le16_to_cpup((void *)data);
+ break;
case 4:
- return le32_to_cpup((void *)data);
+ ret = le32_to_cpup((void *)data);
+ break;
#ifdef CONFIG_64BIT
case 8:
- return le64_to_cpup((void *)data);
+ ret = le64_to_cpup((void *)data);
+ break;
#endif
default:
- return ~0ULL;
+ break;
}
+
+out:
+ put_cpu_var(um_pci_msg_bufs);
+ return ret;
}
static void um_pci_bar_copy_to(void *priv, unsigned int offset,
@@ -810,7 +852,7 @@ void *pci_root_bus_fwnode(struct pci_bus *bus)
return um_pci_fwnode;
}
-int um_pci_init(void)
+static int um_pci_init(void)
{
int err, i;
@@ -823,10 +865,16 @@ int um_pci_init(void)
"No virtio device ID configured for PCI - no PCI support\n"))
return 0;
- bridge = pci_alloc_host_bridge(0);
- if (!bridge)
+ um_pci_msg_bufs = alloc_percpu(struct um_pci_message_buffer);
+ if (!um_pci_msg_bufs)
return -ENOMEM;
+ bridge = pci_alloc_host_bridge(0);
+ if (!bridge) {
+ err = -ENOMEM;
+ goto free;
+ }
+
um_pci_fwnode = irq_domain_alloc_named_fwnode("um-pci");
if (!um_pci_fwnode) {
err = -ENOMEM;
@@ -878,18 +926,22 @@ free:
irq_domain_remove(um_pci_inner_domain);
if (um_pci_fwnode)
irq_domain_free_fwnode(um_pci_fwnode);
- pci_free_resource_list(&bridge->windows);
- pci_free_host_bridge(bridge);
+ if (bridge) {
+ pci_free_resource_list(&bridge->windows);
+ pci_free_host_bridge(bridge);
+ }
+ free_percpu(um_pci_msg_bufs);
return err;
}
module_init(um_pci_init);
-void um_pci_exit(void)
+static void um_pci_exit(void)
{
unregister_virtio_driver(&um_pci_virtio_driver);
irq_domain_remove(um_pci_msi_domain);
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
pci_free_host_bridge(bridge);
+ free_percpu(um_pci_msg_bufs);
}
module_exit(um_pci_exit);
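
The driver now stages all commands and config/BAR data in per-CPU buffers instead of on-stack arrays, keeping the virtio scatterlists off the stack. The allocation/use pattern in isolation, as a sketch (the canonical accessors for a dynamically allocated per-CPU object are get_cpu_ptr()/put_cpu_ptr(); the driver itself uses the get_cpu_var() form):

    #include <linux/percpu.h>

    struct msg_buf { u8 data[64]; };
    static struct msg_buf __percpu *bufs;

    static int bufs_init(void)
    {
            bufs = alloc_percpu(struct msg_buf);
            return bufs ? 0 : -ENOMEM;
    }

    static void use_buf(void)
    {
            struct msg_buf *b = get_cpu_ptr(bufs);  /* disables preemption */

            /* ... stage the message in b->data and wait inline ... */
            put_cpu_ptr(bufs);
    }

    static void bufs_exit(void)
    {
            free_percpu(bufs);
    }
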
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index 4412d6febade..d51e445df797 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -27,6 +27,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
+#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
@@ -1139,7 +1140,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
rc = os_connect_socket(pdata->socket_path);
} while (rc == -EINTR);
if (rc < 0)
- return rc;
+ goto error_free;
vu_dev->sock = rc;
spin_lock_init(&vu_dev->sock_lock);
@@ -1160,6 +1161,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
error_init:
os_close_file(vu_dev->sock);
+error_free:
+ kfree(vu_dev);
return rc;
}
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index 5afac0fef24e..ff5061f29167 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -24,8 +24,7 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_clone_handler(void)
{
- int stack;
- struct stub_data *data = (void *) ((unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1));
+ struct stub_data *data = get_stub_page();
long err;
err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index ad12f78bda7e..3198c4767387 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -311,7 +311,3 @@ void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
do_IRQ(WINCH_IRQ, regs);
}
-
-void trap_init(void)
-{
-}
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 9c9c4a888b1d..e81885384f60 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -156,7 +156,6 @@ CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_R8169=y
-CONFIG_INPUT_POLLDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TABLET=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index b60bd2d86034..e8a7a0af2bda 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -148,7 +148,6 @@ CONFIG_SKY2=y
CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
CONFIG_R8169=y
-CONFIG_INPUT_POLLDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TABLET=y
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 61f18b72552b..960a021d543e 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -286,7 +286,7 @@
272 i386 fadvise64_64 sys_ia32_fadvise64_64
273 i386 vserver
274 i386 mbind sys_mbind
-275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+275 i386 get_mempolicy sys_get_mempolicy
276 i386 set_mempolicy sys_set_mempolicy
277 i386 mq_open sys_mq_open compat_sys_mq_open
278 i386 mq_unlink sys_mq_unlink
@@ -328,7 +328,7 @@
314 i386 sync_file_range sys_ia32_sync_file_range
315 i386 tee sys_tee
316 i386 vmsplice sys_vmsplice
-317 i386 move_pages sys_move_pages compat_sys_move_pages
+317 i386 move_pages sys_move_pages
318 i386 getcpu sys_getcpu
319 i386 epoll_pwait sys_epoll_pwait
320 i386 utimensat sys_utimensat_time32
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 807b6a1de8e8..18b5500ea8bf 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -398,7 +398,7 @@
530 x32 set_robust_list compat_sys_set_robust_list
531 x32 get_robust_list compat_sys_get_robust_list
532 x32 vmsplice sys_vmsplice
-533 x32 move_pages compat_sys_move_pages
+533 x32 move_pages sys_move_pages
534 x32 preadv compat_sys_preadv64
535 x32 pwritev compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 90e682a92820..32a1ad356c18 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
/*
* IPI implementation on Hyper-V.
*/
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+ bool exclude_self)
{
struct hv_send_ipi_ex **arg;
struct hv_send_ipi_ex *ipi_arg;
@@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
if (!cpumask_equal(mask, cpu_present_mask)) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+ if (exclude_self)
+ nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+ else
+ nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
}
if (nr_bank < 0)
goto ipi_mask_ex_done;
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
return hv_result_success(status);
}
-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+ bool exclude_self)
{
- int cur_cpu, vcpu;
+ int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg;
u64 status;
+ unsigned int weight;
trace_hyperv_send_ipi_mask(mask, vector);
- if (cpumask_empty(mask))
+ weight = cpumask_weight(mask);
+
+ /*
+ * Do nothing if
+ * 1. the mask is empty
+ * 2. the mask only contains self when exclude_self is true
+ */
+ if (weight == 0 ||
+ (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
return true;
if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) {
+ if (exclude_self && cur_cpu == this_cpu)
+ continue;
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
return hv_result_success(status);
do_ex_hypercall:
- return __send_ipi_mask_ex(mask, vector);
+ return __send_ipi_mask_ex(mask, vector, exclude_self);
}
static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false;
if (vp >= 64)
- return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+ return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{
- if (!__send_ipi_mask(mask, vector))
+ if (!__send_ipi_mask(mask, vector, false))
orig_apic.send_IPI_mask(mask, vector);
}
static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
- unsigned int this_cpu = smp_processor_id();
- struct cpumask new_mask;
- const struct cpumask *local_mask;
-
- cpumask_copy(&new_mask, mask);
- cpumask_clear_cpu(this_cpu, &new_mask);
- local_mask = &new_mask;
- if (!__send_ipi_mask(local_mask, vector))
+ if (!__send_ipi_mask(mask, vector, true))
orig_apic.send_IPI_mask_allbutself(mask, vector);
}
@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
static void hv_send_ipi_all(int vector)
{
- if (!__send_ipi_mask(cpu_online_mask, vector))
+ if (!__send_ipi_mask(cpu_online_mask, vector, false))
orig_apic.send_IPI_all(vector);
}
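
Folding the allbutself case into __send_ipi_mask() removes the on-stack cpumask copy visible in the deleted hunk (NR_CPUS-sized, unfriendly to large configs) and replaces it with an exclude_self flag. The early-out condition in isolation:

    /* Sketch of the no-op test used above: nothing to send when the mask
     * is empty, or contains only ourselves while self is excluded. */
    static bool ipi_is_noop(const struct cpumask *mask, bool exclude_self)
    {
            unsigned int weight = cpumask_weight(mask);

            return weight == 0 ||
                   (exclude_self && weight == 1 &&
                    cpumask_test_cpu(smp_processor_id(), mask));
    }
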
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 4ae01cdb99de..7516e4199b3c 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -156,19 +156,6 @@ struct compat_shmid64_ds {
(!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
#endif
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- compat_uptr_t sp = task_pt_regs(current)->sp;
-
- /*
- * -128 for the x32 ABI redzone. For IA32, it is not strictly
- * necessary, but not harmful.
- */
- sp -= 128;
-
- return (void __user *)round_down(sp - len, 16);
-}
-
static inline bool in_x32_syscall(void)
{
#ifdef CONFIG_X86_X32_ABI
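
This removal is possible because the mm syscalls whose compat entry points were dropped (mbind, get/set_mempolicy, migrate_pages, move_pages; see the syscall-table hunks above) no longer bounce arguments through a user-space scratch area. The native entry points now branch on in_compat_syscall() and convert in kernel memory. A hedged sketch of that pattern (the helper name is illustrative):

    /* Illustrative: fetch a nodemask from user space in either ABI
     * without arch_compat_alloc_user_space(). */
    static int get_nodes_either(unsigned long *bits, const void __user *umask,
                                unsigned long maxnode)
    {
            if (in_compat_syscall())
                    return compat_get_bitmap(bits,
                                    (const compat_ulong_t __user *)umask,
                                    maxnode);
            return copy_from_user(bits, umask,
                                  BITS_TO_LONGS(maxnode) * sizeof(long))
                    ? -EFAULT : 0;
    }
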
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c9fa7be3df82..5c95d242f38d 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -301,8 +301,8 @@ do { \
unsigned int __gu_low, __gu_high; \
const unsigned int __user *__gu_ptr; \
__gu_ptr = (const void __user *)(ptr); \
- __get_user_asm(__gu_low, ptr, "l", "=r", label); \
- __get_user_asm(__gu_high, ptr+1, "l", "=r", label); \
+ __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
+ __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
} while (0)
#else
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index e7265a552f4f..45697e04d771 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -58,13 +58,6 @@ raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
return copy_user_generic((__force void *)dst, src, size);
}
-static __always_inline __must_check
-unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
-{
- return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
-}
-
extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest);
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index d66af2950e06..b5e36bd0425b 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -985,7 +985,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->priv = base->nb;
}
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -1014,7 +1014,7 @@ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
id4_regs->id = c->apicid >> index_msb;
}
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
{
unsigned int idx, ret;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -1033,6 +1033,3 @@ static int __populate_cache_leaves(unsigned int cpu)
return 0;
}
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 78a32b956e81..5afd98559193 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- memblock_free(__pa(ptr), size);
+ memblock_free_ptr(ptr, size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 74b78840182d..bd90b8fe81e4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -801,8 +801,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
return __add_pages(nid, start_pfn, nr_pages, params);
}
-void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ddeaba947eb3..a6e11763763f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1255,8 +1255,7 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
remove_pagetable(start, end, true, NULL);
}
-void __ref arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 1a50434c8a4d..ef885370719a 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
p = early_alloc(PMD_SIZE, nid, false);
if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
return;
- else if (p)
- memblock_free(__pa(p), PMD_SIZE);
+ memblock_free_ptr(p, PMD_SIZE);
}
p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
p = early_alloc(PUD_SIZE, nid, false);
if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
return;
- else if (p)
- memblock_free(__pa(p), PUD_SIZE);
+ memblock_free_ptr(p, PUD_SIZE);
}
p = early_alloc(PAGE_SIZE, nid, true);
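
memblock_free_ptr() takes the virtual address directly, so the callers above lose both the __pa() conversion and the NULL guard. Presumed shape of the helper, inferred from these call sites rather than quoted from this patch:

    /* Assumed implementation sketch: frees by virtual address and is
     * NULL-safe, which is why the "else if (p)" guards could be dropped. */
    void memblock_free_ptr(void *ptr, size_t size)
    {
            if (ptr)
                    memblock_free(__pa(ptr), size);
    }
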
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a1b5c71099e6..1e9b93b088db 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)
/* numa_distance could be 1LU marking allocation failure, test cnt */
if (numa_distance_cnt)
- memblock_free(__pa(numa_distance), size);
+ memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL; /* enable table creation */
}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 737491b13728..e801e30089c4 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
}
/* free the copied physical distance table */
- if (phys_dist)
- memblock_free(__pa(phys_dist), phys_size);
+ memblock_free_ptr(phys_dist, phys_size);
return;
no_emu:
diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c
index 01a085d9135a..4f0147d4e225 100644
--- a/arch/x86/pci/numachip.c
+++ b/arch/x86/pci/numachip.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <asm/pci_x86.h>
+#include <asm/numachip/numachip.h>
static u8 limit __read_mostly;
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 7d2525691854..101081ad64b6 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -146,8 +146,7 @@ static void sta2x11_map_ep(struct pci_dev *pdev)
dev_err(dev, "sta2x11: could not set DMA offset\n");
dev->bus_dma_limit = max_amba_addr;
- pci_set_consistent_dma_mask(pdev, max_amba_addr);
- pci_set_dma_mask(pdev, max_amba_addr);
+ dma_set_mask_and_coherent(&pdev->dev, max_amba_addr);
/* Configure AHB mapping */
pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index b95db9daf0e8..4c6c2be0c899 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -101,4 +101,16 @@ static inline void remap_stack_and_trap(void)
"memory");
}
+static __always_inline void *get_stub_page(void)
+{
+ unsigned long ret;
+
+ asm volatile (
+ "movl %%esp,%0 ;"
+ "andl %1,%0"
+ : "=a" (ret)
+ : "g" (~(UM_KERN_PAGE_SIZE - 1)));
+
+ return (void *)ret;
+}
#endif
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index 6e2626b77a2e..e9c4b2b38803 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -108,4 +108,16 @@ static inline void remap_stack_and_trap(void)
__syscall_clobber, "r10", "r8", "r9");
}
+static __always_inline void *get_stub_page(void)
+{
+ unsigned long ret;
+
+ asm volatile (
+ "movq %%rsp,%0 ;"
+ "andq %1,%0"
+ : "=a" (ret)
+ : "g" (~(UM_KERN_PAGE_SIZE - 1)));
+
+ return (void *)ret;
+}
#endif
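
Both stubs replace the old trick of taking the address of a local variable with an explicit read of the stack pointer; either way the result is rounded down to the page holding the stub's stack. A C-level equivalent of what the asm computes (sketch only; the stub must avoid ordinary compiler-generated code, hence the inline asm in the real headers):

    /* Sketch: mask the current frame address down to its page base. */
    static inline void *stack_page_base(void)
    {
            unsigned long sp = (unsigned long)__builtin_frame_address(0);

            return (void *)(sp & ~(UM_KERN_PAGE_SIZE - 1));
    }
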
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 21836eaf1725..f7eefba034f9 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -11,9 +11,8 @@
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
- int stack;
+ struct faultinfo *f = get_stub_page();
ucontext_t *uc = p;
- struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
trap_myself();
diff --git a/block/Makefile b/block/Makefile
index 6cf40276d3cf..41aa1ba69c90 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -3,7 +3,7 @@
# Makefile for the kernel block layer
#
-obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
diff --git a/fs/block_dev.c b/block/bdev.c
index 45df6cbccf12..cf2780cb44a7 100644
--- a/fs/block_dev.c
+++ b/block/bdev.c
@@ -7,12 +7,10 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
-#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
@@ -20,30 +18,22 @@
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
-#include <linux/pagevec.h>
#include <linux/writeback.h>
-#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
-#include <linux/log2.h>
#include <linux/cleancache.h>
-#include <linux/task_io_accounting_ops.h>
-#include <linux/falloc.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
-#include <linux/suspend.h>
-#include "internal.h"
-#include "../block/blk.h"
+#include "../fs/internal.h"
+#include "blk.h"
struct bdev_inode {
struct block_device bdev;
struct inode vfs_inode;
};
-static const struct address_space_operations def_blk_aops;
-
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
return container_of(inode, struct bdev_inode, vfs_inode);
@@ -194,332 +184,6 @@ int sb_min_blocksize(struct super_block *sb, int size)
EXPORT_SYMBOL(sb_min_blocksize);
-static int
-blkdev_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
-{
- bh->b_bdev = I_BDEV(inode);
- bh->b_blocknr = iblock;
- set_buffer_mapped(bh);
- return 0;
-}
-
-static struct inode *bdev_file_inode(struct file *file)
-{
- return file->f_mapping->host;
-}
-
-static unsigned int dio_bio_write_op(struct kiocb *iocb)
-{
- unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
-
- /* avoid the need for an I/O completion work item */
- if (iocb->ki_flags & IOCB_DSYNC)
- op |= REQ_FUA;
- return op;
-}
-
-#define DIO_INLINE_BIO_VECS 4
-
-static void blkdev_bio_end_io_simple(struct bio *bio)
-{
- struct task_struct *waiter = bio->bi_private;
-
- WRITE_ONCE(bio->bi_private, NULL);
- blk_wake_io_task(waiter);
-}
-
-static ssize_t
-__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
- unsigned int nr_pages)
-{
- struct file *file = iocb->ki_filp;
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
- struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
- loff_t pos = iocb->ki_pos;
- bool should_dirty = false;
- struct bio bio;
- ssize_t ret;
- blk_qc_t qc;
-
- if ((pos | iov_iter_alignment(iter)) &
- (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
-
- if (nr_pages <= DIO_INLINE_BIO_VECS)
- vecs = inline_vecs;
- else {
- vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
- GFP_KERNEL);
- if (!vecs)
- return -ENOMEM;
- }
-
- bio_init(&bio, vecs, nr_pages);
- bio_set_dev(&bio, bdev);
- bio.bi_iter.bi_sector = pos >> 9;
- bio.bi_write_hint = iocb->ki_hint;
- bio.bi_private = current;
- bio.bi_end_io = blkdev_bio_end_io_simple;
- bio.bi_ioprio = iocb->ki_ioprio;
-
- ret = bio_iov_iter_get_pages(&bio, iter);
- if (unlikely(ret))
- goto out;
- ret = bio.bi_iter.bi_size;
-
- if (iov_iter_rw(iter) == READ) {
- bio.bi_opf = REQ_OP_READ;
- if (iter_is_iovec(iter))
- should_dirty = true;
- } else {
- bio.bi_opf = dio_bio_write_op(iocb);
- task_io_account_write(ret);
- }
- if (iocb->ki_flags & IOCB_NOWAIT)
- bio.bi_opf |= REQ_NOWAIT;
- if (iocb->ki_flags & IOCB_HIPRI)
- bio_set_polled(&bio, iocb);
-
- qc = submit_bio(&bio);
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (!READ_ONCE(bio.bi_private))
- break;
- if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc, true))
- blk_io_schedule();
- }
- __set_current_state(TASK_RUNNING);
-
- bio_release_pages(&bio, should_dirty);
- if (unlikely(bio.bi_status))
- ret = blk_status_to_errno(bio.bi_status);
-
-out:
- if (vecs != inline_vecs)
- kfree(vecs);
-
- bio_uninit(&bio);
-
- return ret;
-}
-
-struct blkdev_dio {
- union {
- struct kiocb *iocb;
- struct task_struct *waiter;
- };
- size_t size;
- atomic_t ref;
- bool multi_bio : 1;
- bool should_dirty : 1;
- bool is_sync : 1;
- struct bio bio;
-};
-
-static struct bio_set blkdev_dio_pool;
-
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
-{
- struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
- struct request_queue *q = bdev_get_queue(bdev);
-
- return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
-}
-
-static void blkdev_bio_end_io(struct bio *bio)
-{
- struct blkdev_dio *dio = bio->bi_private;
- bool should_dirty = dio->should_dirty;
-
- if (bio->bi_status && !dio->bio.bi_status)
- dio->bio.bi_status = bio->bi_status;
-
- if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
- if (!dio->is_sync) {
- struct kiocb *iocb = dio->iocb;
- ssize_t ret;
-
- if (likely(!dio->bio.bi_status)) {
- ret = dio->size;
- iocb->ki_pos += ret;
- } else {
- ret = blk_status_to_errno(dio->bio.bi_status);
- }
-
- dio->iocb->ki_complete(iocb, ret, 0);
- if (dio->multi_bio)
- bio_put(&dio->bio);
- } else {
- struct task_struct *waiter = dio->waiter;
-
- WRITE_ONCE(dio->waiter, NULL);
- blk_wake_io_task(waiter);
- }
- }
-
- if (should_dirty) {
- bio_check_pages_dirty(bio);
- } else {
- bio_release_pages(bio, false);
- bio_put(bio);
- }
-}
-
-static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
- unsigned int nr_pages)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = bdev_file_inode(file);
- struct block_device *bdev = I_BDEV(inode);
- struct blk_plug plug;
- struct blkdev_dio *dio;
- struct bio *bio;
- bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
- bool is_read = (iov_iter_rw(iter) == READ), is_sync;
- loff_t pos = iocb->ki_pos;
- blk_qc_t qc = BLK_QC_T_NONE;
- int ret = 0;
-
- if ((pos | iov_iter_alignment(iter)) &
- (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
-
- bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
-
- dio = container_of(bio, struct blkdev_dio, bio);
- dio->is_sync = is_sync = is_sync_kiocb(iocb);
- if (dio->is_sync) {
- dio->waiter = current;
- bio_get(bio);
- } else {
- dio->iocb = iocb;
- }
-
- dio->size = 0;
- dio->multi_bio = false;
- dio->should_dirty = is_read && iter_is_iovec(iter);
-
- /*
- * Don't plug for HIPRI/polled IO, as those should go straight
- * to issue
- */
- if (!is_poll)
- blk_start_plug(&plug);
-
- for (;;) {
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = pos >> 9;
- bio->bi_write_hint = iocb->ki_hint;
- bio->bi_private = dio;
- bio->bi_end_io = blkdev_bio_end_io;
- bio->bi_ioprio = iocb->ki_ioprio;
-
- ret = bio_iov_iter_get_pages(bio, iter);
- if (unlikely(ret)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- break;
- }
-
- if (is_read) {
- bio->bi_opf = REQ_OP_READ;
- if (dio->should_dirty)
- bio_set_pages_dirty(bio);
- } else {
- bio->bi_opf = dio_bio_write_op(iocb);
- task_io_account_write(bio->bi_iter.bi_size);
- }
- if (iocb->ki_flags & IOCB_NOWAIT)
- bio->bi_opf |= REQ_NOWAIT;
-
- dio->size += bio->bi_iter.bi_size;
- pos += bio->bi_iter.bi_size;
-
- nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
- if (!nr_pages) {
- bool polled = false;
-
- if (iocb->ki_flags & IOCB_HIPRI) {
- bio_set_polled(bio, iocb);
- polled = true;
- }
-
- qc = submit_bio(bio);
-
- if (polled)
- WRITE_ONCE(iocb->ki_cookie, qc);
- break;
- }
-
- if (!dio->multi_bio) {
- /*
- * AIO needs an extra reference to ensure the dio
- * structure which is embedded into the first bio
- * stays around.
- */
- if (!is_sync)
- bio_get(bio);
- dio->multi_bio = true;
- atomic_set(&dio->ref, 2);
- } else {
- atomic_inc(&dio->ref);
- }
-
- submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
- }
-
- if (!is_poll)
- blk_finish_plug(&plug);
-
- if (!is_sync)
- return -EIOCBQUEUED;
-
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (!READ_ONCE(dio->waiter))
- break;
-
- if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc, true))
- blk_io_schedule();
- }
- __set_current_state(TASK_RUNNING);
-
- if (!ret)
- ret = blk_status_to_errno(dio->bio.bi_status);
- if (likely(!ret))
- ret = dio->size;
-
- bio_put(&dio->bio);
- return ret;
-}
-
-static ssize_t
-blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- unsigned int nr_pages;
-
- if (!iov_iter_count(iter))
- return 0;
-
- nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
- if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
- return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
-
- return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
-}
-
-static __init int blkdev_init(void)
-{
- return bioset_init(&blkdev_dio_pool, 4,
- offsetof(struct blkdev_dio, bio),
- BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
-}
-module_init(blkdev_init);
-
int __sync_blockdev(struct block_device *bdev, int wait)
{
if (!bdev)
@@ -637,81 +301,6 @@ out:
}
EXPORT_SYMBOL(thaw_bdev);
-static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
-{
- return block_write_full_page(page, blkdev_get_block, wbc);
-}
-
-static int blkdev_readpage(struct file * file, struct page * page)
-{
- return block_read_full_page(page, blkdev_get_block);
-}
-
-static void blkdev_readahead(struct readahead_control *rac)
-{
- mpage_readahead(rac, blkdev_get_block);
-}
-
-static int blkdev_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- return block_write_begin(mapping, pos, len, flags, pagep,
- blkdev_get_block);
-}
-
-static int blkdev_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- int ret;
- ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
- unlock_page(page);
- put_page(page);
-
- return ret;
-}
-
-/*
- * private llseek:
- * for a block special file file_inode(file)->i_size is zero
- * so we compute the size by hand (just as in block_read/write above)
- */
-static loff_t block_llseek(struct file *file, loff_t offset, int whence)
-{
- struct inode *bd_inode = bdev_file_inode(file);
- loff_t retval;
-
- inode_lock(bd_inode);
- retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
- inode_unlock(bd_inode);
- return retval;
-}
-
-static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync)
-{
- struct inode *bd_inode = bdev_file_inode(filp);
- struct block_device *bdev = I_BDEV(bd_inode);
- int error;
-
- error = file_write_and_wait_range(filp, start, end);
- if (error)
- return error;
-
- /*
- * There is no need to serialise calls to blkdev_issue_flush with
- * i_mutex and doing so causes performance issues with concurrent
- * O_SYNC writers to a block device.
- */
- error = blkdev_issue_flush(bdev);
- if (error == -EOPNOTSUPP)
- error = 0;
-
- return error;
-}
-
/**
* bdev_read_page() - Start reading a page from a block device
* @bdev: The device to read the page from
@@ -1305,35 +894,6 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
}
EXPORT_SYMBOL(blkdev_get_by_path);
-static int blkdev_open(struct inode * inode, struct file * filp)
-{
- struct block_device *bdev;
-
- /*
- * Preserve backwards compatibility and allow large file access
- * even if userspace doesn't ask for it explicitly. Some mkfs
- * binary needs it. We might want to drop this workaround
- * during an unstable branch.
- */
- filp->f_flags |= O_LARGEFILE;
-
- filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
-
- if (filp->f_flags & O_NDELAY)
- filp->f_mode |= FMODE_NDELAY;
- if (filp->f_flags & O_EXCL)
- filp->f_mode |= FMODE_EXCL;
- if ((filp->f_flags & O_ACCMODE) == 3)
- filp->f_mode |= FMODE_WRITE_IOCTL;
-
- bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- filp->f_mapping = bdev->bd_inode->i_mapping;
- filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
- return 0;
-}
-
void blkdev_put(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
@@ -1397,203 +957,6 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
}
EXPORT_SYMBOL(blkdev_put);
-static int blkdev_close(struct inode * inode, struct file * filp)
-{
- struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
- blkdev_put(bdev, filp->f_mode);
- return 0;
-}
-
-static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
- fmode_t mode = file->f_mode;
-
- /*
- * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
- * to updated it before every ioctl.
- */
- if (file->f_flags & O_NDELAY)
- mode |= FMODE_NDELAY;
- else
- mode &= ~FMODE_NDELAY;
-
- return blkdev_ioctl(bdev, mode, cmd, arg);
-}
-
-/*
- * Write data to the block device. Only intended for the block device itself
- * and the raw driver which basically is a fake block device.
- *
- * Does not take i_mutex for the write and thus is not for general purpose
- * use.
- */
-static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct inode *bd_inode = bdev_file_inode(file);
- loff_t size = i_size_read(bd_inode);
- struct blk_plug plug;
- size_t shorted = 0;
- ssize_t ret;
-
- if (bdev_read_only(I_BDEV(bd_inode)))
- return -EPERM;
-
- if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
- return -ETXTBSY;
-
- if (!iov_iter_count(from))
- return 0;
-
- if (iocb->ki_pos >= size)
- return -ENOSPC;
-
- if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
- return -EOPNOTSUPP;
-
- size -= iocb->ki_pos;
- if (iov_iter_count(from) > size) {
- shorted = iov_iter_count(from) - size;
- iov_iter_truncate(from, size);
- }
-
- blk_start_plug(&plug);
- ret = __generic_file_write_iter(iocb, from);
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
- iov_iter_reexpand(from, iov_iter_count(from) + shorted);
- blk_finish_plug(&plug);
- return ret;
-}
-
-static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- struct file *file = iocb->ki_filp;
- struct inode *bd_inode = bdev_file_inode(file);
- loff_t size = i_size_read(bd_inode);
- loff_t pos = iocb->ki_pos;
- size_t shorted = 0;
- ssize_t ret;
-
- if (pos >= size)
- return 0;
-
- size -= pos;
- if (iov_iter_count(to) > size) {
- shorted = iov_iter_count(to) - size;
- iov_iter_truncate(to, size);
- }
-
- ret = generic_file_read_iter(iocb, to);
- iov_iter_reexpand(to, iov_iter_count(to) + shorted);
- return ret;
-}
-
-static int blkdev_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- return generic_writepages(mapping, wbc);
-}
-
-static const struct address_space_operations def_blk_aops = {
- .set_page_dirty = __set_page_dirty_buffers,
- .readpage = blkdev_readpage,
- .readahead = blkdev_readahead,
- .writepage = blkdev_writepage,
- .write_begin = blkdev_write_begin,
- .write_end = blkdev_write_end,
- .writepages = blkdev_writepages,
- .direct_IO = blkdev_direct_IO,
- .migratepage = buffer_migrate_page_norefs,
- .is_dirty_writeback = buffer_check_dirty_writeback,
-};
-
-#define BLKDEV_FALLOC_FL_SUPPORTED \
- (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
-
-static long blkdev_fallocate(struct file *file, int mode, loff_t start,
- loff_t len)
-{
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
- loff_t end = start + len - 1;
- loff_t isize;
- int error;
-
- /* Fail if we don't recognize the flags. */
- if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
- return -EOPNOTSUPP;
-
- /* Don't go off the end of the device. */
- isize = i_size_read(bdev->bd_inode);
- if (start >= isize)
- return -EINVAL;
- if (end >= isize) {
- if (mode & FALLOC_FL_KEEP_SIZE) {
- len = isize - start;
- end = start + len - 1;
- } else
- return -EINVAL;
- }
-
- /*
- * Don't allow IO that isn't aligned to logical block size.
- */
- if ((start | len) & (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
-
- /* Invalidate the page cache, including dirty pages. */
- error = truncate_bdev_range(bdev, file->f_mode, start, end);
- if (error)
- return error;
-
- switch (mode) {
- case FALLOC_FL_ZERO_RANGE:
- case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
- error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
- GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
- break;
- case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
- error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
- GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
- break;
- case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
- error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
- GFP_KERNEL, 0);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (error)
- return error;
-
- /*
- * Invalidate the page cache again; if someone wandered in and dirtied
- * a page, we just discard it - userspace has no way of knowing whether
- * the write happened before or after discard completing...
- */
- return truncate_bdev_range(bdev, file->f_mode, start, end);
-}
-
-const struct file_operations def_blk_fops = {
- .open = blkdev_open,
- .release = blkdev_close,
- .llseek = block_llseek,
- .read_iter = blkdev_read_iter,
- .write_iter = blkdev_write_iter,
- .iopoll = blkdev_iopoll,
- .mmap = generic_file_mmap,
- .fsync = blkdev_fsync,
- .unlocked_ioctl = block_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_blkdev_ioctl,
-#endif
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .fallocate = blkdev_fallocate,
-};
-
/**
* lookup_bdev - lookup a struct block_device by name
* @pathname: special file representing the block device
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 65d3a63aecc6..108a352051be 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2135,6 +2135,18 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
}
}
+/*
+ * Allow up to 4x BLK_MAX_REQUEST_COUNT requests on the plug list when
+ * submitting to multiple queues. This is important for md arrays to
+ * benefit from merging requests.
+ */
+static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
+{
+ if (plug->multiple_queues)
+ return BLK_MAX_REQUEST_COUNT * 4;
+ return BLK_MAX_REQUEST_COUNT;
+}
+
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@@ -2231,7 +2243,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
else
last = list_entry_rq(plug->mq_list.prev);
- if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
+ if (request_count >= blk_plug_max_rq_count(plug) || (last &&
blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
blk_flush_plug_list(plug, false);
trace_block_plug(q);
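
The raised cap only applies when plug->multiple_queues is set, i.e. when one plug collects requests destined for more than one queue, as an md/raid submitter does. A hypothetical caller showing the situation the 4x limit is meant to help:

    /* Hypothetical: fan bios out to several member devices under one
     * plug; the larger limit lets more requests accumulate for merging. */
    static void submit_stripe(struct bio *bios[], int n)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);
            for (i = 0; i < n; i++)
                    submit_bio(bios[i]);    /* different bdevs -> multiple_queues */
            blk_finish_plug(&plug);
    }
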
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 55c49015e533..7c4e7993ba97 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2458,6 +2458,7 @@ int blk_throtl_init(struct request_queue *q)
void blk_throtl_exit(struct request_queue *q)
{
BUG_ON(!q->td);
+ del_timer_sync(&q->td->service_queue.pending_timer);
throtl_shutdown_wq(q);
blkcg_deactivate_policy(q, &blkcg_policy_throtl);
free_percpu(q->td->latency_buckets[READ]);
diff --git a/block/blk.h b/block/blk.h
index 8c96b0c90c48..7d2a0ba7ed21 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -373,4 +373,6 @@ static inline void bio_clear_hipri(struct bio *bio)
bio->bi_opf &= ~REQ_HIPRI;
}
+extern const struct address_space_operations def_blk_aops;
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/fops.c b/block/fops.c
new file mode 100644
index 000000000000..ffce6f6c68dd
--- /dev/null
+++ b/block/fops.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Copyright (C) 2016 - 2020 Christoph Hellwig
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include <linux/uio.h>
+#include <linux/namei.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/falloc.h>
+#include <linux/suspend.h>
+#include "blk.h"
+
+static struct inode *bdev_file_inode(struct file *file)
+{
+ return file->f_mapping->host;
+}
+
+static int blkdev_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
+{
+ bh->b_bdev = I_BDEV(inode);
+ bh->b_blocknr = iblock;
+ set_buffer_mapped(bh);
+ return 0;
+}
+
+static unsigned int dio_bio_write_op(struct kiocb *iocb)
+{
+ unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+
+	/* avoid the need for an I/O completion work item */
+ if (iocb->ki_flags & IOCB_DSYNC)
+ op |= REQ_FUA;
+ return op;
+}
+
+#define DIO_INLINE_BIO_VECS 4
+
+static void blkdev_bio_end_io_simple(struct bio *bio)
+{
+ struct task_struct *waiter = bio->bi_private;
+
+ WRITE_ONCE(bio->bi_private, NULL);
+ blk_wake_io_task(waiter);
+}
+
+static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
+ struct iov_iter *iter, unsigned int nr_pages)
+{
+ struct file *file = iocb->ki_filp;
+ struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
+ loff_t pos = iocb->ki_pos;
+ bool should_dirty = false;
+ struct bio bio;
+ ssize_t ret;
+ blk_qc_t qc;
+
+ if ((pos | iov_iter_alignment(iter)) &
+ (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
+
+ if (nr_pages <= DIO_INLINE_BIO_VECS)
+ vecs = inline_vecs;
+ else {
+ vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
+ GFP_KERNEL);
+ if (!vecs)
+ return -ENOMEM;
+ }
+
+ bio_init(&bio, vecs, nr_pages);
+ bio_set_dev(&bio, bdev);
+ bio.bi_iter.bi_sector = pos >> 9;
+ bio.bi_write_hint = iocb->ki_hint;
+ bio.bi_private = current;
+ bio.bi_end_io = blkdev_bio_end_io_simple;
+ bio.bi_ioprio = iocb->ki_ioprio;
+
+ ret = bio_iov_iter_get_pages(&bio, iter);
+ if (unlikely(ret))
+ goto out;
+ ret = bio.bi_iter.bi_size;
+
+ if (iov_iter_rw(iter) == READ) {
+ bio.bi_opf = REQ_OP_READ;
+ if (iter_is_iovec(iter))
+ should_dirty = true;
+ } else {
+ bio.bi_opf = dio_bio_write_op(iocb);
+ task_io_account_write(ret);
+ }
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ bio.bi_opf |= REQ_NOWAIT;
+ if (iocb->ki_flags & IOCB_HIPRI)
+ bio_set_polled(&bio, iocb);
+
+ qc = submit_bio(&bio);
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(bio.bi_private))
+ break;
+ if (!(iocb->ki_flags & IOCB_HIPRI) ||
+ !blk_poll(bdev_get_queue(bdev), qc, true))
+ blk_io_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ bio_release_pages(&bio, should_dirty);
+ if (unlikely(bio.bi_status))
+ ret = blk_status_to_errno(bio.bi_status);
+
+out:
+ if (vecs != inline_vecs)
+ kfree(vecs);
+
+ bio_uninit(&bio);
+
+ return ret;
+}
+
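
[Editor's note] The alignment check at the top of __blkdev_direct_IO_simple() is what userspace trips over when issuing O_DIRECT I/O with a misaligned buffer, length, or offset. A hedged userspace sketch; the device path and block size are assumptions (the real logical block size can be queried with the BLKSSZGET ioctl):

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const size_t lbs = 512;	/* assumed logical block size */
	void *buf;
	int fd = open("/dev/sda", O_RDONLY | O_DIRECT);

	if (fd < 0)
		return 1;
	/* offset, length and buffer address must all be multiples of
	 * the logical block size, or the kernel path above returns
	 * -EINVAL */
	if (posix_memalign(&buf, lbs, 4096))
		return 1;
	if (pread(fd, buf, 4096, 0) < 0)
		perror("pread");
	free(buf);
	close(fd);
	return 0;
}
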
+struct blkdev_dio {
+ union {
+ struct kiocb *iocb;
+ struct task_struct *waiter;
+ };
+ size_t size;
+ atomic_t ref;
+ bool multi_bio : 1;
+ bool should_dirty : 1;
+ bool is_sync : 1;
+ struct bio bio;
+};
+
+static struct bio_set blkdev_dio_pool;
+
+static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+{
+ struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+}
+
+static void blkdev_bio_end_io(struct bio *bio)
+{
+ struct blkdev_dio *dio = bio->bi_private;
+ bool should_dirty = dio->should_dirty;
+
+ if (bio->bi_status && !dio->bio.bi_status)
+ dio->bio.bi_status = bio->bi_status;
+
+ if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
+ if (!dio->is_sync) {
+ struct kiocb *iocb = dio->iocb;
+ ssize_t ret;
+
+ if (likely(!dio->bio.bi_status)) {
+ ret = dio->size;
+ iocb->ki_pos += ret;
+ } else {
+ ret = blk_status_to_errno(dio->bio.bi_status);
+ }
+
+ dio->iocb->ki_complete(iocb, ret, 0);
+ if (dio->multi_bio)
+ bio_put(&dio->bio);
+ } else {
+ struct task_struct *waiter = dio->waiter;
+
+ WRITE_ONCE(dio->waiter, NULL);
+ blk_wake_io_task(waiter);
+ }
+ }
+
+ if (should_dirty) {
+ bio_check_pages_dirty(bio);
+ } else {
+ bio_release_pages(bio, false);
+ bio_put(bio);
+ }
+}
+
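
[Editor's note] The end_io handler above completes the dio only when the last bio finishes: single-bio dios skip the refcount entirely, multi-bio dios drop one reference per completion. The same pattern in isolation, sketched with C11 atomics (names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct multi_io {
	atomic_int ref;		/* one count per in-flight bio */
	bool multi_bio;
};

/* returns true when the caller completed the final bio */
static bool io_put_one(struct multi_io *io)
{
	if (!io->multi_bio)
		return true;
	/* fetch_sub returns the old value; 1 means we were last */
	return atomic_fetch_sub(&io->ref, 1) == 1;
}
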
+static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ unsigned int nr_pages)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = bdev_file_inode(file);
+ struct block_device *bdev = I_BDEV(inode);
+ struct blk_plug plug;
+ struct blkdev_dio *dio;
+ struct bio *bio;
+ bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
+ bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ loff_t pos = iocb->ki_pos;
+ blk_qc_t qc = BLK_QC_T_NONE;
+ int ret = 0;
+
+ if ((pos | iov_iter_alignment(iter)) &
+ (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
+
+ bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
+
+ dio = container_of(bio, struct blkdev_dio, bio);
+ dio->is_sync = is_sync = is_sync_kiocb(iocb);
+ if (dio->is_sync) {
+ dio->waiter = current;
+ bio_get(bio);
+ } else {
+ dio->iocb = iocb;
+ }
+
+ dio->size = 0;
+ dio->multi_bio = false;
+ dio->should_dirty = is_read && iter_is_iovec(iter);
+
+ /*
+ * Don't plug for HIPRI/polled IO, as those should go straight
+ * to issue
+ */
+ if (!is_poll)
+ blk_start_plug(&plug);
+
+ for (;;) {
+ bio_set_dev(bio, bdev);
+ bio->bi_iter.bi_sector = pos >> 9;
+ bio->bi_write_hint = iocb->ki_hint;
+ bio->bi_private = dio;
+ bio->bi_end_io = blkdev_bio_end_io;
+ bio->bi_ioprio = iocb->ki_ioprio;
+
+ ret = bio_iov_iter_get_pages(bio, iter);
+ if (unlikely(ret)) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ break;
+ }
+
+ if (is_read) {
+ bio->bi_opf = REQ_OP_READ;
+ if (dio->should_dirty)
+ bio_set_pages_dirty(bio);
+ } else {
+ bio->bi_opf = dio_bio_write_op(iocb);
+ task_io_account_write(bio->bi_iter.bi_size);
+ }
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ bio->bi_opf |= REQ_NOWAIT;
+
+ dio->size += bio->bi_iter.bi_size;
+ pos += bio->bi_iter.bi_size;
+
+ nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
+ if (!nr_pages) {
+ bool polled = false;
+
+ if (iocb->ki_flags & IOCB_HIPRI) {
+ bio_set_polled(bio, iocb);
+ polled = true;
+ }
+
+ qc = submit_bio(bio);
+
+ if (polled)
+ WRITE_ONCE(iocb->ki_cookie, qc);
+ break;
+ }
+
+ if (!dio->multi_bio) {
+ /*
+ * AIO needs an extra reference to ensure the dio
+ * structure which is embedded into the first bio
+ * stays around.
+ */
+ if (!is_sync)
+ bio_get(bio);
+ dio->multi_bio = true;
+ atomic_set(&dio->ref, 2);
+ } else {
+ atomic_inc(&dio->ref);
+ }
+
+ submit_bio(bio);
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
+ }
+
+ if (!is_poll)
+ blk_finish_plug(&plug);
+
+ if (!is_sync)
+ return -EIOCBQUEUED;
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(dio->waiter))
+ break;
+
+ if (!(iocb->ki_flags & IOCB_HIPRI) ||
+ !blk_poll(bdev_get_queue(bdev), qc, true))
+ blk_io_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ if (!ret)
+ ret = blk_status_to_errno(dio->bio.bi_status);
+ if (likely(!ret))
+ ret = dio->size;
+
+ bio_put(&dio->bio);
+ return ret;
+}
+
+static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+ unsigned int nr_pages;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
+ if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
+ return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
+
+ return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
+}
+
+static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
+{
+ return block_write_full_page(page, blkdev_get_block, wbc);
+}
+
+static int blkdev_readpage(struct file * file, struct page * page)
+{
+ return block_read_full_page(page, blkdev_get_block);
+}
+
+static void blkdev_readahead(struct readahead_control *rac)
+{
+ mpage_readahead(rac, blkdev_get_block);
+}
+
+static int blkdev_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags, struct page **pagep,
+ void **fsdata)
+{
+ return block_write_begin(mapping, pos, len, flags, pagep,
+ blkdev_get_block);
+}
+
+static int blkdev_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied, struct page *page,
+ void *fsdata)
+{
+ int ret;
+ ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+ unlock_page(page);
+ put_page(page);
+
+ return ret;
+}
+
+static int blkdev_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return generic_writepages(mapping, wbc);
+}
+
+const struct address_space_operations def_blk_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
+ .readpage = blkdev_readpage,
+ .readahead = blkdev_readahead,
+ .writepage = blkdev_writepage,
+ .write_begin = blkdev_write_begin,
+ .write_end = blkdev_write_end,
+ .writepages = blkdev_writepages,
+ .direct_IO = blkdev_direct_IO,
+ .migratepage = buffer_migrate_page_norefs,
+ .is_dirty_writeback = buffer_check_dirty_writeback,
+};
+
+/*
+ * for a block special file file_inode(file)->i_size is zero
+ * so we compute the size by hand (just as in block_read/write above)
+ */
+static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct inode *bd_inode = bdev_file_inode(file);
+ loff_t retval;
+
+ inode_lock(bd_inode);
+ retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
+ inode_unlock(bd_inode);
+ return retval;
+}
+
+static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
+ int datasync)
+{
+ struct inode *bd_inode = bdev_file_inode(filp);
+ struct block_device *bdev = I_BDEV(bd_inode);
+ int error;
+
+ error = file_write_and_wait_range(filp, start, end);
+ if (error)
+ return error;
+
+ /*
+ * There is no need to serialise calls to blkdev_issue_flush with
+ * i_mutex and doing so causes performance issues with concurrent
+ * O_SYNC writers to a block device.
+ */
+ error = blkdev_issue_flush(bdev);
+ if (error == -EOPNOTSUPP)
+ error = 0;
+
+ return error;
+}
+
+static int blkdev_open(struct inode *inode, struct file *filp)
+{
+ struct block_device *bdev;
+
+ /*
+ * Preserve backwards compatibility and allow large file access
+ * even if userspace doesn't ask for it explicitly. Some mkfs
+	 * binaries need it. We might want to drop this workaround
+ * during an unstable branch.
+ */
+ filp->f_flags |= O_LARGEFILE;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+
+ if (filp->f_flags & O_NDELAY)
+ filp->f_mode |= FMODE_NDELAY;
+ if (filp->f_flags & O_EXCL)
+ filp->f_mode |= FMODE_EXCL;
+ if ((filp->f_flags & O_ACCMODE) == 3)
+ filp->f_mode |= FMODE_WRITE_IOCTL;
+
+ bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ filp->f_mapping = bdev->bd_inode->i_mapping;
+ filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
+ return 0;
+}
+
+static int blkdev_close(struct inode *inode, struct file *filp)
+{
+ struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
+
+ blkdev_put(bdev, filp->f_mode);
+ return 0;
+}
+
+static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ fmode_t mode = file->f_mode;
+
+ /*
+ * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+	 * to update it before every ioctl.
+ */
+ if (file->f_flags & O_NDELAY)
+ mode |= FMODE_NDELAY;
+ else
+ mode &= ~FMODE_NDELAY;
+
+ return blkdev_ioctl(bdev, mode, cmd, arg);
+}
+
+/*
+ * Write data to the block device. Only intended for the block device itself
+ * and the raw driver which basically is a fake block device.
+ *
+ * Does not take i_mutex for the write and thus is not for general purpose
+ * use.
+ */
+static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *bd_inode = bdev_file_inode(file);
+ loff_t size = i_size_read(bd_inode);
+ struct blk_plug plug;
+ size_t shorted = 0;
+ ssize_t ret;
+
+ if (bdev_read_only(I_BDEV(bd_inode)))
+ return -EPERM;
+
+ if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
+ return -ETXTBSY;
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if (iocb->ki_pos >= size)
+ return -ENOSPC;
+
+ if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
+ return -EOPNOTSUPP;
+
+ size -= iocb->ki_pos;
+ if (iov_iter_count(from) > size) {
+ shorted = iov_iter_count(from) - size;
+ iov_iter_truncate(from, size);
+ }
+
+ blk_start_plug(&plug);
+ ret = __generic_file_write_iter(iocb, from);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+ iov_iter_reexpand(from, iov_iter_count(from) + shorted);
+ blk_finish_plug(&plug);
+ return ret;
+}
+
+static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *bd_inode = bdev_file_inode(file);
+ loff_t size = i_size_read(bd_inode);
+ loff_t pos = iocb->ki_pos;
+ size_t shorted = 0;
+ ssize_t ret;
+
+ if (pos >= size)
+ return 0;
+
+ size -= pos;
+ if (iov_iter_count(to) > size) {
+ shorted = iov_iter_count(to) - size;
+ iov_iter_truncate(to, size);
+ }
+
+ ret = generic_file_read_iter(iocb, to);
+ iov_iter_reexpand(to, iov_iter_count(to) + shorted);
+ return ret;
+}
+
+#define BLKDEV_FALLOC_FL_SUPPORTED \
+ (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
+
+static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+ loff_t len)
+{
+ struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ loff_t end = start + len - 1;
+ loff_t isize;
+ int error;
+
+ /* Fail if we don't recognize the flags. */
+ if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
+ return -EOPNOTSUPP;
+
+ /* Don't go off the end of the device. */
+ isize = i_size_read(bdev->bd_inode);
+ if (start >= isize)
+ return -EINVAL;
+ if (end >= isize) {
+ if (mode & FALLOC_FL_KEEP_SIZE) {
+ len = isize - start;
+ end = start + len - 1;
+ } else
+ return -EINVAL;
+ }
+
+ /*
+ * Don't allow IO that isn't aligned to logical block size.
+ */
+ if ((start | len) & (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
+
+ /* Invalidate the page cache, including dirty pages. */
+ error = truncate_bdev_range(bdev, file->f_mode, start, end);
+ if (error)
+ return error;
+
+ switch (mode) {
+ case FALLOC_FL_ZERO_RANGE:
+ case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+ error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
+ break;
+ case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+ error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
+ break;
+ case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+ error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, 0);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (error)
+ return error;
+
+ /*
+ * Invalidate the page cache again; if someone wandered in and dirtied
+ * a page, we just discard it - userspace has no way of knowing whether
+	 * the write happened before or after the discard completed...
+ */
+ return truncate_bdev_range(bdev, file->f_mode, start, end);
+}
+
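
[Editor's note] As the switch above shows, only three flag combinations are honoured on block devices, and FALLOC_FL_KEEP_SIZE is effectively mandatory since a device cannot change size. A hypothetical userspace sketch punching a hole (the device path is an assumption):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);	/* assumed test device */

	if (fd < 0)
		return 1;
	/* start and length must be logical-block aligned; this combination
	 * maps to blkdev_issue_zeroout(..., BLKDEV_ZERO_NOFALLBACK) above */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1 << 20))
		perror("fallocate");
	close(fd);
	return 0;
}
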
+const struct file_operations def_blk_fops = {
+ .open = blkdev_open,
+ .release = blkdev_close,
+ .llseek = blkdev_llseek,
+ .read_iter = blkdev_read_iter,
+ .write_iter = blkdev_write_iter,
+ .iopoll = blkdev_iopoll,
+ .mmap = generic_file_mmap,
+ .fsync = blkdev_fsync,
+ .unlocked_ioctl = block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_blkdev_ioctl,
+#endif
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .fallocate = blkdev_fallocate,
+};
+
+static __init int blkdev_init(void)
+{
+ return bioset_init(&blkdev_dio_pool, 4,
+ offsetof(struct blkdev_dio, bio),
+ BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
+}
+module_init(blkdev_init);
diff --git a/block/genhd.c b/block/genhd.c
index 567549a011d1..7b6e5e1cf956 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -183,6 +183,7 @@ static struct blk_major_name {
void (*probe)(dev_t devt);
} *major_names[BLKDEV_MAJOR_HASH_SIZE];
static DEFINE_MUTEX(major_names_lock);
+static DEFINE_SPINLOCK(major_names_spinlock);
/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(unsigned major)
@@ -195,11 +196,11 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
{
struct blk_major_name *dp;
- mutex_lock(&major_names_lock);
+ spin_lock(&major_names_spinlock);
for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
if (dp->major == offset)
seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
- mutex_unlock(&major_names_lock);
+ spin_unlock(&major_names_spinlock);
}
#endif /* CONFIG_PROC_FS */
@@ -271,6 +272,7 @@ int __register_blkdev(unsigned int major, const char *name,
p->next = NULL;
index = major_to_index(major);
+ spin_lock(&major_names_spinlock);
for (n = &major_names[index]; *n; n = &(*n)->next) {
if ((*n)->major == major)
break;
@@ -279,6 +281,7 @@ int __register_blkdev(unsigned int major, const char *name,
*n = p;
else
ret = -EBUSY;
+ spin_unlock(&major_names_spinlock);
if (ret < 0) {
printk("register_blkdev: cannot get major %u for %s\n",
@@ -298,6 +301,7 @@ void unregister_blkdev(unsigned int major, const char *name)
int index = major_to_index(major);
mutex_lock(&major_names_lock);
+ spin_lock(&major_names_spinlock);
for (n = &major_names[index]; *n; n = &(*n)->next)
if ((*n)->major == major)
break;
@@ -307,6 +311,7 @@ void unregister_blkdev(unsigned int major, const char *name)
p = *n;
*n = p->next;
}
+ spin_unlock(&major_names_spinlock);
mutex_unlock(&major_names_lock);
kfree(p);
}
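
[Editor's note] The genhd change keeps the existing mutex to serialize whole register/unregister operations, but adds a spinlock around the hash-list manipulation itself. One plausible reading, inferred from the code rather than stated in the hunk, is that lookups such as blkdev_show() may now run in contexts where taking a sleeping lock is not allowed. The two-lock pattern in general form:

/*
 * Illustrative sketch, not the kernel code: a sleeping lock serializes
 * full registration, while a spinlock protects only the list links so
 * readers can walk the hash from atomic context.
 */
static DEFINE_MUTEX(reg_lock);		/* writers, may sleep */
static DEFINE_SPINLOCK(list_lock);	/* list links, atomic-safe */

static void example_register(struct blk_major_name *p, int index)
{
	mutex_lock(&reg_lock);
	/* ... allocations and other sleeping work ... */
	spin_lock(&list_lock);
	p->next = major_names[index];
	major_names[index] = p;
	spin_unlock(&list_lock);
	mutex_unlock(&reg_lock);
}
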
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 8cc195c4c861..24f662d8bd39 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -54,6 +54,7 @@ struct acpi_memory_info {
struct acpi_memory_device {
struct acpi_device *device;
struct list_head res_list;
+ int mgid;
};
static acpi_status
@@ -169,12 +170,33 @@ static void acpi_unbind_memory_blocks(struct acpi_memory_info *info)
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
acpi_handle handle = mem_device->device->handle;
+ mhp_t mhp_flags = MHP_NID_IS_MGID;
int result, num_enabled = 0;
struct acpi_memory_info *info;
- mhp_t mhp_flags = MHP_NONE;
- int node;
+ u64 total_length = 0;
+ int node, mgid;
node = acpi_get_node(handle);
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ if (!info->length)
+ continue;
+ /* We want a single node for the whole memory group */
+ if (node < 0)
+ node = memory_add_physaddr_to_nid(info->start_addr);
+ total_length += info->length;
+ }
+
+ if (!total_length) {
+ dev_err(&mem_device->device->dev, "device is empty\n");
+ return -EINVAL;
+ }
+
+ mgid = memory_group_register_static(node, PFN_UP(total_length));
+ if (mgid < 0)
+ return mgid;
+ mem_device->mgid = mgid;
+
/*
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
@@ -182,22 +204,16 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* (i.e. memory-hot-remove function)
*/
list_for_each_entry(info, &mem_device->res_list, list) {
- if (info->enabled) { /* just sanity check...*/
- num_enabled++;
- continue;
- }
/*
* If the memory block size is zero, please ignore it.
* Don't try to do the following memory hotplug flowchart.
*/
if (!info->length)
continue;
- if (node < 0)
- node = memory_add_physaddr_to_nid(info->start_addr);
if (mhp_supports_memmap_on_memory(info->length))
mhp_flags |= MHP_MEMMAP_ON_MEMORY;
- result = __add_memory(node, info->start_addr, info->length,
+ result = __add_memory(mgid, info->start_addr, info->length,
mhp_flags);
/*
@@ -239,19 +255,14 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- acpi_handle handle = mem_device->device->handle;
struct acpi_memory_info *info, *n;
- int nid = acpi_get_node(handle);
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (!info->enabled)
continue;
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(info->start_addr);
-
acpi_unbind_memory_blocks(info);
- __remove_memory(nid, info->start_addr, info->length);
+ __remove_memory(info->start_addr, info->length);
list_del(&info->list);
kfree(info);
}
@@ -262,6 +273,10 @@ static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
if (!mem_device)
return;
+	/* In case we succeeded in adding *some* memory, unregistering will fail. */
+ if (mem_device->mgid >= 0)
+ memory_group_unregister(mem_device->mgid);
+
acpi_memory_free_device_resources(mem_device);
mem_device->device->driver_data = NULL;
kfree(mem_device);
@@ -282,6 +297,7 @@ static int acpi_memory_device_add(struct acpi_device *device,
INIT_LIST_HEAD(&mem_device->res_list);
mem_device->device = device;
+ mem_device->mgid = -1;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
device->driver_data = mem_device;
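
[Editor's note] The ACPI driver above now brackets hotplug with a single static memory group covering the whole device. Its lifecycle, reduced to the essentials (a sketch with error handling trimmed, not the driver itself):

/* add path: one static group covering the whole device */
static int example_add(int nid, u64 start, u64 size)
{
	int mgid = memory_group_register_static(nid, PFN_UP(size));

	if (mgid < 0)
		return mgid;
	/* MHP_NID_IS_MGID: the first argument is a group id, not a nid */
	return __add_memory(mgid, start, size, MHP_NID_IS_MGID);
}

/* remove path: the group can only be unregistered once it is empty */
static void example_remove(int mgid, u64 start, u64 size)
{
	__remove_memory(start, size);
	memory_group_unregister(mgid);
}
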
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a4d4eebba1da..bd482108310c 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1008,23 +1008,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return ret_val;
}
-/**
- * cppc_get_desired_perf - Get the value of desired performance register.
- * @cpunum: CPU from which to get desired performance.
- * @desired_perf: address of a variable to store the returned desired performance
- *
- * Return: 0 for success, -EIO otherwise.
- */
-int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cpc_register_resource *desired_reg;
- struct cppc_pcc_data *pcc_ss_data = NULL;
-
- desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+ struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(reg)) {
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
if (pcc_ss_id < 0)
@@ -1035,7 +1026,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
down_write(&pcc_ss_data->pcc_lock);
if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, desired_reg, desired_perf);
+ cpc_read(cpunum, reg, perf);
else
ret = -EIO;
@@ -1044,13 +1035,37 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
return ret;
}
- cpc_read(cpunum, desired_reg, desired_perf);
+ cpc_read(cpunum, reg, perf);
return 0;
}
+
+/**
+ * cppc_get_desired_perf - Get the desired performance register value.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
+ * cppc_get_nominal_perf - Get the nominal performance register value.
+ * @cpunum: CPU from which to get nominal performance.
+ * @nominal_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+}
+
+/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
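
[Editor's note] With the common cppc_get_perf() helper factored out, both accessors become one-liners. A hypothetical caller reading CPU 0's nominal performance:

u64 nominal;

if (!cppc_get_nominal_perf(0, &nominal))
	pr_info("CPU0 nominal perf: %llu\n", nominal);
else
	pr_warn("CPPC nominal perf read failed\n");
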
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 1f6007abcf18..89c22bc55057 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -288,10 +288,18 @@ invalid_guid:
void __init init_prmt(void)
{
+ struct acpi_table_header *tbl;
acpi_status status;
- int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
+ int mc;
+
+ status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ return;
+
+ mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
+ acpi_put_table(tbl);
/*
* Return immediately if PRMT table is not present or no PRM module found.
*/
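
[Editor's note] The prmt.c fix follows the standard ACPI table reference-counting discipline: take a reference before parsing, drop it afterwards, and bail out early if the table is absent. The pattern in general form:

struct acpi_table_header *tbl;
acpi_status status;

status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
if (ACPI_FAILURE(status))
	return;			/* table not present: nothing to do */

/* ... parse entries while the reference pins the mapping ... */

acpi_put_table(tbl);		/* balance the acpi_get_table() above */
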
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b24513ec3fae..5b54c80b9d32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -16,7 +16,6 @@
#include <linux/signal.h>
#include <linux/kthread.h>
#include <linux/dmi.h>
-#include <linux/nls.h>
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 3a308461246a..bd92b549fd5a 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -449,25 +449,30 @@ int acpi_s2idle_prepare_late(void)
if (pm_debug_messages_on)
lpi_check_constraints();
- if (lps0_dsm_func_mask_microsoft > 0) {
+ /* Screen off */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_SCREEN_OFF_AMD :
+ ACPI_LPS0_SCREEN_OFF,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
+ /* LPS0 entry */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_ENTRY_AMD :
+ ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
- lps0_dsm_func_mask, lps0_dsm_guid);
+	/* Modern standby entry */
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
-
return 0;
}
@@ -476,24 +481,30 @@ void acpi_s2idle_restore_early(void)
if (!lps0_device_handle || sleep_no_lps0)
return;
- if (lps0_dsm_func_mask_microsoft > 0) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ /* Modern standby exit */
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
- } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
+
+ /* LPS0 exit */
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_EXIT_AMD :
+ ACPI_LPS0_EXIT,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
- lps0_dsm_func_mask, lps0_dsm_guid);
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
+ /* Screen on */
+ if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
- lps0_dsm_func_mask, lps0_dsm_guid);
- }
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ if (lps0_dsm_func_mask > 0)
+ acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
+ ACPI_LPS0_SCREEN_ON_AMD :
+ ACPI_LPS0_SCREEN_ON,
+ lps0_dsm_func_mask, lps0_dsm_guid);
}
static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index fd430e6866a1..6526aa51fb1d 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -33,7 +33,7 @@
*/
static unsigned int cfag12864b_rate = CONFIG_CFAG12864B_RATE;
-module_param(cfag12864b_rate, uint, S_IRUGO);
+module_param(cfag12864b_rate, uint, 0444);
MODULE_PARM_DESC(cfag12864b_rate,
"Refresh rate (hertz)");
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 24fd6f369ebe..304accde365c 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -637,9 +637,7 @@ static int panel_notify_sys(struct notifier_block *this, unsigned long code,
}
static struct notifier_block panel_notifier = {
- panel_notify_sys,
- NULL,
- 0
+ .notifier_call = panel_notify_sys,
};
int charlcd_register(struct charlcd *lcd)
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 2e5e7c993933..8b2a0eb3f32a 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
- kfree(lcd->drvdata);
charlcd_unregister(lcd);
+ kfree(lcd->drvdata);
kfree(lcd);
return 0;
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 03c95ad4216c..e871b94a1911 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -28,11 +28,11 @@
*/
static unsigned int ks0108_port = CONFIG_KS0108_PORT;
-module_param(ks0108_port, uint, S_IRUGO);
+module_param(ks0108_port, uint, 0444);
MODULE_PARM_DESC(ks0108_port, "Parallel port where the LCD is connected");
static unsigned int ks0108_delay = CONFIG_KS0108_DELAY;
-module_param(ks0108_delay, uint, S_IRUGO);
+module_param(ks0108_delay, uint, 0444);
MODULE_PARM_DESC(ks0108_delay, "Delay between each control writing (microseconds)");
/*
@@ -167,19 +167,7 @@ static struct parport_driver ks0108_parport_driver = {
.detach = ks0108_parport_detach,
.devmodel = true,
};
-
-static int __init ks0108_init(void)
-{
- return parport_register_driver(&ks0108_parport_driver);
-}
-
-static void __exit ks0108_exit(void)
-{
- parport_unregister_driver(&ks0108_parport_driver);
-}
-
-module_init(ks0108_init);
-module_exit(ks0108_exit);
+module_parport_driver(ks0108_parport_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Miguel Ojeda <ojeda@kernel.org>");
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index 46c503486e96..00fb4120a5b3 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
size = numa_distance_cnt * numa_distance_cnt *
sizeof(numa_distance[0]);
- memblock_free(__pa(numa_distance), size);
+ memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 921312a8d957..43407665918f 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -149,6 +149,7 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
@@ -165,6 +166,7 @@ void topology_set_thermal_pressure(const struct cpumask *cpus,
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
+EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index e3fd2dbf4eea..365cd4a7f239 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -82,6 +82,12 @@ static struct bus_type memory_subsys = {
*/
static DEFINE_XARRAY(memory_blocks);
+/*
+ * Memory groups, indexed by memory group id (mgid).
+ */
+static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
+#define MEMORY_GROUP_MARK_DYNAMIC XA_MARK_1
+
static BLOCKING_NOTIFIER_HEAD(memory_chain);
int register_memory_notifier(struct notifier_block *nb)
@@ -177,7 +183,8 @@ static int memory_block_online(struct memory_block *mem)
struct zone *zone;
int ret;
- zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);
+ zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
+ start_pfn, nr_pages);
/*
* Although vmemmap pages have a different lifecycle than the pages
@@ -193,7 +200,7 @@ static int memory_block_online(struct memory_block *mem)
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, zone);
+ nr_pages - nr_vmemmap_pages, zone, mem->group);
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
@@ -205,7 +212,8 @@ static int memory_block_online(struct memory_block *mem)
* now already properly populated.
*/
if (nr_vmemmap_pages)
- adjust_present_page_count(zone, nr_vmemmap_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ nr_vmemmap_pages);
return ret;
}
@@ -215,24 +223,23 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
- struct zone *zone;
int ret;
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
- if (nr_vmemmap_pages) {
- zone = page_zone(pfn_to_page(start_pfn));
- adjust_present_page_count(zone, -nr_vmemmap_pages);
- }
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ -nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages);
+ nr_pages - nr_vmemmap_pages, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
- adjust_present_page_count(zone, nr_vmemmap_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn),
+ mem->group, nr_vmemmap_pages);
return ret;
}
@@ -374,12 +381,13 @@ static ssize_t phys_device_show(struct device *dev,
#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
+ struct memory_group *group,
unsigned long start_pfn, unsigned long nr_pages,
int online_type, struct zone *default_zone)
{
struct zone *zone;
- zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+ zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
if (zone == default_zone)
return 0;
@@ -392,9 +400,10 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct memory_group *group = mem->group;
struct zone *default_zone;
+ int nid = mem->nid;
int len = 0;
- int nid;
/*
* Check the existing zone. Make sure that we do that only on the
@@ -413,14 +422,13 @@ static ssize_t valid_zones_show(struct device *dev,
goto out;
}
- nid = mem->nid;
- default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
- nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+ start_pfn, nr_pages);
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
- len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
out:
len += sysfs_emit_at(buf, len, "\n");
@@ -634,7 +642,8 @@ int register_memory(struct memory_block *memory)
}
static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages)
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -652,6 +661,12 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->state = state;
mem->nid = NUMA_NO_NODE;
mem->nr_vmemmap_pages = nr_vmemmap_pages;
+ INIT_LIST_HEAD(&mem->group_next);
+
+ if (group) {
+ mem->group = group;
+ list_add(&mem->group_next, &group->memory_blocks);
+ }
ret = register_memory(mem);
@@ -671,7 +686,7 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0);
+ MEM_ONLINE, 0, NULL);
}
static void unregister_memory(struct memory_block *memory)
@@ -681,6 +696,11 @@ static void unregister_memory(struct memory_block *memory)
WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
+ if (memory->group) {
+ list_del(&memory->group_next);
+ memory->group = NULL;
+ }
+
/* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
@@ -694,7 +714,8 @@ static void unregister_memory(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages)
+ unsigned long vmemmap_pages,
+ struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
@@ -707,7 +728,8 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
+ ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
+ group);
if (ret)
break;
}
@@ -891,3 +913,164 @@ int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
for_each_memory_block_cb);
}
+
+/*
+ * This is an internal helper to unify allocation and initialization of
+ * memory groups. Note that the passed memory group will be copied to a
+ * dynamically allocated memory group. After this call, the passed
+ * memory group should no longer be used.
+ */
+static int memory_group_register(struct memory_group group)
+{
+ struct memory_group *new_group;
+ uint32_t mgid;
+ int ret;
+
+ if (!node_possible(group.nid))
+ return -EINVAL;
+
+ new_group = kzalloc(sizeof(group), GFP_KERNEL);
+ if (!new_group)
+ return -ENOMEM;
+ *new_group = group;
+ INIT_LIST_HEAD(&new_group->memory_blocks);
+
+ ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(new_group);
+ return ret;
+ } else if (group.is_dynamic) {
+ xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
+ }
+ return mgid;
+}
+
+/**
+ * memory_group_register_static() - Register a static memory group.
+ * @nid: The node id.
+ * @max_pages: The maximum number of pages we'll have in this static memory
+ * group.
+ *
+ * Register a new static memory group and return the memory group id.
+ * All memory in the group belongs to a single unit, such as a DIMM. All
+ * memory belonging to a static memory group is added in one go to be removed
+ * in one go -- it's static.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
+ * returns the new memory group id.
+ */
+int memory_group_register_static(int nid, unsigned long max_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .s = {
+ .max_pages = max_pages,
+ },
+ };
+
+ if (!max_pages)
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_static);
+
+/**
+ * memory_group_register_dynamic() - Register a dynamic memory group.
+ * @nid: The node id.
+ * @unit_pages: Unit in pages in which memory is added/removed in this dynamic
+ * memory group.
+ *
+ * Register a new dynamic memory group and return the memory group id.
+ * Memory within a dynamic memory group is added/removed dynamically
+ * in unit_pages.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if unit_pages is invalid (0, not a
+ * power of two, smaller than a single memory block). Otherwise, returns the
+ * new memory group id.
+ */
+int memory_group_register_dynamic(int nid, unsigned long unit_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .is_dynamic = true,
+ .d = {
+ .unit_pages = unit_pages,
+ },
+ };
+
+ if (!unit_pages || !is_power_of_2(unit_pages) ||
+ unit_pages < PHYS_PFN(memory_block_size_bytes()))
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
+
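
[Editor's note] A hypothetical caller of the dynamic variant, e.g. a paravirtualized memory driver adding memory in fixed-size units (the unit choice here is an assumption; it merely satisfies the power-of-two and minimum-size checks above):

/* one dynamic group per device; add/remove happens in unit_pages chunks */
int mgid = memory_group_register_dynamic(nid,
			PHYS_PFN(memory_block_size_bytes()));

if (mgid < 0)
	return mgid;
/* subsequent __add_memory() calls pass mgid with MHP_NID_IS_MGID */
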
+/**
+ * memory_group_unregister() - Unregister a memory group.
+ * @mgid: the memory group id
+ *
+ * Unregister a memory group. If any memory block still belongs to this
+ * memory group, unregistering will fail.
+ *
+ * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
+ * memory blocks still belong to this memory group and returns 0 if
+ * unregistering succeeded.
+ */
+int memory_group_unregister(int mgid)
+{
+ struct memory_group *group;
+
+ if (mgid < 0)
+ return -EINVAL;
+
+ group = xa_load(&memory_groups, mgid);
+ if (!group)
+ return -EINVAL;
+ if (!list_empty(&group->memory_blocks))
+ return -EBUSY;
+ xa_erase(&memory_groups, mgid);
+ kfree(group);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_group_unregister);
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * lookup a memory group. We don't care about locking, as we don't expect a
+ * memory group to get unregistered while adding memory to it -- because
+ * the group and the memory are managed by the same driver.
+ */
+struct memory_group *memory_group_find_by_id(int mgid)
+{
+ return xa_load(&memory_groups, mgid);
+}
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * walk all dynamic memory groups excluding a given memory group, either
+ * belonging to a specific node, or belonging to any node.
+ */
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg)
+{
+ struct memory_group *group;
+ unsigned long index;
+ int ret = 0;
+
+ xa_for_each_marked(&memory_groups, index, group,
+ MEMORY_GROUP_MARK_DYNAMIC) {
+ if (group == excluded)
+ continue;
+#ifdef CONFIG_NUMA
+ if (nid != NUMA_NO_NODE && group->nid != nid)
+ continue;
+#endif /* CONFIG_NUMA */
+ ret = func(group, arg);
+ if (ret)
+ break;
+ }
+ return ret;
+}
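
[Editor's note] A sketch of how core hotplug code might use the walker, with an illustrative callback (the callback name and my_group are made up for this example):

static int count_dynamic_group(struct memory_group *group, void *arg)
{
	(*(unsigned int *)arg)++;
	return 0;	/* returning non-zero stops the walk early */
}

/* count dynamic groups on a node, excluding the caller's own group */
unsigned int count = 0;

walk_dynamic_memory_groups(nid, count_dynamic_group, my_group, &count);
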
diff --git a/drivers/base/node.c b/drivers/base/node.c
index be16bbff11cc..c56d34f8158f 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -785,8 +785,6 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __ref get_nid_for_pfn(unsigned long pfn)
{
- if (!pfn_valid_within(pfn))
- return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
if (system_state < SYSTEM_RUNNING)
return early_pfn_to_nid(pfn);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index d568772152c2..cbea78e79f3d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1642,7 +1642,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
dev->power.may_skip_resume = true;
- dev->power.must_resume = false;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 3bad3266a2ad..b91a3a9bf9f6 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -12,14 +12,11 @@
/**
* dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
* @dev: Device entry
- * @irq: Device wake-up capable interrupt
* @wirq: Wake irq specific data
*
- * Internal function to attach either a device IO interrupt or a
- * dedicated wake-up interrupt as a wake IRQ.
+ * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
*/
-static int dev_pm_attach_wake_irq(struct device *dev, int irq,
- struct wake_irq *wirq)
+static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
unsigned long flags;
@@ -65,7 +62,7 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
kfree(wirq);
@@ -196,7 +193,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_name;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
goto err_free_irq;
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index c84be0028f63..26798da661bd 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -129,8 +129,8 @@ static int __init n64cart_probe(struct platform_device *pdev)
}
reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!reg_base)
- return -EINVAL;
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
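
[Editor's note] The n64cart fix corrects a common mistake: devm_platform_ioremap_resource() reports failure through ERR_PTR(), never NULL, so the check must use IS_ERR()/PTR_ERR(). The canonical pattern:

void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

if (IS_ERR(base))
	return PTR_ERR(base);	/* e.g. -ENOMEM or -EBUSY, never NULL */
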
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 57c6ae7debd9..9b3bd083b411 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -762,7 +762,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_free_vblk;
/* Default queue sizing is to fill the ring. */
- if (likely(!virtblk_queue_depth)) {
+ if (!virtblk_queue_depth) {
queue_depth = vblk->vqs[0].vq->num_free;
/* ... but without indirect descs, we use 2 descs per req */
if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
@@ -836,7 +836,7 @@ static int virtblk_probe(struct virtio_device *vdev)
else
blk_size = queue_logical_block_size(q);
- if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+ if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
dev_err(&vdev->dev,
"block size is changed unexpectedly, now is %u\n",
blk_size);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb466981dc1b..6f3272b58ced 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -591,7 +591,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
@@ -683,10 +683,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- dev_warn(smi_info->io.dev,
- "Couldn't get irq info: %x.\n", msg[2]);
- dev_warn(smi_info->io.dev,
- "Maybe ok, but ipmi might run very slowly.\n");
+ dev_warn_ratelimited(smi_info->io.dev,
+ "Couldn't get irq info: %x,\n"
+ "Maybe ok, but ipmi might run very slowly.\n",
+ msg[2]);
smi_info->si_state = SI_NORMAL;
break;
}
@@ -721,7 +721,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
@@ -1343,7 +1343,7 @@ retry:
if (cc != IPMI_CC_NO_ERROR &&
++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
- dev_warn(smi_info->io.dev,
+ dev_warn_ratelimited(smi_info->io.dev,
"BMC returned 0x%2.2x, retry get bmc device id\n",
cc);
goto retry;
@@ -1605,7 +1605,7 @@ static ssize_t name##_show(struct device *dev, \
\
return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
} \
-static DEVICE_ATTR(name, 0444, name##_show, NULL)
+static DEVICE_ATTR_RO(name)
static ssize_t type_show(struct device *dev,
struct device_attribute *attr,
@@ -1615,7 +1615,7 @@ static ssize_t type_show(struct device *dev,
return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
-static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR_RO(type);
static ssize_t interrupts_enabled_show(struct device *dev,
struct device_attribute *attr,
@@ -1626,8 +1626,7 @@ static ssize_t interrupts_enabled_show(struct device *dev,
return snprintf(buf, 10, "%d\n", enabled);
}
-static DEVICE_ATTR(interrupts_enabled, 0444,
- interrupts_enabled_show, NULL);
+static DEVICE_ATTR_RO(interrupts_enabled);
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
@@ -1658,7 +1657,7 @@ static ssize_t params_show(struct device *dev,
smi_info->io.irq,
smi_info->io.slave_addr);
}
-static DEVICE_ATTR(params, 0444, params_show, NULL);
+static DEVICE_ATTR_RO(params);
static struct attribute *ipmi_si_dev_attrs[] = {
&dev_attr_type.attr,
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 053089f83677..3236706771b1 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -176,10 +176,6 @@ static const struct parent_map gcc_parent_map_2[] = {
{ P_GPLL0_OUT_ODD, 2 },
};
-static const struct clk_parent_data gcc_parent_data_2[] = {
- { .fw_name = "bi_tcxo" },
- { .hw = &gpll0_out_odd.clkr.hw },
-};
static const struct clk_parent_data gcc_parent_data_2_ao[] = {
{ .fw_name = "bi_tcxo_ao" },
{ .hw = &gpll0_out_odd.clkr.hw },
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a5c5f70acfc9..954749afb5fe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -133,6 +133,18 @@ config ARM_MEDIATEK_CPUFREQ
help
This adds the CPUFreq driver support for MediaTek SoCs.
+config ARM_MEDIATEK_CPUFREQ_HW
+ tristate "MediaTek CPUFreq HW driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ default m
+ help
+ Support for the CPUFreq HW driver.
+ Some MediaTek chipsets have a HW engine to offload the steps
+ necessary for changing the frequency of the CPUs. Firmware loaded
+ in this engine exposes a programming interface to the OS.
+ The driver implements the cpufreq interface for this HW engine.
+ Say Y if you want to support CPUFreq HW.
+
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 27d3bd7ea9d4..48ee5859030c 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_IMX_CPUFREQ_DT) += imx-cpufreq-dt.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ_HW) += mediatek-cpufreq-hw.o
obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b49612895c78..28467d83c745 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -889,6 +889,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = !acpi_pstate_strict &&
!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+ if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+
return result;
err_unreg:
@@ -918,16 +921,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
-{
- struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
- policy->cpu);
- unsigned int freq = policy->freq_table[0].frequency;
-
- if (perf->states[0].core_frequency * 1000 != freq)
- pr_warn(FW_WARN "P-state 0 is not max freq\n");
-}
-
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -955,7 +948,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
- .ready = acpi_cpufreq_cpu_ready,
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 231e585f6ba2..ca1d103ec449 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -137,11 +137,15 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,qcs404", },
+ { .compatible = "qcom,sa8155p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
+ { .compatible = "qcom,sm8250", },
+ { .compatible = "qcom,sm8350", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index ece52863ba62..8fcaba541539 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -143,8 +143,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
return 0;
out_clk_put:
@@ -184,6 +182,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.exit = cpufreq_exit,
.online = cpufreq_online,
.offline = cpufreq_offline,
+ .register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 06c526d66dd3..5782b15a8caa 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,19 @@ static int cpufreq_online(unsigned int cpu)
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ /*
+ * Register with the energy model before
+ * sched_cpufreq_governor_change() is called, which will result
+ * in rebuilding of the sched domains, which should only be done
+ * once the energy model is properly initialized for the policy
+ * first.
+ *
+ * Also, this should be called before the policy is registered
+ * with cooling framework.
+ */
+ if (cpufreq_driver->register_em)
+ cpufreq_driver->register_em(policy);
}
ret = cpufreq_init_policy(policy);
@@ -1504,10 +1517,6 @@ static int cpufreq_online(unsigned int cpu)
kobject_uevent(&policy->kobj, KOBJ_ADD);
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
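
[Editor's note] Under the new scheme a driver no longer registers the energy model from ->init() or a ->ready() callback; it points ->register_em at a helper and the core invokes it at the right moment, before sched domains are rebuilt and before cooling registration. A minimal, hypothetical driver fragment:

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.init		= example_cpufreq_init,
	.target_index	= example_set_target,
	/* called by the core once the policy is fully initialized */
	.register_em	= cpufreq_register_em_with_opp,
};
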
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 5bf5fc759881..90beb26ed34e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -192,7 +192,6 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
}
@@ -204,6 +203,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
+ .register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b4ffe6c8a0d0..1097f826ad70 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,7 +32,6 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
-#include "../drivers/thermal/intel/thermal_interrupt.h"
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
@@ -220,7 +219,6 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
- * @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -259,7 +257,6 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
- struct delayed_work hwp_notify_work;
};
static struct cpudata **all_cpu_data;
@@ -271,6 +268,7 @@ static struct cpudata **all_cpu_data;
* @get_min: Callback to get minimum P state
* @get_turbo: Callback to get turbo P state
* @get_scaling: Callback to get frequency scaling factor
+ * @get_cpu_scaling: Get frequency scaling factor for a given cpu
* @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
@@ -284,6 +282,7 @@ struct pstate_funcs {
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
+ int (*get_cpu_scaling)(int cpu);
int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
@@ -387,6 +386,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
return cppc_perf.nominal_perf;
}
+static u32 intel_pstate_cppc_nominal(int cpu)
+{
+ u64 nominal_perf;
+
+ if (cppc_get_nominal_perf(cpu, &nominal_perf))
+ return 0;
+
+ return nominal_perf;
+}
#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
@@ -473,20 +481,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
acpi_processor_unregister_performance(policy->cpu);
}
-
-static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
-{
- return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
-}
-
-static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
- struct cppc_perf_caps *caps)
-{
- if (cppc_get_perf_caps(cpu->cpu, caps))
- return false;
-
- return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
-}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -509,15 +503,8 @@ static inline int intel_pstate_get_cppc_guaranteed(int cpu)
}
#endif /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
-{
- pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
-
- cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling;
-}
-
/**
- * intel_pstate_hybrid_hwp_calibrate - Calibrate HWP performance levels.
+ * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
*
* On hybrid processors, HWP may expose more performance levels than there are
@@ -525,115 +512,46 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
* scaling factor between HWP performance levels and CPU frequency will be less
* than the scaling factor between P-state values and CPU frequency.
*
- * In that case, the scaling factor between HWP performance levels and CPU
- * frequency needs to be determined which can be done with the help of the
- * observation that certain HWP performance levels should correspond to certain
- * P-states, like for example the HWP highest performance should correspond
- * to the maximum turbo P-state of the CPU.
+ * In that case, adjust the CPU parameters used in computations accordingly.
*/
-static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
+static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo();
int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
- int perf_ctl_max = pstate_funcs.get_max();
- int max_freq = perf_ctl_max * perf_ctl_scaling;
- int scaling = INT_MAX;
- int freq;
+ int scaling = cpu->pstate.scaling;
pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max);
+ pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max());
pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
-
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
-
-#ifdef CONFIG_ACPI
- if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
- struct cppc_perf_caps caps;
-
- if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
- if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
- pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
-
- /*
- * If the CPPC nominal performance is valid, it
- * can be assumed to correspond to cpu_khz.
- */
- if (caps.nominal_perf == perf_ctl_max_phys) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
- } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
- pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
-
- /*
- * If the CPPC guaranteed performance is valid,
- * it can be assumed to correspond to max_freq.
- */
- if (caps.guaranteed_perf == perf_ctl_max) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
- }
- }
- }
-#endif
- /*
- * If using the CPPC data to compute the HWP-to-frequency scaling factor
- * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
- * the assumption that it corresponds to max_freq.
- */
- if (scaling > perf_ctl_scaling) {
- pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu);
-
- if (cpu->pstate.max_pstate == perf_ctl_max) {
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate);
- if (scaling > perf_ctl_scaling) {
- /*
- * This should not happen, because it would mean that
- * the number of HWP perf levels was less than the
- * number of P-states, so use the PERF_CTL scaling in
- * that case.
- */
- pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu,
- scaling);
-
- intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
- return;
- }
- }
+ pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
/*
- * If the product of the HWP performance scaling factor obtained above
- * and the HWP_CAP highest performance is greater than the maximum turbo
- * frequency corresponding to the pstate_funcs.get_turbo() return value,
- * the scaling factor is too high, so recompute it so that the HWP_CAP
- * highest performance corresponds to the maximum turbo frequency.
+ * If the product of the HWP performance scaling factor and the HWP_CAP
+ * highest performance is greater than the maximum turbo frequency
+ * corresponding to the pstate_funcs.get_turbo() return value, the
+ * scaling factor is too high, so recompute it to make the HWP_CAP
+ * highest performance correspond to the maximum turbo frequency.
*/
if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
- pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling);
-
cpu->pstate.turbo_freq = turbo_freq;
scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
- }
-
- cpu->pstate.scaling = scaling;
+ cpu->pstate.scaling = scaling;
- pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
+ cpu->cpu, scaling);
+ }
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
- freq = perf_ctl_max_phys * perf_ctl_scaling;
- cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling);
+ cpu->pstate.max_pstate_physical =
+ DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
+ scaling);
cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
/*
@@ -1628,40 +1546,6 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
/************************** sysfs end ************************/
-static void intel_pstate_notify_work(struct work_struct *work)
-{
- mutex_lock(&intel_pstate_driver_lock);
- cpufreq_update_policy(smp_processor_id());
- wrmsrl(MSR_HWP_STATUS, 0);
- mutex_unlock(&intel_pstate_driver_lock);
-}
-
-void notify_hwp_interrupt(void)
-{
- unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
- u64 value;
-
- if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- return;
-
- rdmsrl(MSR_HWP_STATUS, value);
- if (!(value & 0x01))
- return;
-
- cpudata = all_cpu_data[this_cpu];
- schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10));
-}
-
-static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
-{
- /* Enable HWP notification interrupt for guaranteed performance change */
- if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
- }
-}
-
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
@@ -1671,8 +1555,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
-
- intel_pstate_enable_hwp_interrupt(cpudata);
}
static int atom_get_min_pstate(void)
@@ -1900,6 +1782,38 @@ static int knl_get_turbo_pstate(void)
return ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u32 hybrid_ref_perf;
+
+static int hybrid_get_cpu_scaling(int cpu)
+{
+ return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
+ intel_pstate_cppc_nominal(cpu));
+}
+
+static void intel_pstate_cppc_set_cpu_scaling(void)
+{
+ u32 min_nominal_perf = U32_MAX;
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ u32 nominal_perf = intel_pstate_cppc_nominal(cpu);
+
+ if (nominal_perf && nominal_perf < min_nominal_perf)
+ min_nominal_perf = nominal_perf;
+ }
+
+ if (min_nominal_perf < U32_MAX) {
+ hybrid_ref_perf = min_nominal_perf;
+ pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+ }
+}
+#else
+static inline void intel_pstate_cppc_set_cpu_scaling(void)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
+
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1928,10 +1842,8 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU);
int perf_ctl_max_phys = pstate_funcs.get_max_physical();
- int perf_ctl_scaling = hybrid_cpu ? cpu_khz / perf_ctl_max_phys :
- pstate_funcs.get_scaling();
+ int perf_ctl_scaling = pstate_funcs.get_scaling();
cpu->pstate.min_pstate = pstate_funcs.get_min();
cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
@@ -1940,10 +1852,13 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (hwp_active && !hwp_mode_bdw) {
__intel_pstate_get_hwp_cap(cpu);
- if (hybrid_cpu)
- intel_pstate_hybrid_hwp_calibrate(cpu);
- else
+ if (pstate_funcs.get_cpu_scaling) {
+ cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
+ if (cpu->pstate.scaling != perf_ctl_scaling)
+ intel_pstate_hybrid_hwp_adjust(cpu);
+ } else {
cpu->pstate.scaling = perf_ctl_scaling;
+ }
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max();
@@ -3315,6 +3230,9 @@ static int __init intel_pstate_init(void)
if (!default_driver)
default_driver = &intel_pstate;
+ if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
+ intel_pstate_cppc_set_cpu_scaling();
+
goto hwp_cpu_matched;
}
} else {
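
A worked example of the new hybrid scaling path, using hypothetical CPPC
values: assume core_get_scaling() returns 100000 (100 MHz per P-state step)
and the smallest nominal_perf across all CPUs is 20, so hybrid_ref_perf = 20.

/*
 * For a CPU whose CPPC nominal_perf is 28:
 *
 *   hybrid_get_cpu_scaling(cpu)
 *     = DIV_ROUND_UP(100000 * 20, 28) = 71429
 *
 * i.e. one HWP performance level on that CPU is worth ~71.4 MHz. For
 * the reference CPU itself (nominal_perf == 20) the result is 100000,
 * which equals perf_ctl_scaling, so intel_pstate_get_cpu_pstates()
 * skips intel_pstate_hybrid_hwp_adjust() for it.
 */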
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
new file mode 100644
index 000000000000..0cf18dd46b92
--- /dev/null
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/energy_model.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#define LUT_MAX_ENTRIES 32U
+#define LUT_FREQ GENMASK(11, 0)
+#define LUT_ROW_SIZE 0x4
+#define CPUFREQ_HW_STATUS BIT(0)
+#define SVS_HW_STATUS BIT(1)
+#define POLL_USEC 1000
+#define TIMEOUT_USEC 300000
+
+enum {
+ REG_FREQ_LUT_TABLE,
+ REG_FREQ_ENABLE,
+ REG_FREQ_PERF_STATE,
+ REG_FREQ_HW_STATE,
+ REG_EM_POWER_TBL,
+ REG_FREQ_LATENCY,
+
+ REG_ARRAY_SIZE,
+};
+
+struct mtk_cpufreq_data {
+ struct cpufreq_frequency_table *table;
+ void __iomem *reg_bases[REG_ARRAY_SIZE];
+ int nr_opp;
+};
+
+static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+};
+
+static int __maybe_unused
+mtk_cpufreq_get_cpu_power(unsigned long *mW,
+ unsigned long *KHz, struct device *cpu_dev)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ int i;
+
+ policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ for (i = 0; i < data->nr_opp; i++) {
+ if (data->table[i].frequency < *KHz)
+ break;
+ }
+ i--;
+
+ *KHz = data->table[i].frequency;
+ *mW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
+ i * LUT_ROW_SIZE) / 1000;
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return 0;
+}
+
+static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ unsigned int index;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return data->table[index].frequency;
+}
+
+static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+ unsigned int index;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return policy->freq_table[index].frequency;
+}
+
+static int mtk_cpu_create_freq_table(struct platform_device *pdev,
+ struct mtk_cpufreq_data *data)
+{
+ struct device *dev = &pdev->dev;
+ u32 temp, i, freq, prev_freq = 0;
+ void __iomem *base_table;
+
+ data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
+ sizeof(*data->table), GFP_KERNEL);
+ if (!data->table)
+ return -ENOMEM;
+
+ base_table = data->reg_bases[REG_FREQ_LUT_TABLE];
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE));
+ freq = FIELD_GET(LUT_FREQ, temp) * 1000;
+
+ if (freq == prev_freq)
+ break;
+
+ data->table[i].frequency = freq;
+
+ dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency);
+
+ prev_freq = freq;
+ }
+
+ data->table[i].frequency = CPUFREQ_TABLE_END;
+ data->nr_opp = i;
+
+ return 0;
+}
+
+static int mtk_cpu_resources_init(struct platform_device *pdev,
+ struct cpufreq_policy *policy,
+ const u16 *offsets)
+{
+ struct mtk_cpufreq_data *data;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ int ret, i;
+ int index;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus);
+ if (index < 0)
+ return index;
+
+ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
+ data->reg_bases[i] = base + offsets[i];
+
+ ret = mtk_cpu_create_freq_table(pdev, data);
+ if (ret) {
+ dev_info(dev, "Domain-%d failed to create freq table\n", index);
+ return ret;
+ }
+
+ policy->freq_table = data->table;
+ policy->driver_data = data;
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
+ struct mtk_cpufreq_data *data;
+ unsigned int latency;
+ int ret;
+
+ /* Get the register bases for this CPU's performance domain */
+ ret = mtk_cpu_resources_init(pdev, policy, platform_get_drvdata(pdev));
+ if (ret) {
+ dev_info(&pdev->dev, "CPUFreq resource init failed\n");
+ return ret;
+ }
+
+ data = policy->driver_data;
+
+ latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+ policy->fast_switch_possible = true;
+
+ /* HW should be in enabled state to proceed now */
+ writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]);
+ if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig,
+ (sig & pwr_hw) == pwr_hw, POLL_USEC,
+ TIMEOUT_USEC)) {
+ if (!(sig & CPUFREQ_HW_STATUS)) {
+ pr_info("cpufreq hardware of CPU%d is not enabled\n",
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ pr_info("SVS of CPU%d is not enabled\n", policy->cpu);
+ }
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ /* HW should be in paused state now */
+ writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
+
+ return 0;
+}
+
+static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
+ &em_cb, policy->cpus, true);
+}
+
+static struct cpufreq_driver cpufreq_mtk_hw_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = mtk_cpufreq_hw_target_index,
+ .get = mtk_cpufreq_hw_get,
+ .init = mtk_cpufreq_hw_cpu_init,
+ .exit = mtk_cpufreq_hw_cpu_exit,
+ .register_em = mtk_cpufreq_register_em,
+ .fast_switch = mtk_cpufreq_hw_fast_switch,
+ .name = "mtk-cpufreq-hw",
+ .attr = cpufreq_generic_attr,
+};
+
+static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+ const void *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, (void *) data);
+ cpufreq_mtk_hw_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
+ if (ret)
+ dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+
+ return ret;
+}
+
+static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+}
+
+static const struct of_device_id mtk_cpufreq_hw_match[] = {
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ {}
+};
+
+static struct platform_driver mtk_cpufreq_hw_driver = {
+ .probe = mtk_cpufreq_hw_driver_probe,
+ .remove = mtk_cpufreq_hw_driver_remove,
+ .driver = {
+ .name = "mtk-cpufreq-hw",
+ .of_match_table = mtk_cpufreq_hw_match,
+ },
+};
+module_platform_driver(mtk_cpufreq_hw_driver);
+
+MODULE_AUTHOR("Hector Yuan <hector.yuan@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek cpufreq-hw driver");
+MODULE_LICENSE("GPL v2");
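
A worked example of the LUT decode in mtk_cpu_create_freq_table(), with a
hypothetical register value:

/*
 * If a LUT row reads back as temp = 0x708, then
 *
 *   FIELD_GET(LUT_FREQ, temp) = 0x708 = 1800
 *   freq = 1800 * 1000 = 1800000 kHz (1.8 GHz)
 *
 * The walk stops at the first repeated frequency, so nr_opp counts
 * only the distinct rows. Similarly, mtk_cpufreq_get_cpu_power()
 * divides the REG_EM_POWER_TBL entry by 1000, presumably converting
 * uW to mW for the energy model.
 */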
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 87019d5a9547..866163883b48 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -448,8 +448,6 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
- dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);
-
return 0;
}
@@ -471,6 +469,7 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
+ .register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index e035ee216b0f..1b50df06c6bc 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -131,7 +131,6 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
cpufreq_generic_init(policy, freq_table, 300 * 1000);
- dev_pm_opp_of_register_em(mpu_dev, policy->cpus);
return 0;
}
@@ -150,6 +149,7 @@ static struct cpufreq_driver omap_driver = {
.get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
.name = "omap",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f86859bf76f1..a2be0df7e174 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -7,12 +7,14 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -22,10 +24,13 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
+#define HZ_PER_KHZ 1000
+
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_freq_lut;
u32 reg_volt_lut;
+ u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
};
@@ -34,6 +39,16 @@ struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
const struct qcom_cpufreq_soc_data *soc_data;
+
+ /*
+ * Mutex to synchronize between de-init sequence and re-starting LMh
+ * polling/interrupts
+ */
+ struct mutex throttle_lock;
+ int throttle_irq;
+ bool cancel_throttle;
+ struct delayed_work throttle_work;
+ struct cpufreq_policy *policy;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -251,10 +266,92 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
+static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+{
+ unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
+
+ return (val & 0x3FF) * 19200;
+}
+
+static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+{
+ unsigned long max_capacity, capacity, freq_hz, throttled_freq;
+ struct cpufreq_policy *policy = data->policy;
+ int cpu = cpumask_first(policy->cpus);
+ struct device *dev = get_cpu_device(cpu);
+ struct dev_pm_opp *opp;
+ unsigned int freq;
+
+ /*
+ * Get the h/w throttled frequency, normalize it using the
+ * registered opp table and use it to calculate thermal pressure.
+ */
+ freq = qcom_lmh_get_throttle_freq(data);
+ freq_hz = freq * HZ_PER_KHZ;
+
+ opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ /* Update thermal pressure */
+
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
+
+ /* Don't pass boost capacity to scheduler */
+ if (capacity > max_capacity)
+ capacity = max_capacity;
+
+ arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
+
+ /*
+ * In the unlikely case the policy is unregistered, do not
+ * enable polling or the h/w interrupt.
+ */
+ mutex_lock(&data->throttle_lock);
+ if (data->cancel_throttle)
+ goto out;
+
+ /*
+ * If the h/w throttled frequency is higher than what cpufreq has
+ * requested, stop polling and switch back to the interrupt mechanism.
+ */
+ if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
+ enable_irq(data->throttle_irq);
+ else
+ mod_delayed_work(system_highpri_wq, &data->throttle_work,
+ msecs_to_jiffies(10));
+
+out:
+ mutex_unlock(&data->throttle_lock);
+}
+
+static void qcom_lmh_dcvs_poll(struct work_struct *work)
+{
+ struct qcom_cpufreq_data *data;
+
+ data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
+ qcom_lmh_dcvs_notify(data);
+}
+
+static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
+{
+ struct qcom_cpufreq_data *c_data = data;
+
+ /* Disable interrupt and enable polling */
+ disable_irq_nosync(c_data->throttle_irq);
+ qcom_lmh_dcvs_notify(c_data);
+
+ return 0;
+}
+
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
.reg_enable = 0x0,
.reg_freq_lut = 0x110,
.reg_volt_lut = 0x114,
+ .reg_current_vote = 0x704,
.reg_perf_state = 0x920,
.lut_row_size = 32,
};
@@ -274,6 +371,51 @@ static const struct of_device_id qcom_cpufreq_hw_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ char irq_name[15];
+ int ret;
+
+ /*
+ * Look for the LMh interrupt. If no interrupt line is specified or
+ * there is an error, allow cpufreq to be enabled as usual.
+ */
+ data->throttle_irq = platform_get_irq(pdev, index);
+ if (data->throttle_irq <= 0)
+ return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;
+
+ data->cancel_throttle = false;
+ data->policy = policy;
+
+ mutex_init(&data->throttle_lock);
+ INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
+
+ snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
+ ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
+ IRQF_ONESHOT, irq_name, data);
+ if (ret) {
+ dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
+ if (data->throttle_irq <= 0)
+ return;
+
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = true;
+ mutex_unlock(&data->throttle_lock);
+
+ cancel_delayed_work_sync(&data->throttle_work);
+ free_irq(data->throttle_irq, data);
+}
+
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
@@ -348,6 +490,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
}
policy->driver_data = data;
+ policy->dvfs_possible_from_any_cpu = true;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -362,14 +505,16 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
if (policy_has_boost_freq(policy)) {
ret = cpufreq_enable_boost_support();
if (ret)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
+ ret = qcom_cpufreq_hw_lmh_init(policy, index);
+ if (ret)
+ goto error;
+
return 0;
error:
kfree(data);
@@ -389,6 +534,7 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
iounmap(base);
@@ -412,6 +558,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
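
A worked example of the LMh throttle accounting, with hypothetical values:

/*
 * If reg_current_vote reads back as 0x4b, the vote field is 75 and
 *
 *   throttled_freq = 75 * 19200 = 1440000 kHz
 *
 * With policy->cpuinfo.max_freq = 1920000 kHz and a CPU capacity of
 * 1024, qcom_lmh_dcvs_notify() computes
 *
 *   capacity = mult_frac(1024, 1440000, 1920000) = 768
 *
 * and reports 1024 - 768 = 256 of thermal pressure via
 * arch_set_thermal_pressure().
 */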
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 75f818d04b48..1e0cd4d165f0 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -22,7 +22,9 @@
struct scmi_data {
int domain_id;
+ int nr_opp;
struct device *cpu_dev;
+ cpumask_var_t opp_shared_cpus;
};
static struct scmi_protocol_handle *ph;
@@ -123,9 +125,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
- struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
- cpumask_var_t opp_shared_cpus;
- bool power_scale_mw;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -133,9 +132,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
* The OPP 'sharing cpus' info may come from DT through an empty opp
* table and opp-shared.
*/
- ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
- if (ret || !cpumask_weight(opp_shared_cpus)) {
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+ if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
* table.
*/
- cpumask_copy(opp_shared_cpus, policy->cpus);
+ cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
/*
@@ -180,7 +185,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
@@ -188,21 +193,13 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- power_scale_mw = perf_ops->power_scale_mw_get(ph);
- em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
- opp_shared_cpus, power_scale_mw);
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out_free_opp;
+ priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_free_opp;
}
priv->cpu_dev = cpu_dev;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
- free_cpumask_var(opp_shared_cpus);
return 0;
-out_free_priv:
- kfree(priv);
-
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
out_free_cpumask:
- free_cpumask_var(opp_shared_cpus);
+ free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+ kfree(priv);
return ret;
}
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
return 0;
}
+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+ bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+ struct scmi_data *priv = policy->driver_data;
+
+ /*
+ * This callback will be called for each policy, but we don't need to
+ * register with EM every time. Despite not being part of the same
+ * policy, some CPUs may still share their perf-domains, and a CPU from
+ * another policy may already have registered with EM on behalf of CPUs
+ * of this policy.
+ */
+ if (!priv->nr_opp)
+ return;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+ &em_cb, priv->opp_shared_cpus,
+ power_scale_mw);
+}
+
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
+ .register_em = scmi_cpufreq_register_em,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
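
The motivation for moving nr_opp and opp_shared_cpus into struct scmi_data
is the new call ordering: ->register_em() runs from cpufreq_online() after
->init() has returned, so it can only rely on state reachable from
policy->driver_data.

/*
 * Per-policy call order after this change (driven by the cpufreq core):
 *
 *   scmi_cpufreq_init(policy)         - stores nr_opp, opp_shared_cpus
 *   scmi_cpufreq_register_em(policy)  - called later by cpufreq_online()
 *
 * The nr_opp == 0 check covers CPUs whose perf domain was already
 * registered with the EM on behalf of another policy.
 */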
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index d6a698a1b5d1..bda3e7d42964 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -163,8 +163,6 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = false;
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
return 0;
out_free_cpufreq_table:
@@ -200,6 +198,7 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
.target_index = scpi_cpufreq_set_target,
+ .register_em = cpufreq_register_em_with_opp,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 1a251e635ebd..b8704232c27b 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -145,16 +145,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void sh_cpufreq_cpu_ready(struct cpufreq_policy *policy)
-{
- struct device *dev = get_cpu_device(policy->cpu);
-
- dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
- "Maximum %u.%03u MHz.\n",
- policy->min / 1000, policy->min % 1000,
- policy->max / 1000, policy->max % 1000);
-}
-
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
@@ -163,7 +153,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .ready = sh_cpufreq_cpu_ready,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 51dfa9ae6cf5..d295f405c4bb 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -47,7 +46,6 @@ static bool bL_switching_enabled;
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@@ -442,8 +440,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
- dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
-
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
clk_get_cpu_rate(policy->cpu);
@@ -455,12 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- if (cur_cluster < MAX_CLUSTERS) {
- cpufreq_cooling_unregister(cdev[cur_cluster]);
- cdev[cur_cluster] = NULL;
- }
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -473,17 +463,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
-{
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- /* Do not register a cpu_cooling device if we are in IKS mode */
- if (cur_cluster >= MAX_CLUSTERS)
- return;
-
- cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -493,7 +472,7 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.get = ve_spc_cpufreq_get_rate,
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
- .ready = ve_spc_cpufreq_ready,
+ .register_em = cpufreq_register_em_with_opp,
.attr = cpufreq_generic_attr,
};
@@ -553,6 +532,9 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
+ if (!is_bL_switching_enabled())
+ ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
+
ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 96bc7b5c6532..6c61817996a3 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -306,9 +306,7 @@ static int nitrox_device_flr(struct pci_dev *pdev)
return -ENOMEM;
}
- /* check flr support */
- if (pcie_has_flr(pdev))
- pcie_flr(pdev);
+ pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
pci_restore_state(pdev);
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index 32954059b37b..d1aaabc940f3 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,11 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_BUS) += core/
obj-$(CONFIG_CXL_MEM) += cxl_pci.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
-cxl_core-y := core.o
cxl_pci-y := pci.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 8ae89273f58e..54e9d4d2cf5f 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
{
struct acpi_device *adev = to_acpi_device(dev);
+ if (!acpi_pci_find_root(adev->handle))
+ return NULL;
+
if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
return adev;
return NULL;
@@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (!bridge)
return 0;
- pci_root = acpi_pci_find_root(bridge->handle);
- if (!pci_root)
- return -ENXIO;
-
dport = find_dport_by_dev(root_port, match);
if (!dport) {
dev_dbg(host, "host bridge expected and not found\n");
@@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
+ /*
+ * Note that this lookup already succeeded in
+ * to_cxl_host_bridge(), so no need to check for failure here
+ */
+ pci_root = acpi_pci_find_root(bridge->handle);
ctx = (struct cxl_walk_context){
.dev = host,
.root = pci_root->bus,
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
new file mode 100644
index 000000000000..0fdbf3c6ac1a
--- /dev/null
+++ b/drivers/cxl/core/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CXL_BUS) += cxl_core.o
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
+cxl_core-y := bus.o
+cxl_core-y += pmem.o
+cxl_core-y += regs.o
+cxl_core-y += memdev.o
diff --git a/drivers/cxl/core.c b/drivers/cxl/core/bus.c
index 2b90b7c3b9d7..267d8042bec2 100644
--- a/drivers/cxl/core.c
+++ b/drivers/cxl/core/bus.c
@@ -6,14 +6,22 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
-#include "cxl.h"
-#include "mem.h"
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
/**
* DOC: cxl core
*
- * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
- * point for cross-device interleave coordination through cxl ports.
+ * The CXL core provides a set of interfaces that can be consumed by CXL aware
+ * drivers. The interfaces allow for creation, modification, and destruction of
+ * regions, memory devices, ports, and decoders. CXL aware drivers must register
+ * with the CXL core via these interfaces in order to be able to participate in
+ * cross-device interleave coordination. The CXL core also establishes and
+ * maintains the bridge to the nvdimm subsystem.
+ *
+ * In addition, the CXL core introduces a sysfs hierarchy to control
+ * the devices that are instantiated by the core.
*/
static DEFINE_IDA(cxl_port_ida);
@@ -30,7 +38,7 @@ static struct attribute *cxl_base_attributes[] = {
NULL,
};
-static struct attribute_group cxl_base_attribute_group = {
+struct attribute_group cxl_base_attribute_group = {
.attrs = cxl_base_attributes,
};
@@ -507,11 +515,6 @@ err:
return ERR_PTR(rc);
}
-static void unregister_dev(void *dev)
-{
- device_unregister(dev);
-}
-
struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
resource_size_t base, resource_size_t len,
@@ -536,7 +539,7 @@ devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
if (rc)
goto err;
- rc = devm_add_action_or_reset(host, unregister_dev, dev);
+ rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
if (rc)
return ERR_PTR(rc);
return cxld;
@@ -548,429 +551,6 @@ err:
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
/**
- * cxl_probe_component_regs() - Detect CXL Component register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping containing the HDM Decoder Capability Header
- * @map: Map object describing the register block information found
- *
- * See CXL 2.0 8.2.4 Component Register Layout and Definition
- * See CXL 2.0 8.2.5.5 CXL Device Register Interface
- *
- * Probe for component register information and return it in map object.
- */
-void cxl_probe_component_regs(struct device *dev, void __iomem *base,
- struct cxl_component_reg_map *map)
-{
- int cap, cap_count;
- u64 cap_array;
-
- *map = (struct cxl_component_reg_map) { 0 };
-
- /*
- * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
- * CXL 2.0 8.2.4 Table 141.
- */
- base += CXL_CM_OFFSET;
-
- cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
-
- if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
- dev_err(dev,
- "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
- return;
- }
-
- /* It's assumed that future versions will be backward compatible */
- cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- void __iomem *register_block;
- u32 hdr;
- int decoder_cnt;
- u16 cap_id, offset;
- u32 length;
-
- hdr = readl(base + cap * 0x4);
-
- cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
- offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
- register_block = base + offset;
-
- switch (cap_id) {
- case CXL_CM_CAP_CAP_ID_HDM:
- dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
- offset);
-
- hdr = readl(register_block);
-
- decoder_cnt = cxl_hdm_decoder_count(hdr);
- length = 0x20 * decoder_cnt + 0x10;
-
- map->hdm_decoder.valid = true;
- map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
- map->hdm_decoder.size = length;
- break;
- default:
- dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
- offset);
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
-
-static void cxl_nvdimm_bridge_release(struct device *dev)
-{
- struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
-
- kfree(cxl_nvb);
-}
-
-static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
- &cxl_base_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_nvdimm_bridge_type = {
- .name = "cxl_nvdimm_bridge",
- .release = cxl_nvdimm_bridge_release,
- .groups = cxl_nvdimm_bridge_attribute_groups,
-};
-
-struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
-{
- if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
- "not a cxl_nvdimm_bridge device\n"))
- return NULL;
- return container_of(dev, struct cxl_nvdimm_bridge, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
-
-static struct cxl_nvdimm_bridge *
-cxl_nvdimm_bridge_alloc(struct cxl_port *port)
-{
- struct cxl_nvdimm_bridge *cxl_nvb;
- struct device *dev;
-
- cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
- if (!cxl_nvb)
- return ERR_PTR(-ENOMEM);
-
- dev = &cxl_nvb->dev;
- cxl_nvb->port = port;
- cxl_nvb->state = CXL_NVB_NEW;
- device_initialize(dev);
- device_set_pm_not_required(dev);
- dev->parent = &port->dev;
- dev->bus = &cxl_bus_type;
- dev->type = &cxl_nvdimm_bridge_type;
-
- return cxl_nvb;
-}
-
-static void unregister_nvb(void *_cxl_nvb)
-{
- struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
- bool flush;
-
- /*
- * If the bridge was ever activated then there might be in-flight state
- * work to flush. Once the state has been changed to 'dead' then no new
- * work can be queued by user-triggered bind.
- */
- device_lock(&cxl_nvb->dev);
- flush = cxl_nvb->state != CXL_NVB_NEW;
- cxl_nvb->state = CXL_NVB_DEAD;
- device_unlock(&cxl_nvb->dev);
-
- /*
- * Even though the device core will trigger device_release_driver()
- * before the unregister, it does not know about the fact that
- * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
- * release not and flush it before tearing down the nvdimm device
- * hierarchy.
- */
- device_release_driver(&cxl_nvb->dev);
- if (flush)
- flush_work(&cxl_nvb->state_work);
- device_unregister(&cxl_nvb->dev);
-}
-
-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
- struct cxl_port *port)
-{
- struct cxl_nvdimm_bridge *cxl_nvb;
- struct device *dev;
- int rc;
-
- if (!IS_ENABLED(CONFIG_CXL_PMEM))
- return ERR_PTR(-ENXIO);
-
- cxl_nvb = cxl_nvdimm_bridge_alloc(port);
- if (IS_ERR(cxl_nvb))
- return cxl_nvb;
-
- dev = &cxl_nvb->dev;
- rc = dev_set_name(dev, "nvdimm-bridge");
- if (rc)
- goto err;
-
- rc = device_add(dev);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
- if (rc)
- return ERR_PTR(rc);
-
- return cxl_nvb;
-
-err:
- put_device(dev);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
-
-static void cxl_nvdimm_release(struct device *dev)
-{
- struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
-
- kfree(cxl_nvd);
-}
-
-static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
- &cxl_base_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_nvdimm_type = {
- .name = "cxl_nvdimm",
- .release = cxl_nvdimm_release,
- .groups = cxl_nvdimm_attribute_groups,
-};
-
-bool is_cxl_nvdimm(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_type;
-}
-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
-
-struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
-{
- if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
- "not a cxl_nvdimm device\n"))
- return NULL;
- return container_of(dev, struct cxl_nvdimm, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
-
-static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
-{
- struct cxl_nvdimm *cxl_nvd;
- struct device *dev;
-
- cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
- if (!cxl_nvd)
- return ERR_PTR(-ENOMEM);
-
- dev = &cxl_nvd->dev;
- cxl_nvd->cxlmd = cxlmd;
- device_initialize(dev);
- device_set_pm_not_required(dev);
- dev->parent = &cxlmd->dev;
- dev->bus = &cxl_bus_type;
- dev->type = &cxl_nvdimm_type;
-
- return cxl_nvd;
-}
-
-int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
-{
- struct cxl_nvdimm *cxl_nvd;
- struct device *dev;
- int rc;
-
- cxl_nvd = cxl_nvdimm_alloc(cxlmd);
- if (IS_ERR(cxl_nvd))
- return PTR_ERR(cxl_nvd);
-
- dev = &cxl_nvd->dev;
- rc = dev_set_name(dev, "pmem%d", cxlmd->id);
- if (rc)
- goto err;
-
- rc = device_add(dev);
- if (rc)
- goto err;
-
- dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
- dev_name(dev));
-
- return devm_add_action_or_reset(host, unregister_dev, dev);
-
-err:
- put_device(dev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
-
-/**
- * cxl_probe_device_regs() - Detect CXL Device register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
- * @map: Map object describing the register block information found
- *
- * Probe for device register information and return it in map object.
- */
-void cxl_probe_device_regs(struct device *dev, void __iomem *base,
- struct cxl_device_reg_map *map)
-{
- int cap, cap_count;
- u64 cap_array;
-
- *map = (struct cxl_device_reg_map){ 0 };
-
- cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
- if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
- CXLDEV_CAP_ARRAY_CAP_ID)
- return;
-
- cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
-
- for (cap = 1; cap <= cap_count; cap++) {
- u32 offset, length;
- u16 cap_id;
-
- cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
- readl(base + cap * 0x10));
- offset = readl(base + cap * 0x10 + 0x4);
- length = readl(base + cap * 0x10 + 0x8);
-
- switch (cap_id) {
- case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
- dev_dbg(dev, "found Status capability (0x%x)\n", offset);
-
- map->status.valid = true;
- map->status.offset = offset;
- map->status.size = length;
- break;
- case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
- dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
- map->mbox.valid = true;
- map->mbox.offset = offset;
- map->mbox.size = length;
- break;
- case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
- dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
- break;
- case CXLDEV_CAP_CAP_ID_MEMDEV:
- dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
- map->memdev.valid = true;
- map->memdev.offset = offset;
- map->memdev.size = length;
- break;
- default:
- if (cap_id >= 0x8000)
- dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
- else
- dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
- break;
- }
- }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
-
-static void __iomem *devm_cxl_iomap_block(struct device *dev,
- resource_size_t addr,
- resource_size_t length)
-{
- void __iomem *ret_val;
- struct resource *res;
-
- res = devm_request_mem_region(dev, addr, length, dev_name(dev));
- if (!res) {
- resource_size_t end = addr + length - 1;
-
- dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
- return NULL;
- }
-
- ret_val = devm_ioremap(dev, addr, length);
- if (!ret_val)
- dev_err(dev, "Failed to map region %pr\n", res);
-
- return ret_val;
-}
-
-int cxl_map_component_regs(struct pci_dev *pdev,
- struct cxl_component_regs *regs,
- struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
- resource_size_t phys_addr;
- resource_size_t length;
-
- phys_addr = pci_resource_start(pdev, map->barno);
- phys_addr += map->block_offset;
-
- phys_addr += map->component_map.hdm_decoder.offset;
- length = map->component_map.hdm_decoder.size;
- regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
- if (!regs->hdm_decoder)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
-
-int cxl_map_device_regs(struct pci_dev *pdev,
- struct cxl_device_regs *regs,
- struct cxl_register_map *map)
-{
- struct device *dev = &pdev->dev;
- resource_size_t phys_addr;
-
- phys_addr = pci_resource_start(pdev, map->barno);
- phys_addr += map->block_offset;
-
- if (map->device_map.status.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.status.offset;
- length = map->device_map.status.size;
- regs->status = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->status)
- return -ENOMEM;
- }
-
- if (map->device_map.mbox.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.mbox.offset;
- length = map->device_map.mbox.size;
- regs->mbox = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->mbox)
- return -ENOMEM;
- }
-
- if (map->device_map.memdev.valid) {
- resource_size_t addr;
- resource_size_t length;
-
- addr = phys_addr + map->device_map.memdev.offset;
- length = map->device_map.memdev.size;
- regs->memdev = devm_cxl_iomap_block(dev, addr, length);
- if (!regs->memdev)
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
-
-/**
* __cxl_driver_register - register a driver for the cxl bus
* @cxl_drv: cxl driver structure to attach
* @owner: owning module/driver
@@ -1053,12 +633,26 @@ EXPORT_SYMBOL_GPL(cxl_bus_type);
static __init int cxl_core_init(void)
{
- return bus_register(&cxl_bus_type);
+ int rc;
+
+ rc = cxl_memdev_init();
+ if (rc)
+ return rc;
+
+ rc = bus_register(&cxl_bus_type);
+ if (rc)
+ goto err;
+ return 0;
+
+err:
+ cxl_memdev_exit();
+ return rc;
}
static void cxl_core_exit(void)
{
bus_unregister(&cxl_bus_type);
+ cxl_memdev_exit();
}
module_init(cxl_core_init);
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
new file mode 100644
index 000000000000..036a3c8106b4
--- /dev/null
+++ b/drivers/cxl/core/core.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef __CXL_CORE_H__
+#define __CXL_CORE_H__
+
+extern const struct device_type cxl_nvdimm_bridge_type;
+extern const struct device_type cxl_nvdimm_type;
+
+extern struct attribute_group cxl_base_attribute_group;
+
+static inline void unregister_cxl_dev(void *dev)
+{
+ device_unregister(dev);
+}
+
+int cxl_memdev_init(void);
+void cxl_memdev_exit(void);
+
+#endif /* __CXL_CORE_H__ */
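
unregister_cxl_dev() exists so every object type in the core can share one
devm-managed teardown pattern. A sketch of the intended use, following
bus.c above (foo_register() is a hypothetical caller):

static int foo_register(struct device *host, struct device *dev)
{
	int rc;

	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	/* Unregister @dev automatically when @host is unbound. */
	return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
}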
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
new file mode 100644
index 000000000000..a9c317e32010
--- /dev/null
+++ b/drivers/cxl/core/memdev.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+#include "core.h"
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+static int cxl_mem_major;
+static DEFINE_IDA(cxl_memdev_ida);
+
+static void cxl_memdev_release(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+ kfree(cxlmd);
+}
+
+static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid)
+{
+ return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t payload_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+}
+static DEVICE_ATTR_RO(payload_max);
+
+static ssize_t label_storage_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+
+ return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+}
+static DEVICE_ATTR_RO(label_storage_size);
+
+static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->ram_range);
+
+ return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_ram_size =
+ __ATTR(size, 0444, ram_size_show, NULL);
+
+static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_mem *cxlm = cxlmd->cxlm;
+ unsigned long long len = range_len(&cxlm->pmem_range);
+
+ return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_pmem_size =
+ __ATTR(size, 0444, pmem_size_show, NULL);
+
+static struct attribute *cxl_memdev_attributes[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_payload_max.attr,
+ &dev_attr_label_storage_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_pmem_attributes[] = {
+ &dev_attr_pmem_size.attr,
+ NULL,
+};
+
+static struct attribute *cxl_memdev_ram_attributes[] = {
+ &dev_attr_ram_size.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_memdev_attribute_group = {
+ .attrs = cxl_memdev_attributes,
+};
+
+static struct attribute_group cxl_memdev_ram_attribute_group = {
+ .name = "ram",
+ .attrs = cxl_memdev_ram_attributes,
+};
+
+static struct attribute_group cxl_memdev_pmem_attribute_group = {
+ .name = "pmem",
+ .attrs = cxl_memdev_pmem_attributes,
+};
+
+static const struct attribute_group *cxl_memdev_attribute_groups[] = {
+ &cxl_memdev_attribute_group,
+ &cxl_memdev_ram_attribute_group,
+ &cxl_memdev_pmem_attribute_group,
+ NULL,
+};
+
+static const struct device_type cxl_memdev_type = {
+ .name = "cxl_memdev",
+ .release = cxl_memdev_release,
+ .devnode = cxl_memdev_devnode,
+ .groups = cxl_memdev_attribute_groups,
+};
+
+static void cxl_memdev_unregister(void *_cxlmd)
+{
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+ struct cdev *cdev = &cxlmd->cdev;
+ const struct cdevm_file_operations *cdevm_fops;
+
+ cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
+ cdevm_fops->shutdown(dev);
+
+ cdev_device_del(&cxlmd->cdev, dev);
+ put_device(dev);
+}
+
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+ const struct file_operations *fops)
+{
+ struct pci_dev *pdev = cxlm->pdev;
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
+ if (!cxlmd)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
+ if (rc < 0)
+ goto err;
+ cxlmd->id = rc;
+
+ dev = &cxlmd->dev;
+ device_initialize(dev);
+ dev->parent = &pdev->dev;
+ dev->bus = &cxl_bus_type;
+ dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
+ dev->type = &cxl_memdev_type;
+ device_set_pm_not_required(dev);
+
+ cdev = &cxlmd->cdev;
+ cdev_init(cdev, fops);
+ return cxlmd;
+
+err:
+ kfree(cxlmd);
+ return ERR_PTR(rc);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+ const struct cdevm_file_operations *cdevm_fops)
+{
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
+ if (IS_ERR(cxlmd))
+ return cxlmd;
+
+ dev = &cxlmd->dev;
+ rc = dev_set_name(dev, "mem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ /*
+ * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+ * needed as this is ordered with cdev_add() publishing the device.
+ */
+ cxlmd->cxlm = cxlm;
+
+ cdev = &cxlmd->cdev;
+ rc = cdev_device_add(cdev, dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxlmd;
+
+err:
+ /*
+ * The cdev was briefly live; shut down any ioctl operations that
+ * saw that state.
+ */
+ cdevm_fops->shutdown(dev);
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
+
+__init int cxl_memdev_init(void)
+{
+ dev_t devt;
+ int rc;
+
+ rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
+ if (rc)
+ return rc;
+
+ cxl_mem_major = MAJOR(devt);
+
+ return 0;
+}
+
+void cxl_memdev_exit(void)
+{
+ unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+}
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
new file mode 100644
index 000000000000..d24570f5b8ba
--- /dev/null
+++ b/drivers/cxl/core/pmem.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl pmem
+ *
+ * The core CXL PMEM infrastructure supports persistent memory
+ * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
+ * 'bridge' device is added at the root of a CXL device topology if
+ * platform firmware advertises at least one persistent memory capable
+ * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
+ * device. Then, for each cxl_memdev in the CXL device topology, a
+ * bridge device is added to host a LIBNVDIMM dimm object. When these
+ * bridges are registered, native LIBNVDIMM uapis are translated to CXL
+ * operations, for example, namespace label access commands.
+ */
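A rough usage sketch of the two registration entry points defined below
(error handling trimmed; 'host', 'root_port' and 'cxlmd' are placeholders for
a real caller such as the platform root driver):

	struct cxl_nvdimm_bridge *cxl_nvb;
	int rc;

	/* one bridge at the topology root hosts the LIBNVDIMM bus */
	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb))
		return PTR_ERR(cxl_nvb);

	/* ...then one cxl_nvdimm per memdev hosts a LIBNVDIMM dimm */
	rc = devm_cxl_add_nvdimm(host, cxlmd);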
+
+static void cxl_nvdimm_bridge_release(struct device *dev)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+ kfree(cxl_nvb);
+}
+
+static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_nvdimm_bridge_type = {
+ .name = "cxl_nvdimm_bridge",
+ .release = cxl_nvdimm_bridge_release,
+ .groups = cxl_nvdimm_bridge_attribute_groups,
+};
+
+struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
+ "not a cxl_nvdimm_bridge device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm_bridge, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+
+static struct cxl_nvdimm_bridge *
+cxl_nvdimm_bridge_alloc(struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+
+ cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
+ if (!cxl_nvb)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvb->dev;
+ cxl_nvb->port = port;
+ cxl_nvb->state = CXL_NVB_NEW;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &port->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_bridge_type;
+
+ return cxl_nvb;
+}
+
+static void unregister_nvb(void *_cxl_nvb)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
+ bool flush;
+
+ /*
+ * If the bridge was ever activated then there might be in-flight state
+ * work to flush. Once the state has been changed to 'dead' then no new
+ * work can be queued by user-triggered bind.
+ */
+ device_lock(&cxl_nvb->dev);
+ flush = cxl_nvb->state != CXL_NVB_NEW;
+ cxl_nvb->state = CXL_NVB_DEAD;
+ device_unlock(&cxl_nvb->dev);
+
+ /*
+ * Even though the device core will trigger device_release_driver()
+ * before the unregister, it does not know that
+ * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
+ * release now and flush it before tearing down the nvdimm device
+ * hierarchy.
+ */
+ device_release_driver(&cxl_nvb->dev);
+ if (flush)
+ flush_work(&cxl_nvb->state_work);
+ device_unregister(&cxl_nvb->dev);
+}
+
+/**
+ * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+ * @host: platform firmware root device
+ * @port: CXL port at the root of a CXL topology
+ *
+ * Return: bridge device that can host cxl_nvdimm objects
+ */
+struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_port *port)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+ struct device *dev;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_PMEM))
+ return ERR_PTR(-ENXIO);
+
+ cxl_nvb = cxl_nvdimm_bridge_alloc(port);
+ if (IS_ERR(cxl_nvb))
+ return cxl_nvb;
+
+ dev = &cxl_nvb->dev;
+ rc = dev_set_name(dev, "nvdimm-bridge");
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return cxl_nvb;
+
+err:
+ put_device(dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
+
+static void cxl_nvdimm_release(struct device *dev)
+{
+ struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+
+ kfree(cxl_nvd);
+}
+
+static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
+ &cxl_base_attribute_group,
+ NULL,
+};
+
+const struct device_type cxl_nvdimm_type = {
+ .name = "cxl_nvdimm",
+ .release = cxl_nvdimm_release,
+ .groups = cxl_nvdimm_attribute_groups,
+};
+
+bool is_cxl_nvdimm(struct device *dev)
+{
+ return dev->type == &cxl_nvdimm_type;
+}
+EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+
+struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
+{
+ if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
+ "not a cxl_nvdimm device\n"))
+ return NULL;
+ return container_of(dev, struct cxl_nvdimm, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+
+static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+
+ cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
+ if (!cxl_nvd)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &cxl_nvd->dev;
+ cxl_nvd->cxlmd = cxlmd;
+ device_initialize(dev);
+ device_set_pm_not_required(dev);
+ dev->parent = &cxlmd->dev;
+ dev->bus = &cxl_bus_type;
+ dev->type = &cxl_nvdimm_type;
+
+ return cxl_nvd;
+}
+
+/**
+ * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
+ * @host: same host as @cxlmd
+ * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
+{
+ struct cxl_nvdimm *cxl_nvd;
+ struct device *dev;
+ int rc;
+
+ cxl_nvd = cxl_nvdimm_alloc(cxlmd);
+ if (IS_ERR(cxl_nvd))
+ return PTR_ERR(cxl_nvd);
+
+ dev = &cxl_nvd->dev;
+ rc = dev_set_name(dev, "pmem%d", cxlmd->id);
+ if (rc)
+ goto err;
+
+ rc = device_add(dev);
+ if (rc)
+ goto err;
+
+ dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
+ dev_name(dev));
+
+ return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+
+err:
+ put_device(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
new file mode 100644
index 000000000000..41de4a136ecd
--- /dev/null
+++ b/drivers/cxl/core/regs.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+
+/**
+ * DOC: cxl registers
+ *
+ * CXL device capabilities are enumerated by PCI DVSEC (Designated
+ * Vendor-Specific Extended Capability) and / or descriptors provided
+ * by platform firmware. They can be defined as a set like the device
+ * and component registers mandated by CXL Section 8.1.12.2 Memory
+ * Device PCIe Capabilities and Extended Capabilities, or they can be
+ * individual capabilities appended to bridge and endpoint devices.
+ *
+ * Provide common infrastructure for enumerating and mapping these
+ * discrete capabilities.
+ */
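The enumeration is two-phase: probe once through a temporary mapping to
record offsets and sizes in a struct cxl_register_map, then map only the
discovered sub-blocks. A condensed sketch of a caller (the 'bar', 'offset',
'base' and 'pdev' variables are placeholders for values a real probe path
supplies):

	struct cxl_register_map map = { .barno = bar, .block_offset = offset };
	struct cxl_device_regs regs;
	int rc;

	cxl_probe_device_regs(&pdev->dev, base, &map.device_map);
	/* 'base' may be unmapped now; only offsets/sizes were recorded */
	rc = cxl_map_device_regs(pdev, &regs, &map);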
+
+/**
+ * cxl_probe_component_regs() - Detect CXL Component register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping containing the HDM Decoder Capability Header
+ * @map: Map object describing the register block information found
+ *
+ * See CXL 2.0 8.2.4 Component Register Layout and Definition
+ * See CXL 2.0 8.2.5.5 CXL Device Register Interface
+ *
+ * Probe for component register information and return it in the map object.
+ */
+void cxl_probe_component_regs(struct device *dev, void __iomem *base,
+ struct cxl_component_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_component_reg_map) { 0 };
+
+ /*
+ * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
+ * CXL 2.0 8.2.4 Table 141.
+ */
+ base += CXL_CM_OFFSET;
+
+ cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
+
+ if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
+ dev_err(dev,
+ "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
+ return;
+ }
+
+ /* It's assumed that future versions will be backward compatible */
+ cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ void __iomem *register_block;
+ u32 hdr;
+ int decoder_cnt;
+ u16 cap_id, offset;
+ u32 length;
+
+ hdr = readl(base + cap * 0x4);
+
+ cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
+ offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
+ register_block = base + offset;
+
+ switch (cap_id) {
+ case CXL_CM_CAP_CAP_ID_HDM:
+ dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
+ offset);
+
+ hdr = readl(register_block);
+
+ decoder_cnt = cxl_hdm_decoder_count(hdr);
+ length = 0x20 * decoder_cnt + 0x10;
+
+ map->hdm_decoder.valid = true;
+ map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
+ map->hdm_decoder.size = length;
+ break;
+ default:
+ dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
+ offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
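The size computed above reflects the HDM decoder register layout: a 0x10-byte
capability header followed by 0x20 bytes of registers per decoder. A device
reporting 4 decoders, for example, yields 0x20 * 4 + 0x10 = 0x90 bytes.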
+
+/**
+ * cxl_probe_device_regs() - Detect CXL Device register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
+ * @map: Map object describing the register block information found
+ *
+ * Probe for device register information and return it in the map object.
+ */
+void cxl_probe_device_regs(struct device *dev, void __iomem *base,
+ struct cxl_device_reg_map *map)
+{
+ int cap, cap_count;
+ u64 cap_array;
+
+ *map = (struct cxl_device_reg_map){ 0 };
+
+ cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
+ if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+ CXLDEV_CAP_ARRAY_CAP_ID)
+ return;
+
+ cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+ for (cap = 1; cap <= cap_count; cap++) {
+ u32 offset, length;
+ u16 cap_id;
+
+ cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+ readl(base + cap * 0x10));
+ offset = readl(base + cap * 0x10 + 0x4);
+ length = readl(base + cap * 0x10 + 0x8);
+
+ switch (cap_id) {
+ case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+ dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+
+ map->status.valid = true;
+ map->status.offset = offset;
+ map->status.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+ dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+ map->mbox.valid = true;
+ map->mbox.offset = offset;
+ map->mbox.size = length;
+ break;
+ case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+ dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+ break;
+ case CXLDEV_CAP_CAP_ID_MEMDEV:
+ dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+ map->memdev.valid = true;
+ map->memdev.offset = offset;
+ map->memdev.size = length;
+ break;
+ default:
+ if (cap_id >= 0x8000)
+ dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
+ else
+ dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+
+static void __iomem *devm_cxl_iomap_block(struct device *dev,
+ resource_size_t addr,
+ resource_size_t length)
+{
+ void __iomem *ret_val;
+ struct resource *res;
+
+ res = devm_request_mem_region(dev, addr, length, dev_name(dev));
+ if (!res) {
+ resource_size_t end = addr + length - 1;
+
+ dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
+ return NULL;
+ }
+
+ ret_val = devm_ioremap(dev, addr, length);
+ if (!ret_val)
+ dev_err(dev, "Failed to map region %pr\n", res);
+
+ return ret_val;
+}
+
+int cxl_map_component_regs(struct pci_dev *pdev,
+ struct cxl_component_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+ resource_size_t length;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ phys_addr += map->component_map.hdm_decoder.offset;
+ length = map->component_map.hdm_decoder.size;
+ regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
+ if (!regs->hdm_decoder)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+
+int cxl_map_device_regs(struct pci_dev *pdev,
+ struct cxl_device_regs *regs,
+ struct cxl_register_map *map)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t phys_addr;
+
+ phys_addr = pci_resource_start(pdev, map->barno);
+ phys_addr += map->block_offset;
+
+ if (map->device_map.status.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.status.offset;
+ length = map->device_map.status.size;
+ regs->status = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->status)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.mbox.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.mbox.offset;
+ length = map->device_map.mbox.size;
+ regs->mbox = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->mbox)
+ return -ENOMEM;
+ }
+
+ if (map->device_map.memdev.valid) {
+ resource_size_t addr;
+ resource_size_t length;
+
+ addr = phys_addr + map->device_map.memdev.offset;
+ length = map->device_map.memdev.size;
+ regs->memdev = devm_cxl_iomap_block(dev, addr, length);
+ if (!regs->memdev)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_device_regs);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6bda39a59e3..53927f9fa77e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -140,7 +140,6 @@ struct cxl_device_reg_map {
};
struct cxl_register_map {
- struct list_head list;
u64 block_offset;
u8 reg_type;
u8 barno;
diff --git a/drivers/cxl/mem.h b/drivers/cxl/cxlmem.h
index 8f02d02b26b4..6c0b1e2ea97c 100644
--- a/drivers/cxl/mem.h
+++ b/drivers/cxl/cxlmem.h
@@ -28,11 +28,20 @@
(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
CXLMDEV_RESET_NEEDED_NOT)
-/*
- * An entire PCI topology full of devices should be enough for any
- * config
+/**
+ * struct cdevm_file_operations - devm coordinated cdev file operations
+ * @fops: file operations that are synchronized against @shutdown
+ * @shutdown: disconnect driver data
+ *
+ * @shutdown is invoked in the devres release path to disconnect any
+ * driver instance data from @dev. It assumes synchronization with any
+ * fops operation that requires driver data. After @shutdown, an
+ * operation may only reference data embedded in the device itself.
*/
-#define CXL_MEM_MAX_DEVS 65536
+struct cdevm_file_operations {
+ struct file_operations fops;
+ void (*shutdown)(struct device *dev);
+};
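The intended pairing is that every fops entry point revalidates the driver
data under the same lock that @shutdown takes for write. An illustrative
sketch, mirroring the rwsem idiom that pci.c adopts below (the ioctl body
here is hypothetical):

	static long my_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
	{
		struct cxl_memdev *cxlmd = file->private_data;
		long rc = -ENXIO;

		down_read(&cxl_memdev_rwsem);
		if (cxlmd->cxlm)	/* still attached to driver data? */
			rc = 0;		/* ...dispatch the command here... */
		up_read(&cxl_memdev_rwsem);

		return rc;
	}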
/**
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
@@ -48,6 +57,15 @@ struct cxl_memdev {
int id;
};
+static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+ return container_of(dev, struct cxl_memdev, dev);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+ const struct cdevm_file_operations *cdevm_fops);
+
/**
* struct cxl_mem - A CXL memory device
* @pdev: The PCI device associated with this CXL device.
@@ -77,5 +95,14 @@ struct cxl_mem {
struct range pmem_range;
struct range ram_range;
+ u64 total_bytes;
+ u64 volatile_only_bytes;
+ u64 persistent_only_bytes;
+ u64 partition_align_bytes;
+
+ u64 active_volatile_bytes;
+ u64 active_persistent_bytes;
+ u64 next_volatile_bytes;
+ u64 next_persistent_bytes;
};
#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4cf351a3cf99..8e45aa07d662 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -12,9 +12,9 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "cxlmem.h"
#include "pci.h"
#include "cxl.h"
-#include "mem.h"
/**
* DOC: cxl pci
@@ -64,6 +64,15 @@ enum opcode {
CXL_MBOX_OP_MAX = 0x10000
};
+/*
+ * CXL 2.0 - Memory capacity multiplier
+ * See Section 8.2.9.5
+ *
+ * Volatile, Persistent, and Partition capacities are specified to be in
+ * multiples of 256MB - define a multiplier to convert to/from bytes.
+ */
+#define CXL_CAPACITY_MULTIPLIER SZ_256M
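(For example, a raw capacity field of 0x10 decodes to 0x10 * 256MB = 4GB.)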
+
/**
* struct mbox_cmd - A command to be submitted to hardware.
* @opcode: (input) The command set and command submitted to hardware.
@@ -94,8 +103,6 @@ struct mbox_cmd {
#define CXL_MBOX_SUCCESS 0
};
-static int cxl_mem_major;
-static DEFINE_IDA(cxl_memdev_ida);
static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -568,7 +575,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
return false;
- if (security_locked_down(LOCKDOWN_NONE))
+ if (security_locked_down(LOCKDOWN_PCI_ACCESS))
return false;
if (cxl_raw_allow_all)
@@ -806,13 +813,25 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations cxl_memdev_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = cxl_memdev_ioctl,
- .open = cxl_memdev_open,
- .release = cxl_memdev_release_file,
- .compat_ioctl = compat_ptr_ioctl,
- .llseek = noop_llseek,
+static void cxl_memdev_shutdown(struct device *dev)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ down_write(&cxl_memdev_rwsem);
+ cxlmd->cxlm = NULL;
+ up_write(&cxl_memdev_rwsem);
+}
+
+static const struct cdevm_file_operations cxl_memdev_fops = {
+ .fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+ },
+ .shutdown = cxl_memdev_shutdown,
};
static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
@@ -1022,8 +1041,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
!dev_map->memdev.valid) {
dev_err(dev, "registers not found: %s%s%s\n",
!dev_map->status.valid ? "status " : "",
- !dev_map->mbox.valid ? "status " : "",
- !dev_map->memdev.valid ? "status " : "");
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
return -ENXIO;
}
@@ -1081,9 +1100,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
struct device *dev = &pdev->dev;
u32 regloc_size, regblocks;
void __iomem *base;
- int regloc, i;
- struct cxl_register_map *map, *n;
- LIST_HEAD(register_maps);
+ int regloc, i, n_maps;
+ struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];
int ret = 0;
regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
@@ -1102,20 +1120,12 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
- for (i = 0; i < regblocks; i++, regloc += 8) {
+ for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
u32 reg_lo, reg_hi;
u8 reg_type;
u64 offset;
u8 bar;
- map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map) {
- ret = -ENOMEM;
- goto free_maps;
- }
-
- list_add(&map->list, &register_maps);
-
pci_read_config_dword(pdev, regloc, &reg_lo);
pci_read_config_dword(pdev, regloc + 4, &reg_hi);
@@ -1125,12 +1135,15 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
bar, offset, reg_type);
+ /* Ignore unknown register block types */
+ if (reg_type > CXL_REGLOC_RBI_MEMDEV)
+ continue;
+
base = cxl_mem_map_regblock(cxlm, bar, offset);
- if (!base) {
- ret = -ENOMEM;
- goto free_maps;
- }
+ if (!base)
+ return -ENOMEM;
+ map = &maps[n_maps];
map->barno = bar;
map->block_offset = offset;
map->reg_type = reg_type;
@@ -1141,240 +1154,22 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
cxl_mem_unmap_regblock(cxlm, base);
if (ret)
- goto free_maps;
+ return ret;
+
+ n_maps++;
}
pci_release_mem_regions(pdev);
- list_for_each_entry(map, &register_maps, list) {
- ret = cxl_map_regs(cxlm, map);
+ for (i = 0; i < n_maps; i++) {
+ ret = cxl_map_regs(cxlm, &maps[i]);
if (ret)
- goto free_maps;
- }
-
-free_maps:
- list_for_each_entry_safe(map, n, &register_maps, list) {
- list_del(&map->list);
- kfree(map);
+ break;
}
return ret;
}
-static struct cxl_memdev *to_cxl_memdev(struct device *dev)
-{
- return container_of(dev, struct cxl_memdev, dev);
-}
-
-static void cxl_memdev_release(struct device *dev)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-
- ida_free(&cxl_memdev_ida, cxlmd->id);
- kfree(cxlmd);
-}
-
-static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
- kgid_t *gid)
-{
- return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
-}
-
-static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
-}
-static DEVICE_ATTR_RO(firmware_version);
-
-static ssize_t payload_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
-}
-static DEVICE_ATTR_RO(payload_max);
-
-static ssize_t label_storage_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
-
- return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
-}
-static DEVICE_ATTR_RO(label_storage_size);
-
-static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->ram_range);
-
- return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_ram_size =
- __ATTR(size, 0444, ram_size_show, NULL);
-
-static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->pmem_range);
-
- return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_pmem_size =
- __ATTR(size, 0444, pmem_size_show, NULL);
-
-static struct attribute *cxl_memdev_attributes[] = {
- &dev_attr_firmware_version.attr,
- &dev_attr_payload_max.attr,
- &dev_attr_label_storage_size.attr,
- NULL,
-};
-
-static struct attribute *cxl_memdev_pmem_attributes[] = {
- &dev_attr_pmem_size.attr,
- NULL,
-};
-
-static struct attribute *cxl_memdev_ram_attributes[] = {
- &dev_attr_ram_size.attr,
- NULL,
-};
-
-static struct attribute_group cxl_memdev_attribute_group = {
- .attrs = cxl_memdev_attributes,
-};
-
-static struct attribute_group cxl_memdev_ram_attribute_group = {
- .name = "ram",
- .attrs = cxl_memdev_ram_attributes,
-};
-
-static struct attribute_group cxl_memdev_pmem_attribute_group = {
- .name = "pmem",
- .attrs = cxl_memdev_pmem_attributes,
-};
-
-static const struct attribute_group *cxl_memdev_attribute_groups[] = {
- &cxl_memdev_attribute_group,
- &cxl_memdev_ram_attribute_group,
- &cxl_memdev_pmem_attribute_group,
- NULL,
-};
-
-static const struct device_type cxl_memdev_type = {
- .name = "cxl_memdev",
- .release = cxl_memdev_release,
- .devnode = cxl_memdev_devnode,
- .groups = cxl_memdev_attribute_groups,
-};
-
-static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
-{
- down_write(&cxl_memdev_rwsem);
- cxlmd->cxlm = NULL;
- up_write(&cxl_memdev_rwsem);
-}
-
-static void cxl_memdev_unregister(void *_cxlmd)
-{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
- cdev_device_del(&cxlmd->cdev, dev);
- cxl_memdev_shutdown(cxlmd);
- put_device(dev);
-}
-
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
-{
- struct pci_dev *pdev = cxlm->pdev;
- struct cxl_memdev *cxlmd;
- struct device *dev;
- struct cdev *cdev;
- int rc;
-
- cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
- if (!cxlmd)
- return ERR_PTR(-ENOMEM);
-
- rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
- if (rc < 0)
- goto err;
- cxlmd->id = rc;
-
- dev = &cxlmd->dev;
- device_initialize(dev);
- dev->parent = &pdev->dev;
- dev->bus = &cxl_bus_type;
- dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
- dev->type = &cxl_memdev_type;
- device_set_pm_not_required(dev);
-
- cdev = &cxlmd->cdev;
- cdev_init(cdev, &cxl_memdev_fops);
- return cxlmd;
-
-err:
- kfree(cxlmd);
- return ERR_PTR(rc);
-}
-
-static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
- struct cxl_mem *cxlm)
-{
- struct cxl_memdev *cxlmd;
- struct device *dev;
- struct cdev *cdev;
- int rc;
-
- cxlmd = cxl_memdev_alloc(cxlm);
- if (IS_ERR(cxlmd))
- return cxlmd;
-
- dev = &cxlmd->dev;
- rc = dev_set_name(dev, "mem%d", cxlmd->id);
- if (rc)
- goto err;
-
- /*
- * Activate ioctl operations, no cxl_memdev_rwsem manipulation
- * needed as this is ordered with cdev_add() publishing the device.
- */
- cxlmd->cxlm = cxlm;
-
- cdev = &cxlmd->cdev;
- rc = cdev_device_add(cdev, dev);
- if (rc)
- goto err;
-
- rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
- if (rc)
- return ERR_PTR(rc);
- return cxlmd;
-
-err:
- /*
- * The cdev was briefly live, shutdown any ioctl operations that
- * saw that state.
- */
- cxl_memdev_shutdown(cxlmd);
- put_device(dev);
- return ERR_PTR(rc);
-}
-
static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
{
u32 remaining = size;
@@ -1469,6 +1264,53 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
}
/**
+ * cxl_mem_get_partition_info - Get partition info
+ * @cxlm: The device to act on
+ * @active_volatile_bytes: returned active volatile capacity
+ * @active_persistent_bytes: returned active persistent capacity
+ * @next_volatile_bytes: returned next volatile capacity
+ * @next_persistent_bytes: returned next persistent capacity
+ *
+ * Retrieve the current partition info for the device specified. If not 0, the
+ * 'next' values are pending and take effect on the next cold reset.
+ *
+ * Return: 0 if no error; otherwise the result of the mailbox command.
+ *
+ * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
+ */
+static int cxl_mem_get_partition_info(struct cxl_mem *cxlm,
+ u64 *active_volatile_bytes,
+ u64 *active_persistent_bytes,
+ u64 *next_volatile_bytes,
+ u64 *next_persistent_bytes)
+{
+ struct cxl_mbox_get_partition_info {
+ __le64 active_volatile_cap;
+ __le64 active_persistent_cap;
+ __le64 next_volatile_cap;
+ __le64 next_persistent_cap;
+ } __packed pi;
+ int rc;
+
+ rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
+ NULL, 0, &pi, sizeof(pi));
+ if (rc)
+ return rc;
+
+ *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap);
+ *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap);
+ *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap);
+ *next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap);
+
+ *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+ *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ return 0;
+}
+
+/**
* cxl_mem_enumerate_cmds() - Enumerate commands for a device.
* @cxlm: The device.
*
@@ -1564,16 +1406,27 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
if (rc < 0)
return rc;
- /*
- * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
- * For now, only the capacity is exported in sysfs
- */
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
+ cxlm->total_bytes = le64_to_cpu(id.total_capacity);
+ cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity);
+ cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER;
- cxlm->pmem_range.start = 0;
- cxlm->pmem_range.end =
- le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
+ cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity);
+ cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ cxlm->partition_align_bytes = le64_to_cpu(id.partition_align);
+ cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+ dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n"
+ " total_bytes = %#llx\n"
+ " volatile_only_bytes = %#llx\n"
+ " persistent_only_bytes = %#llx\n"
+ " partition_align_bytes = %#llx\n",
+ cxlm->total_bytes,
+ cxlm->volatile_only_bytes,
+ cxlm->persistent_only_bytes,
+ cxlm->partition_align_bytes);
cxlm->lsa_size = le32_to_cpu(id.lsa_size);
memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -1581,6 +1434,49 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
return 0;
}
+static int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+{
+ int rc;
+
+ if (cxlm->partition_align_bytes == 0) {
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
+ cxlm->pmem_range.start = cxlm->volatile_only_bytes;
+ cxlm->pmem_range.end = cxlm->volatile_only_bytes +
+ cxlm->persistent_only_bytes - 1;
+ return 0;
+ }
+
+ rc = cxl_mem_get_partition_info(cxlm,
+ &cxlm->active_volatile_bytes,
+ &cxlm->active_persistent_bytes,
+ &cxlm->next_volatile_bytes,
+ &cxlm->next_persistent_bytes);
+ if (rc < 0) {
+ dev_err(&cxlm->pdev->dev, "Failed to query partition information\n");
+ return rc;
+ }
+
+ dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n"
+ " active_volatile_bytes = %#llx\n"
+ " active_persistent_bytes = %#llx\n"
+ " next_volatile_bytes = %#llx\n"
+ " next_persistent_bytes = %#llx\n",
+ cxlm->active_volatile_bytes,
+ cxlm->active_persistent_bytes,
+ cxlm->next_volatile_bytes,
+ cxlm->next_persistent_bytes);
+
+ cxlm->ram_range.start = 0;
+ cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+
+ cxlm->pmem_range.start = cxlm->active_volatile_bytes;
+ cxlm->pmem_range.end = cxlm->active_volatile_bytes +
+ cxlm->active_persistent_bytes - 1;
+
+ return 0;
+}
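As a worked example (hypothetical device): with partition_align_bytes == 0,
volatile_only_bytes = 512MB and persistent_only_bytes = 256MB, the branch
above yields ram_range = [0, 0x1fffffff] and pmem_range =
[0x20000000, 0x2fffffff], i.e. pmem DPA begins immediately after the
volatile capacity.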
+
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_memdev *cxlmd;
@@ -1611,7 +1507,11 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
+ rc = cxl_mem_create_range_info(cxlm);
+ if (rc)
+ return rc;
+
+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
@@ -1640,25 +1540,15 @@ static struct pci_driver cxl_mem_driver = {
static __init int cxl_mem_init(void)
{
struct dentry *mbox_debugfs;
- dev_t devt;
int rc;
/* Double check the anonymous union trickery in struct cxl_regs */
BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
offsetof(struct cxl_regs, device_regs.memdev));
- rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
- if (rc)
- return rc;
-
- cxl_mem_major = MAJOR(devt);
-
rc = pci_register_driver(&cxl_mem_driver);
- if (rc) {
- unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
- CXL_MEM_MAX_DEVS);
+ if (rc)
return rc;
- }
cxl_debugfs = debugfs_create_dir("cxl", NULL);
mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
@@ -1672,7 +1562,6 @@ static __exit void cxl_mem_exit(void)
{
debugfs_remove_recursive(cxl_debugfs);
pci_unregister_driver(&cxl_mem_driver);
- unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h
index dad7a831f65f..8c1a58813816 100644
--- a/drivers/cxl/pci.h
+++ b/drivers/cxl/pci.h
@@ -25,6 +25,7 @@
#define CXL_REGLOC_RBI_COMPONENT 1
#define CXL_REGLOC_RBI_VIRT 2
#define CXL_REGLOC_RBI_MEMDEV 3
+#define CXL_REGLOC_RBI_TYPES (CXL_REGLOC_RBI_MEMDEV + 1)
#define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 0088e41dd2f3..9652c3ee41e7 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -6,7 +6,7 @@
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
-#include "mem.h"
+#include "cxlmem.h"
#include "cxl.h"
/*
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index ac231cc36359..a37622060fff 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -37,15 +37,16 @@ static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
struct dax_kmem_data {
const char *res_name;
+ int mgid;
struct resource *res[];
};
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
+ unsigned long total_len = 0;
struct dax_kmem_data *data;
- int rc = -ENOMEM;
- int i, mapped = 0;
+ int i, rc, mapped = 0;
int numa_node;
/*
@@ -61,24 +62,44 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range range;
+
+ rc = dax_kmem_range(dev_dax, i, &range);
+ if (rc) {
+ dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
+ i, range.start, range.end);
+ continue;
+ }
+ total_len += range_len(&range);
+ }
+
+ if (!total_len) {
+ dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
+ return -EINVAL;
+ }
+
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ rc = -ENOMEM;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
+ rc = memory_group_register_static(numa_node, PFN_UP(total_len));
+ if (rc < 0)
+ goto err_reg_mgid;
+ data->mgid = rc;
+
for (i = 0; i < dev_dax->nr_range; i++) {
struct resource *res;
struct range range;
rc = dax_kmem_range(dev_dax, i, &range);
- if (rc) {
- dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
- i, range.start, range.end);
+ if (rc)
continue;
- }
/* Region is permanently reserved if hotremove fails. */
res = request_mem_region(range.start, range_len(&range), data->res_name);
@@ -108,8 +129,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
- rc = add_memory_driver_managed(numa_node, range.start,
- range_len(&range), kmem_name, MHP_NONE);
+ rc = add_memory_driver_managed(data->mgid, range.start,
+ range_len(&range), kmem_name, MHP_NID_IS_MGID);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
@@ -129,6 +150,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return 0;
err_request_mem:
+ memory_group_unregister(data->mgid);
+err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
@@ -156,8 +179,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
if (rc)
continue;
- rc = remove_memory(dev_dax->target_node, range.start,
- range_len(&range));
+ rc = remove_memory(range.start, range_len(&range));
if (rc == 0) {
release_resource(data->res[i]);
kfree(data->res[i]);
@@ -172,6 +194,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
}
if (success >= dev_dax->nr_range) {
+ memory_group_unregister(data->mgid);
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
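The memory-group conversion above follows a simple register/use/unregister
lifecycle; condensed (error paths elided, variables as in the driver):

	int mgid = memory_group_register_static(numa_node, PFN_UP(total_len));

	/* hot-add each range against the group id instead of a raw nid */
	add_memory_driver_managed(mgid, range.start, range_len(&range),
				  kmem_name, MHP_NID_IS_MGID);
	...
	remove_memory(range.start, range_len(&range));
	memory_group_unregister(mgid);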
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 44736cbd446e..fc89e91beea7 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -17,6 +17,24 @@
#include <linux/fs.h>
#include "dax-private.h"
+/**
+ * struct dax_device - anchor object for dax services
+ * @inode: core vfs
+ * @cdev: optional character interface for "device dax"
+ * @host: optional name for lookups where the device path is not available
+ * @private: dax driver private data
+ * @flags: state and boolean properties
+ */
+struct dax_device {
+ struct hlist_node list;
+ struct inode inode;
+ struct cdev cdev;
+ const char *host;
+ void *private;
+ unsigned long flags;
+ const struct dax_operations *ops;
+};
+
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
@@ -40,6 +58,42 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
+static int dax_host_hash(const char *host)
+{
+ return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
+}
+
+/**
+ * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
+ * @host: alternate name for the device registered by a dax driver
+ */
+static struct dax_device *dax_get_by_host(const char *host)
+{
+ struct dax_device *dax_dev, *found = NULL;
+ int hash, id;
+
+ if (!host)
+ return NULL;
+
+ hash = dax_host_hash(host);
+
+ id = dax_read_lock();
+ spin_lock(&dax_host_lock);
+ hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
+ if (!dax_alive(dax_dev)
+ || strcmp(host, dax_dev->host) != 0)
+ continue;
+
+ if (igrab(&dax_dev->inode))
+ found = dax_dev;
+ break;
+ }
+ spin_unlock(&dax_host_lock);
+ dax_read_unlock(id);
+
+ return found;
+}
+
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
@@ -65,15 +119,13 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-#endif
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
+bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
{
bool dax_enabled = false;
pgoff_t pgoff, pgoff_end;
- char buf[BDEVNAME_SIZE];
void *kaddr, *end_kaddr;
pfn_t pfn, end_pfn;
sector_t last_page;
@@ -81,29 +133,25 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
int err, id;
if (blocksize != PAGE_SIZE) {
- pr_info("%s: error: unsupported blocksize for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
return false;
}
if (!dax_dev) {
- pr_debug("%s: error: dax unsupported by block device\n",
- bdevname(bdev, buf));
+ pr_debug("%pg: error: dax unsupported by block device\n", bdev);
return false;
}
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
- pr_info("%s: error: unaligned partition for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unaligned partition for dax\n", bdev);
return false;
}
last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
if (err) {
- pr_info("%s: error: unaligned partition for dax\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: unaligned partition for dax\n", bdev);
return false;
}
@@ -112,8 +160,8 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
if (len < 1 || len2 < 1) {
- pr_info("%s: error: dax access failed (%ld)\n",
- bdevname(bdev, buf), len < 1 ? len : len2);
+ pr_info("%pg: error: dax access failed (%ld)\n",
+ bdev, len < 1 ? len : len2);
dax_read_unlock(id);
return false;
}
@@ -147,57 +195,32 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
dax_read_unlock(id);
if (!dax_enabled) {
- pr_info("%s: error: dax support not enabled\n",
- bdevname(bdev, buf));
+ pr_info("%pg: error: dax support not enabled\n", bdev);
return false;
}
return true;
}
-EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
+EXPORT_SYMBOL_GPL(generic_fsdax_supported);
-/**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @bdev: block device to check
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: true if supported, false if unsupported
- */
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+ int blocksize, sector_t start, sector_t len)
{
- struct dax_device *dax_dev;
- struct request_queue *q;
- char buf[BDEVNAME_SIZE];
- bool ret;
+ bool ret = false;
int id;
- q = bdev_get_queue(bdev);
- if (!q || !blk_queue_dax(q)) {
- pr_debug("%s: error: request queue doesn't support dax\n",
- bdevname(bdev, buf));
- return false;
- }
-
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev) {
- pr_debug("%s: error: device does not support dax\n",
- bdevname(bdev, buf));
+ if (!dax_dev)
return false;
- }
id = dax_read_lock();
- ret = dax_supported(dax_dev, bdev, blocksize, 0,
- i_size_read(bdev->bd_inode) / 512);
+ if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
+ ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
+ start, len);
dax_read_unlock(id);
-
- put_dax(dax_dev);
-
return ret;
}
-EXPORT_SYMBOL_GPL(__bdev_dax_supported);
-#endif
+EXPORT_SYMBOL_GPL(dax_supported);
+#endif /* CONFIG_FS_DAX */
+#endif /* CONFIG_BLOCK */
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
@@ -208,24 +231,6 @@ enum dax_device_flags {
DAXDEV_SYNC,
};
-/**
- * struct dax_device - anchor object for dax services
- * @inode: core vfs
- * @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
- * @private: dax driver private data
- * @flags: state and boolean properties
- */
-struct dax_device {
- struct hlist_node list;
- struct inode inode;
- struct cdev cdev;
- const char *host;
- void *private;
- unsigned long flags;
- const struct dax_operations *ops;
-};
-
static ssize_t write_cache_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -323,19 +328,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
}
EXPORT_SYMBOL_GPL(dax_direct_access);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- if (!dax_dev)
- return false;
-
- if (!dax_alive(dax_dev))
- return false;
-
- return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
-}
-EXPORT_SYMBOL_GPL(dax_supported);
-
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
@@ -423,11 +415,6 @@ bool dax_alive(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_alive);
-static int dax_host_hash(const char *host)
-{
- return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
/*
* Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
* that any fault handlers or operations that might have seen
@@ -625,38 +612,6 @@ void put_dax(struct dax_device *dax_dev)
EXPORT_SYMBOL_GPL(put_dax);
/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
- */
-struct dax_device *dax_get_by_host(const char *host)
-{
- struct dax_device *dax_dev, *found = NULL;
- int hash, id;
-
- if (!host)
- return NULL;
-
- hash = dax_host_hash(host);
-
- id = dax_read_lock();
- spin_lock(&dax_host_lock);
- hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
- if (!dax_alive(dax_dev)
- || strcmp(host, dax_dev->host) != 0)
- continue;
-
- if (igrab(&dax_dev->inode))
- found = dax_dev;
- break;
- }
- spin_unlock(&dax_host_lock);
- dax_read_unlock(id);
-
- return found;
-}
-EXPORT_SYMBOL_GPL(dax_get_by_host);
-
-/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 28f3e0ba6cdd..85faa7a5c7d1 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,6 +27,7 @@
#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
+#include <linux/units.h>
#include "governor.h"
#define CREATE_TRACE_POINTS
@@ -34,7 +35,6 @@
#define IS_SUPPORTED_FLAG(f, name) ((f & DEVFREQ_GOV_FLAG_##name) ? true : false)
#define IS_SUPPORTED_ATTR(f, name) ((f & DEVFREQ_GOV_ATTR_##name) ? true : false)
-#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
static struct dentry *devfreq_debugfs;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9561e3d2d428..541efe01abc7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -42,6 +42,7 @@ config UDMABUF
config DMABUF_MOVE_NOTIFY
bool "Move notify between drivers (EXPERIMENTAL)"
default n
+ depends on DMA_SHARED_BUFFER
help
Don't pin buffers if the dynamic DMA-buf interface is available on
both the exporter as well as the importer. This fixes a security
@@ -52,6 +53,7 @@ config DMABUF_MOVE_NOTIFY
config DMABUF_DEBUG
bool "DMA-BUF debug checks"
+ depends on DMA_SHARED_BUFFER
default y if DMA_API_DEBUG
help
This option enables additional checks for DMA-BUF importers and
@@ -74,7 +76,7 @@ menuconfig DMABUF_HEAPS
menuconfig DMABUF_SYSFS_STATS
bool "DMA-BUF sysfs statistics"
- select DMA_SHARED_BUFFER
+ depends on DMA_SHARED_BUFFER
help
Choose this option to enable DMA-BUF sysfs statistics
in location /sys/kernel/dmabuf/buffers.
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 39b5b46e880f..80c2c03cb014 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -277,10 +277,15 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD_BUS
+ tristate
+ default INTEL_IDXD
+
config INTEL_IDXD
tristate "Intel Data Accelerators support"
- depends on PCI && X86_64
+ depends on PCI && X86_64 && !UML
depends on PCI_MSI
+ depends on PCI_PASID
depends on SBITMAP
select DMA_ENGINE
help
@@ -291,6 +296,23 @@ config INTEL_IDXD
If unsure, say N.
+config INTEL_IDXD_COMPAT
+ bool "Legacy behavior for idxd driver"
+ depends on PCI && X86_64
+ select INTEL_IDXD_BUS
+ help
+ Compatible driver to support old /sys/bus/dsa/drivers/dsa behavior.
+ The old behavior performed driver bind/unbind for device and wq
+ devices all under the dsa driver. The compat driver will emulate
+ the legacy behavior in order to allow existing support apps (i.e.
+ accel-config) to continue to function. It is expected that accel-config
+ v3.2 and earlier will need the compat mode. A distro with a later
+ accel-config version can disable this compat config.
+
+ Say Y if you have old applications that require such behavior.
+
+ If unsure, say N.
+
# Config symbol that collects all the dependencies that's necessary to
# support shared virtual memory for the devices supported by idxd.
config INTEL_IDXD_SVM
@@ -315,7 +337,7 @@ config INTEL_IDXD_PERFMON
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
- depends on PCI && X86_64
+ depends on PCI && X86_64 && !UML
select DMA_ENGINE
select DMA_ENGINE_RAID
select DCA
@@ -716,6 +738,8 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
+source "drivers/dma/ptdma/Kconfig"
+
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index aa69094e3547..616d926cf2a5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
@@ -41,7 +42,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IDXD) += idxd/
+obj-y += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 235f1396f968..5906eae26e2a 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -70,10 +70,22 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
si = (const struct acpi_csrt_shared_info *)&grp[1];
- /* Match device by MMIO and IRQ */
+ /* Match device by MMIO */
if (si->mmio_base_low != lower_32_bits(mem) ||
- si->mmio_base_high != upper_32_bits(mem) ||
- si->gsi_interrupt != irq)
+ si->mmio_base_high != upper_32_bits(mem))
+ return 0;
+
+ /*
+ * acpi_gsi_to_irq() can't be used because some platforms do not save
+ * registered IRQs in the MP table. Instead we just try to register
+ * the GSI, which is the core part of the above-mentioned function.
+ */
+ ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
+ if (ret < 0)
+ return 0;
+
+ /* Match device by Linux vIRQ */
+ if (ret != irq)
return 0;
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 0fe0676f8e1d..5a2c7573b692 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -691,10 +691,14 @@ static void msgdma_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&mdev->lock, flags);
- /* Read number of responses that are available */
- count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
- dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
- __func__, __LINE__, count);
+ if (mdev->resp) {
+ /* Read number of responses that are available */
+ count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
+ dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
+ __func__, __LINE__, count);
+ } else {
+ count = 1;
+ }
while (count--) {
/*
@@ -703,8 +707,12 @@ static void msgdma_tasklet(struct tasklet_struct *t)
* have any real values, like transferred bytes or error
* bits. So we need to just drop these values.
*/
- size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
- status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
+ if (mdev->resp) {
+ size = ioread32(mdev->resp +
+ MSGDMA_RESP_BYTES_TRANSFERRED);
+ status = ioread32(mdev->resp +
+ MSGDMA_RESP_STATUS);
+ }
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
@@ -757,14 +765,21 @@ static void msgdma_dev_remove(struct msgdma_device *mdev)
}
static int request_and_map(struct platform_device *pdev, const char *name,
- struct resource **res, void __iomem **ptr)
+ struct resource **res, void __iomem **ptr,
+ bool optional)
{
struct resource *region;
struct device *device = &pdev->dev;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
- dev_err(device, "resource %s not defined\n", name);
+ if (optional) {
+ *ptr = NULL;
+ dev_info(device, "optional resource %s not defined\n",
+ name);
+ return 0;
+ }
+ dev_err(device, "mandatory resource %s not defined\n", name);
return -ENODEV;
}
@@ -805,17 +820,17 @@ static int msgdma_probe(struct platform_device *pdev)
mdev->dev = &pdev->dev;
/* Map CSR space */
- ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
+ ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
if (ret)
return ret;
/* Map (extended) descriptor space */
- ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
+ ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
if (ret)
return ret;
/* Map response space */
- ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
+ ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
if (ret)
return ret;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64a52bf4d737..ab78e0f6afd7 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2240,10 +2240,16 @@ static struct platform_driver at_xdmac_driver = {
static int __init at_xdmac_init(void)
{
- return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
+ return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);
+static void __exit at_xdmac_exit(void)
+{
+ platform_driver_unregister(&at_xdmac_driver);
+}
+module_exit(at_xdmac_exit);
+
MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index d9e4ac3edb4e..35993ab92154 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -363,12 +363,16 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
<< CH_CFG_H_TT_FC_POS;
+ if (chan->chip->apb_regs)
+ reg |= (chan->id << CH_CFG_H_DST_PER_POS);
break;
case DMA_DEV_TO_MEM:
reg |= (chan->config.device_fc ?
DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
<< CH_CFG_H_TT_FC_POS;
+ if (chan->chip->apb_regs)
+ reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
break;
default:
break;
@@ -470,18 +474,13 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
pm_runtime_put(chan->chip->dev);
}
-static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
- u32 handshake_num, bool set)
+static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
- unsigned long start = 0;
- unsigned long reg_value;
- unsigned long reg_mask;
- unsigned long reg_set;
- unsigned long mask;
- unsigned long val;
+ struct axi_dma_chip *chip = chan->chip;
+ unsigned long reg_value, val;
if (!chip->apb_regs) {
- dev_dbg(chip->dev, "apb_regs not initialized\n");
+ dev_err(chip->dev, "apb_regs not initialized\n");
return;
}
@@ -490,26 +489,22 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
 * Lock the DMA channel by assigning a handshake number to the channel.
 * Unlock the DMA channel by assigning 0x3F to the channel.
*/
- if (set) {
- reg_set = UNUSED_CHANNEL;
- val = handshake_num;
- } else {
- reg_set = handshake_num;
+ if (set)
+ val = chan->hw_handshake_num;
+ else
val = UNUSED_CHANNEL;
- }
reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
- for_each_set_clump8(start, reg_mask, &reg_value, 64) {
- if (reg_mask == reg_set) {
- mask = GENMASK_ULL(start + 7, start);
- reg_value &= ~mask;
- reg_value |= rol64(val, start);
- lo_hi_writeq(reg_value,
- chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
- break;
- }
- }
+ /*
+ * The channel is already allocated, so select its handshake as per its
+ * channel ID; a single 64-bit write covers the fields of all 8 channels.
+ */
+
+ reg_value &= ~(DMA_APB_HS_SEL_MASK <<
+ (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
+ reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
+ lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
/*
@@ -742,7 +737,7 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
llp = hw_desc->llp;
} while (total_segments);
- dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+ dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
@@ -822,7 +817,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
llp = hw_desc->llp;
} while (num_sgs);
- dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
+ dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
@@ -1098,8 +1093,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
"%s failed to stop\n", axi_chan_name(chan));
if (chan->direction != DMA_MEM_TO_MEM)
- dw_axi_dma_set_hw_channel(chan->chip,
- chan->hw_handshake_num, false);
+ dw_axi_dma_set_hw_channel(chan, false);
if (chan->direction == DMA_MEM_TO_DEV)
dw_axi_dma_set_byte_halfword(chan, false);
@@ -1296,7 +1290,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return -EINVAL;
chip->dw->hdata->restrict_axi_burst_len = true;
- chip->dw->hdata->axi_rw_burst_len = tmp - 1;
+ chip->dw->hdata->axi_rw_burst_len = tmp;
}
return 0;
@@ -1365,7 +1359,6 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
return ret;
-
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < hdata->nr_channels; i++) {
struct axi_dma_chan *chan = &dw->chan[i];
@@ -1386,6 +1379,7 @@ static int dw_probe(struct platform_device *pdev)
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
+ dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index b69897887c76..380005afde16 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -184,6 +184,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */
#define UNUSED_CHANNEL 0x3F /* Set unused DMA channel to 0x3F */
+#define DMA_APB_HS_SEL_BIT_SIZE 0x08 /* HW handshake bits per channel */
+#define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select mask */
#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
/* DMAC_CFG */
@@ -256,6 +258,8 @@ enum {
/* CH_CFG_H */
#define CH_CFG_H_PRIORITY_POS 17
+#define CH_CFG_H_DST_PER_POS 12
+#define CH_CFG_H_SRC_PER_POS 7
#define CH_CFG_H_HS_SEL_DST_POS 4
#define CH_CFG_H_HS_SEL_SRC_POS 3
enum {
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
index 3ce44de25d33..58f4078d83fe 100644
--- a/drivers/dma/dw/idma32.c
+++ b/drivers/dma/dw/idma32.c
@@ -1,15 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2013,2018 Intel Corporation
+// Copyright (C) 2013,2018,2020-2021 Intel Corporation
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "internal.h"
-static void idma32_initialize_chan(struct dw_dma_chan *dwc)
+#define DMA_CTL_CH(x) (0x1000 + (x) * 4)
+#define DMA_SRC_ADDR_FILLIN(x) (0x1100 + (x) * 4)
+#define DMA_DST_ADDR_FILLIN(x) (0x1200 + (x) * 4)
+#define DMA_XBAR_SEL(x) (0x1300 + (x) * 4)
+#define DMA_REGACCESS_CHID_CFG (0x1400)
+
+#define CTL_CH_TRANSFER_MODE_MASK GENMASK(1, 0)
+#define CTL_CH_TRANSFER_MODE_S2S 0
+#define CTL_CH_TRANSFER_MODE_S2D 1
+#define CTL_CH_TRANSFER_MODE_D2S 2
+#define CTL_CH_TRANSFER_MODE_D2D 3
+#define CTL_CH_RD_RS_MASK GENMASK(4, 3)
+#define CTL_CH_WR_RS_MASK GENMASK(6, 5)
+#define CTL_CH_RD_NON_SNOOP_BIT BIT(8)
+#define CTL_CH_WR_NON_SNOOP_BIT BIT(9)
+
+#define XBAR_SEL_DEVID_MASK GENMASK(15, 0)
+#define XBAR_SEL_RX_TX_BIT BIT(16)
+#define XBAR_SEL_RX_TX_SHIFT 16
+
+#define REGACCESS_CHID_MASK GENMASK(2, 0)
+
+static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc)
+{
+ struct device *slave = dwc->chan.slave;
+
+ if (!slave || !dev_is_pci(slave))
+ return 0;
+
+ return to_pci_dev(slave)->devfn;
+}
+
+static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ void __iomem *misc = __dw_regs(dw);
+ u32 cfghi = 0, cfglo = 0;
+ u8 dst_id, src_id;
+ u32 value;
+
+ /* DMA Channel ID Configuration register must be programmed first */
+ value = readl(misc + DMA_REGACCESS_CHID_CFG);
+
+ value &= ~REGACCESS_CHID_MASK;
+ value |= dwc->chan.chan_id;
+
+ writel(value, misc + DMA_REGACCESS_CHID_CFG);
+
+ /* Configure channel attributes */
+ value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id));
+
+ value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT);
+ value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK);
+ value &= ~CTL_CH_TRANSFER_MODE_MASK;
+
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ value |= CTL_CH_TRANSFER_MODE_D2S;
+ value |= CTL_CH_WR_NON_SNOOP_BIT;
+ break;
+ case DMA_DEV_TO_MEM:
+ value |= CTL_CH_TRANSFER_MODE_S2D;
+ value |= CTL_CH_RD_NON_SNOOP_BIT;
+ break;
+ default:
+ /*
+ * Memory-to-Memory and Device-to-Device are ignored for now.
+ *
+ * For Memory-to-Memory transfers we would need to set mode
+ * and disable snooping on both sides.
+ */
+ return;
+ }
+
+ writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id));
+
+ /* Configure crossbar selection */
+ value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id));
+
+ /* DEVFN selection */
+ value &= ~XBAR_SEL_DEVID_MASK;
+ value |= idma32_get_slave_devfn(dwc);
+
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ value |= XBAR_SEL_RX_TX_BIT;
+ break;
+ case DMA_DEV_TO_MEM:
+ value &= ~XBAR_SEL_RX_TX_BIT;
+ break;
+ default:
+ /* Memory-to-Memory and Device-to-Device are ignored for now */
+ return;
+ }
+
+ writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id));
+
+ /* Configure DMA channel low and high registers */
+ switch (dwc->direction) {
+ case DMA_MEM_TO_DEV:
+ dst_id = dwc->chan.chan_id;
+ src_id = dwc->dws.src_id;
+ break;
+ case DMA_DEV_TO_MEM:
+ dst_id = dwc->dws.dst_id;
+ src_id = dwc->chan.chan_id;
+ break;
+ default:
+ /* Memory-to-Memory and Device-to-Device are ignored for now */
+ return;
+ }
+
+ /* Set default burst alignment */
+ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
+
+ /* Low 4 bits of the request lines */
+ cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf);
+ cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf);
+
+ /* Request line extension (2 bits) */
+ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3);
+ cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+}
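
A sketch of the crossbar select value built by idma32_initialize_chan_xbar(): the low 16 bits carry the slave's PCI devfn and bit 16 picks the TX (mem-to-dev) or RX (dev-to-mem) lane. The helper below is hypothetical and only restates the read-modify-write above.

#include <stdbool.h>
#include <stdint.h>

#define XBAR_SEL_DEVID_MASK	0xFFFFu
#define XBAR_SEL_RX_TX_BIT	(1u << 16)

static uint32_t xbar_sel_value(uint32_t old, uint16_t devfn, bool mem_to_dev)
{
	uint32_t val = old & ~XBAR_SEL_DEVID_MASK;

	val |= devfn;			/* route requests for this function */
	if (mem_to_dev)
		val |= XBAR_SEL_RX_TX_BIT;
	else
		val &= ~XBAR_SEL_RX_TX_BIT;
	return val;
}
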
+
+static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc)
{
u32 cfghi = 0;
u32 cfglo = 0;
@@ -134,7 +263,10 @@ int idma32_dma_probe(struct dw_dma_chip *chip)
return -ENOMEM;
/* Channel operations */
- dw->initialize_chan = idma32_initialize_chan;
+ if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT)
+ dw->initialize_chan = idma32_initialize_chan_xbar;
+ else
+ dw->initialize_chan = idma32_initialize_chan_generic;
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 2e1c52eefdeb..563ce73488db 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -74,4 +74,20 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.remove = idma32_dma_remove,
};
+static const struct dw_dma_platform_data xbar_pdata = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 131071,
+ .nr_masters = 1,
+ .data_width = {4},
+ .quirks = DW_DMA_QUIRK_XBAR_PRESENT,
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
+ .pdata = &xbar_pdata,
+ .probe = idma32_dma_probe,
+ .remove = idma32_dma_remove,
+};
+
#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
index c1cf7675b9d1..523ca806837c 100644
--- a/drivers/dma/dw/of.c
+++ b/drivers/dma/dw/of.c
@@ -50,15 +50,10 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
u32 nr_masters;
u32 nr_channels;
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
if (of_property_read_u32(np, "dma-masters", &nr_masters))
return NULL;
if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
@@ -76,41 +71,29 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
+ of_property_read_u32(np, "chan_allocation_order", &pdata->chan_allocation_order);
+ of_property_read_u32(np, "chan_priority", &pdata->chan_priority);
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
+ of_property_read_u32(np, "block_size", &pdata->block_size);
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
- } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+ /* Try deprecated property first */
+ if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
- if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = mb[tmp];
- } else {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = 1;
- }
+ /* If "data_width" and "data-width" both provided use the latter one */
+ of_property_read_u32_array(np, "data-width", pdata->data_width, nr_masters);
- if (of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst,
- nr_channels)) {
- memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
- }
+ memset32(pdata->multi_block, 1, nr_channels);
+ of_property_read_u32_array(np, "multi-block", pdata->multi_block, nr_channels);
- if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
- if (tmp > CHAN_PROTCTL_MASK)
- return NULL;
- pdata->protctl = tmp;
- }
+ memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
+ of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst, nr_channels);
+
+ of_property_read_u32(np, "snps,dma-protection-control", &pdata->protctl);
+ if (pdata->protctl > CHAN_PROTCTL_MASK)
+ return NULL;
return pdata;
}
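
The rewrite above leans on of_property_read_u32() and friends leaving the destination untouched when a property is absent, which lets the parser prime defaults first and let the device tree override them. A sketch of the pattern, with read_u32_array() standing in for of_property_read_u32_array():

#include <stdint.h>

/* Stand-in for of_property_read_u32_array(); pretends the property is absent. */
static int read_u32_array(const char *prop, uint32_t *out, unsigned int n)
{
	(void)prop; (void)out; (void)n;
	return -1;
}

static void parse_max_burst(uint32_t *max_burst, unsigned int nr_channels)
{
	unsigned int i;

	for (i = 0; i < nr_channels; i++)
		max_burst[i] = 256;	/* default, as memset32() does above */

	/* Overwrites the defaults only when the property exists. */
	read_u32_array("snps,max-burst-len", max_burst, nr_channels);
}
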
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 1142aa6f8c4a..26a3f926da02 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -120,9 +120,9 @@ static const struct pci_device_id dw_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
- { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
- { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
- { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&xbar_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&xbar_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&xbar_chip_pdata },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 0585d749d935..246118955877 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -149,9 +149,9 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
- { "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
- { "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
- { "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB4", (kernel_ulong_t)&xbar_chip_pdata },
+ { "80864BB5", (kernel_ulong_t)&xbar_chip_pdata },
+ { "80864BB6", (kernel_ulong_t)&xbar_chip_pdata },
{ }
};
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 01027779beb8..98f9ee70362e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -897,7 +897,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
if (data && data->name)
name = data->name;
- ret = clk_enable(edmac->clk);
+ ret = clk_prepare_enable(edmac->clk);
if (ret)
return ret;
@@ -936,7 +936,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
fail_free_irq:
free_irq(edmac->irq, edmac);
fail_clk_disable:
- clk_disable(edmac->clk);
+ clk_disable_unprepare(edmac->clk);
return ret;
}
@@ -969,7 +969,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, d, &list, node)
kfree(desc);
- clk_disable(edmac->clk);
+ clk_disable_unprepare(edmac->clk);
free_irq(edmac->irq, edmac);
}
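
The three ep93xx hunks switch to the balanced prepare+enable helpers: clk_enable() alone is only valid on a clock that was already prepared, and both prepare and unprepare may sleep, so the combined calls belong in process context such as the alloc/free channel paths. A minimal sketch, assuming a struct clk the caller owns:

#include <linux/clk.h>

static int chan_power_on(struct clk *clk)
{
	/* clk_prepare() + clk_enable() in one call; may sleep. */
	return clk_prepare_enable(clk);
}

static void chan_power_off(struct clk *clk)
{
	/* clk_disable() + clk_unprepare(), in the reverse order. */
	clk_disable_unprepare(clk);
}
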
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 4ae057922ef1..8dd40d00a672 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -291,9 +291,8 @@ static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
if (err) {
- list_del(&dpaa2_comp->list);
- list_add_tail(&dpaa2_comp->list,
- &dpaa2_chan->comp_free);
+ list_move_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
}
}
err_enqueue:
@@ -626,8 +625,7 @@ static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
dpaa2_comp = to_fsl_qdma_comp(vdesc);
qchan = dpaa2_comp->qchan;
spin_lock_irqsave(&qchan->queue_lock, flags);
- list_del(&dpaa2_comp->list);
- list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+ list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
@@ -703,7 +701,7 @@ static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
/* DPDMAI enable */
err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
if (err) {
- dev_err(dev, "dpdmai_enable() faile\n");
+ dev_err(dev, "dpdmai_enable() failed\n");
goto err_enable;
}
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index a259ee010e9b..c855a0e4f9ff 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -133,11 +133,6 @@ static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
writel_relaxed(tmp, addr);
}
-static void hisi_dma_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool pause)
{
@@ -544,6 +539,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdma_dev);
pci_set_master(pdev);
+ /* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
PCI_IRQ_MSI);
if (ret < 0) {
@@ -551,10 +547,6 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
- if (ret)
- return ret;
-
dma_dev = &hdma_dev->dma_dev;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
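
The removed devm action was redundant: with a managed PCI device, the vectors from pci_alloc_irq_vectors() are released by pcim_release() at detach, as the new comment notes. A sketch of the resulting probe shape; demo_probe() and the vector count are placeholders:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);	/* registers pcim_release() */
	if (ret)
		return ret;

	/* Freed automatically on detach; no devm_add_action_or_reset(). */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	return ret < 0 ? ret : 0;
}
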
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 6d11558756f8..a1e9f2b3a37c 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,4 +1,12 @@
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+
+obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+idxd_bus-y := bus.o
+
+obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+idxd_compat-y := compat.o
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
new file mode 100644
index 000000000000..6f84621053c6
--- /dev/null
+++ b/drivers/dma/idxd/bus.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include "idxd.h"
+
+int __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *owner,
+ const char *mod_name)
+{
+ struct device_driver *drv = &idxd_drv->drv;
+
+ if (!idxd_drv->type) {
+ pr_debug("driver type not set (%ps)\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ drv->name = idxd_drv->name;
+ drv->bus = &dsa_bus_type;
+ drv->owner = owner;
+ drv->mod_name = mod_name;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(__idxd_driver_register);
+
+void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
+{
+ driver_unregister(&idxd_drv->drv);
+}
+EXPORT_SYMBOL_GPL(idxd_driver_unregister);
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(drv, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+ int i = 0;
+
+ while (idxd_drv->type[i] != IDXD_DEV_NONE) {
+ if (idxd_dev->type == idxd_drv->type[i])
+ return 1;
+ i++;
+ }
+
+ return 0;
+}
+
+static int idxd_config_bus_probe(struct device *dev)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_drv->probe(idxd_dev);
+}
+
+static void idxd_config_bus_remove(struct device *dev)
+{
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ idxd_drv->remove(idxd_dev);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+};
+EXPORT_SYMBOL_GPL(dsa_bus_type);
+
+static int __init dsa_bus_init(void)
+{
+ return bus_register(&dsa_bus_type);
+}
+module_init(dsa_bus_init);
+
+static void __exit dsa_bus_exit(void)
+{
+ bus_unregister(&dsa_bus_type);
+}
+module_exit(dsa_bus_exit);
+
+MODULE_DESCRIPTION("IDXD dsa_bus_type driver");
+MODULE_LICENSE("GPL v2");
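
For context, a hypothetical client of the new bus, registered through the idxd_driver_register()/module_idxd_driver() helpers that idxd.h adds later in this patch. The "demo" driver and its callbacks are made up; the IDXD_DEV_NONE terminator is what idxd_config_bus_match() scans for.

#include <linux/module.h>
#include "idxd.h"

static int demo_probe(struct idxd_dev *idxd_dev)
{
	return 0;			/* claim the device */
}

static void demo_remove(struct idxd_dev *idxd_dev)
{
}

static enum idxd_dev_type demo_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,			/* sentinel for the match loop */
};

static struct idxd_device_driver demo_drv = {
	.name = "demo",
	.probe = demo_probe,
	.remove = demo_remove,
	.type = demo_types,
};

module_idxd_driver(demo_drv);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IDXD);
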
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e9def577c697..b9b2b4a4124e 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -41,7 +41,7 @@ struct idxd_user_context {
static void idxd_cdev_dev_release(struct device *dev)
{
- struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
+ struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
struct idxd_cdev_context *cdev_ctx;
struct idxd_wq *wq = idxd_cdev->wq;
@@ -218,14 +218,13 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
- unsigned long flags;
__poll_t out = 0;
poll_wait(filp, &wq->err_queue, wait);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
return out;
}
@@ -256,9 +255,10 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
if (!idxd_cdev)
return -ENOMEM;
+ idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
idxd_cdev->wq = wq;
cdev = &idxd_cdev->cdev;
- dev = &idxd_cdev->dev;
+ dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
@@ -268,7 +268,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
idxd_cdev->minor = minor;
device_initialize(dev);
- dev->parent = &wq->conf_dev;
+ dev->parent = wq_confdev(wq);
dev->bus = &dsa_bus_type;
dev->type = &idxd_cdev_device_type;
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
@@ -299,10 +299,67 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
- cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
- put_device(&idxd_cdev->dev);
+ cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
+ put_device(cdev_dev(idxd_cdev));
}
+static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ mutex_lock(&wq->wq_lock);
+ wq->type = IDXD_WQT_USER;
+ rc = __drv_enable_wq(wq);
+ if (rc < 0)
+ goto err;
+
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
+ goto err_cdev;
+ }
+
+ idxd->cmd_status = 0;
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+
+err_cdev:
+ __drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_del_cdev(wq);
+ __drv_disable_wq(wq);
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_user_drv = {
+ .probe = idxd_user_drv_probe,
+ .remove = idxd_user_drv_remove,
+ .name = "user",
+ .type = dev_types,
+};
+EXPORT_SYMBOL_GPL(idxd_user_drv);
+
int idxd_cdev_register(void)
{
int rc, i;
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
new file mode 100644
index 000000000000..3df21615f888
--- /dev/null
+++ b/drivers/dma/idxd/compat.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include "idxd.h"
+
+extern int device_driver_attach(struct device_driver *drv, struct device *dev);
+extern void device_driver_detach(struct device *dev);
+
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
+
+static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count)
+{
+ struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int rc = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver) {
+ device_driver_detach(dev);
+ rc = count;
+ }
+
+ return rc;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
+
+static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count)
+{
+ struct bus_type *bus = drv->bus;
+ struct device *dev;
+ struct device_driver *alt_drv = NULL;
+ int rc = -ENODEV;
+ struct idxd_dev *idxd_dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (!dev || dev->driver || drv != &dsa_drv.drv)
+ return -ENODEV;
+
+ idxd_dev = confdev_to_idxd_dev(dev);
+ if (is_idxd_dev(idxd_dev)) {
+ alt_drv = driver_find("idxd", bus);
+ } else if (is_idxd_wq_dev(idxd_dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ if (is_idxd_wq_kernel(wq))
+ alt_drv = driver_find("dmaengine", bus);
+ else if (is_idxd_wq_user(wq))
+ alt_drv = driver_find("user", bus);
+ }
+ if (!alt_drv)
+ return -ENODEV;
+
+ rc = device_driver_attach(alt_drv, dev);
+ if (rc < 0)
+ return rc;
+
+ return count;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
+
+static struct attribute *dsa_drv_compat_attrs[] = {
+ &driver_attr_bind.attr,
+ &driver_attr_unbind.attr,
+ NULL,
+};
+
+static const struct attribute_group dsa_drv_compat_attr_group = {
+ .attrs = dsa_drv_compat_attrs,
+};
+
+static const struct attribute_group *dsa_drv_compat_groups[] = {
+ &dsa_drv_compat_attr_group,
+ NULL,
+};
+
+static int idxd_dsa_drv_probe(struct idxd_dev *idxd_dev)
+{
+ return -ENODEV;
+}
+
+static void idxd_dsa_drv_remove(struct idxd_dev *idxd_dev)
+{
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver dsa_drv = {
+ .name = "dsa",
+ .probe = idxd_dsa_drv_probe,
+ .remove = idxd_dsa_drv_remove,
+ .type = dev_types,
+ .drv = {
+ .suppress_bind_attrs = true,
+ .groups = dsa_drv_compat_groups,
+ },
+};
+
+module_idxd_driver(dsa_drv);
+MODULE_IMPORT_NS(IDXD);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 420b93fe5feb..83a5ff2ecf2a 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -15,6 +15,8 @@
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
u32 *status);
+static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
@@ -139,8 +141,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (wq->type != IDXD_WQT_KERNEL)
return 0;
- wq->num_descs = wq->size;
- num_descs = wq->size;
+ num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
+ wq->num_descs = num_descs;
rc = alloc_hw_descs(wq, num_descs);
if (rc < 0)
@@ -234,7 +236,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
return 0;
}
-int idxd_wq_disable(struct idxd_wq *wq)
+int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -255,6 +257,8 @@ int idxd_wq_disable(struct idxd_wq *wq)
return -ENXIO;
}
+ if (reset_config)
+ idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
dev_dbg(dev, "WQ %d disabled\n", wq->id);
return 0;
@@ -289,6 +293,7 @@ void idxd_wq_reset(struct idxd_wq *wq)
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+ idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
@@ -315,6 +320,7 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
devm_iounmap(dev, wq->portal);
wq->portal = NULL;
+ wq->portal_offset = 0;
}
void idxd_wqs_unmap_portal(struct idxd_device *idxd)
@@ -335,19 +341,18 @@ int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
int rc;
union wqcfg wqcfg;
unsigned int offset;
- unsigned long flags;
- rc = idxd_wq_disable(wq);
+ rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1;
wqcfg.pasid = pasid;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -362,19 +367,18 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
int rc;
union wqcfg wqcfg;
unsigned int offset;
- unsigned long flags;
- rc = idxd_wq_disable(wq);
+ rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 0;
wqcfg.pasid = 0;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -383,11 +387,11 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
return 0;
}
-void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
- lockdep_assert_held(&idxd->dev_lock);
+ lockdep_assert_held(&wq->wq_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->size = 0;
@@ -396,6 +400,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->priority = 0;
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
}
@@ -455,7 +460,6 @@ int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
- unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -465,13 +469,13 @@ int idxd_device_init_reset(struct idxd_device *idxd)
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
IDXD_CMDSTS_ACTIVE)
cpu_relax();
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
return 0;
}
@@ -480,7 +484,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
{
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
- unsigned long flags;
+ u32 stat;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -494,7 +498,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@@ -511,18 +515,18 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
wait_for_completion(&done);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
- if (status) {
- *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
- idxd->cmd_status = *status & GENMASK(7, 0);
- }
+ stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ spin_lock(&idxd->cmd_lock);
+ if (status)
+ *status = stat;
+ idxd->cmd_status = stat & GENMASK(7, 0);
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
}
int idxd_device_enable(struct idxd_device *idxd)
@@ -548,27 +552,10 @@ int idxd_device_enable(struct idxd_device *idxd)
return 0;
}
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
- int i;
-
- lockdep_assert_held(&idxd->dev_lock);
-
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- if (wq->state == IDXD_WQ_ENABLED) {
- idxd_wq_disable_cleanup(wq);
- wq->state = IDXD_WQ_DISABLED;
- }
- }
-}
-
int idxd_device_disable(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
u32 status;
- unsigned long flags;
if (!idxd_is_enabled(idxd)) {
dev_dbg(dev, "Device is not enabled\n");
@@ -584,22 +571,20 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO;
}
- spin_lock_irqsave(&idxd->dev_lock, flags);
- idxd_device_wqs_clear_state(idxd);
- idxd->state = IDXD_DEV_CONF_READY;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
+ idxd_device_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
- unsigned long flags;
-
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
- spin_lock_irqsave(&idxd->dev_lock, flags);
- idxd_device_wqs_clear_state(idxd);
- idxd->state = IDXD_DEV_CONF_READY;
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
+ idxd_device_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
}
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
@@ -649,7 +634,6 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
struct device *dev = &idxd->pdev->dev;
u32 operand, status;
union idxd_command_reg cmd;
- unsigned long flags;
if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
return -EOPNOTSUPP;
@@ -667,13 +651,13 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
- spin_lock_irqsave(&idxd->cmd_lock, flags);
+ spin_lock(&idxd->cmd_lock);
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
cpu_relax();
status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
- spin_unlock_irqrestore(&idxd->cmd_lock, flags);
+ spin_unlock(&idxd->cmd_lock);
if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
dev_dbg(dev, "release int handle failed: %#x\n", status);
@@ -685,6 +669,59 @@ int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
}
/* Device configuration bits */
+static void idxd_engines_clear_state(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ engine->group = NULL;
+ }
+}
+
+static void idxd_groups_clear_state(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ memset(&group->grpcfg, 0, sizeof(group->grpcfg));
+ group->num_engines = 0;
+ group->num_wqs = 0;
+ group->use_token_limit = false;
+ group->tokens_allowed = 0;
+ group->tokens_reserved = 0;
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
+}
+
+static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ idxd_wq_disable_cleanup(wq);
+ wq->state = IDXD_WQ_DISABLED;
+ }
+ }
+}
+
+void idxd_device_clear_state(struct idxd_device *idxd)
+{
+ idxd_groups_clear_state(idxd);
+ idxd_engines_clear_state(idxd);
+ idxd_device_wqs_clear_state(idxd);
+}
+
void idxd_msix_perm_setup(struct idxd_device *idxd)
{
union msix_perm mperm;
@@ -773,6 +810,15 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
return 0;
}
+static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
+ return true;
+ return false;
+}
+
static int idxd_wq_config_write(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
@@ -796,6 +842,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->wq_size = wq->size;
if (wq->size == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
dev_warn(dev, "Incorrect work queue size: 0\n");
return -EINVAL;
}
@@ -804,7 +851,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->wq_thresh = wq->threshold;
/* byte 8-11 */
- wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq))
wq->wqcfg->mode = 1;
@@ -814,6 +860,25 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->pasid = idxd->pasid;
}
+	/*
+	 * Set the priv bit based on the WQ type: priv = 1 for kernel WQs
+	 * to indicate privileged access. The setting only matters for
+	 * dedicated WQs. According to the DSA spec:
+	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
+	 * Privileged Mode Enable field of the PCI Express PASID capability
+	 * is 0, this field must be 0.
+	 *
+	 * So if a dedicated kernel WQ cannot support privileged mode via
+	 * the PASID capability, the configuration is rejected.
+	 */
+ wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
+ !idxd_device_pasid_priv_enabled(idxd) &&
+ wq->type == IDXD_WQT_KERNEL) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
+ return -EOPNOTSUPP;
+ }
+
wq->wqcfg->priority = wq->priority;
if (idxd->hw.gen_cap.block_on_fault &&
@@ -931,6 +996,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
continue;
if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
}
@@ -939,8 +1005,10 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
configured++;
}
- if (configured == 0)
+ if (configured == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
return -EINVAL;
+ }
return 0;
}
@@ -1086,3 +1154,203 @@ int idxd_device_load_config(struct idxd_device *idxd)
return 0;
}
+
+int __drv_enable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int rc = -ENXIO;
+
+ lockdep_assert_held(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
+ goto err;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "wq %d already enabled.\n", wq->id);
+ idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (!wq->group) {
+ dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
+ goto err;
+ }
+
+ if (strlen(wq->name) == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
+ dev_dbg(dev, "wq %d name not set.\n", wq->id);
+ goto err;
+ }
+
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
+ dev_dbg(dev, "PASID not enabled and shared wq.\n");
+ goto err;
+ }
+		/*
+		 * A shared wq with a threshold of 0 means the user never
+		 * set one, or transitioned from a dedicated wq without
+		 * setting it. A value of 0 would effectively disable the
+		 * shared wq, and the driver does not allow 0 to be set
+		 * for the threshold via sysfs either.
+		 */
+ if (wq->threshold == 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
+ dev_dbg(dev, "Shared wq and threshold 0.\n");
+ goto err;
+ }
+ }
+
+ rc = 0;
+ spin_lock(&idxd->dev_lock);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock(&idxd->dev_lock);
+ if (rc < 0) {
+ dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
+ goto err;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
+ goto err;
+ }
+
+ rc = idxd_wq_map_portal(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
+ dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
+ goto err_map_portal;
+ }
+
+ wq->client_count = 0;
+ return 0;
+
+err_map_portal:
+ rc = idxd_wq_disable(wq, false);
+ if (rc < 0)
+ dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
+err:
+ return rc;
+}
+
+int drv_enable_wq(struct idxd_wq *wq)
+{
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ rc = __drv_enable_wq(wq);
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+void __drv_disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ lockdep_assert_held(&wq->wq_lock);
+
+ if (idxd_wq_refcount(wq))
+ dev_warn(dev, "Clients has claim on wq %d: %d\n",
+ wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ idxd_wq_drain(wq);
+ idxd_wq_reset(wq);
+
+ wq->client_count = 0;
+}
+
+void drv_disable_wq(struct idxd_wq *wq)
+{
+ mutex_lock(&wq->wq_lock);
+ __drv_disable_wq(wq);
+ mutex_unlock(&wq->wq_lock);
+}
+
+int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
+ int rc = 0;
+
+	/*
+	 * The device should be in the disabled state for the idxd_drv to
+	 * load. If it is enabled, the device was altered outside of the
+	 * driver's control. If it is halted, we don't want to proceed.
+	 */
+ if (idxd->state != IDXD_DEV_DISABLED) {
+ idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
+ return -ENXIO;
+ }
+
+ /* Device configuration */
+ spin_lock(&idxd->dev_lock);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ rc = idxd_device_config(idxd);
+ spin_unlock(&idxd->dev_lock);
+ if (rc < 0)
+ return -ENXIO;
+
+ /* Start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ return rc;
+
+ /* Setup DMA device without channels */
+ rc = idxd_register_dma_device(idxd);
+ if (rc < 0) {
+ idxd_device_disable(idxd);
+ idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
+ return rc;
+ }
+
+ idxd->cmd_status = 0;
+ return 0;
+}
+
+void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct device *dev = &idxd_dev->conf_dev;
+ struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ struct device *wq_dev = wq_confdev(wq);
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+ dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
+ device_release_driver(wq_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ idxd_device_disable(idxd);
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ idxd_device_reset(idxd);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_DSA,
+ IDXD_DEV_IAX,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_drv = {
+ .type = dev_types,
+ .probe = idxd_device_drv_probe,
+ .remove = idxd_device_drv_remove,
+ .name = "idxd",
+};
+EXPORT_SYMBOL_GPL(idxd_drv);
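
__drv_enable_wq() and both probe paths above use the usual goto-unwind idiom: each failure jumps to a label that undoes only the steps already completed, in reverse order. A schematic sketch with placeholder steps:

#include <linux/errno.h>

static int step_a(void) { return 0; }
static void undo_a(void) { }
static int step_b(void) { return -EIO; }

static int demo_enable_flow(void)
{
	int rc;

	rc = step_a();
	if (rc < 0)
		goto err;

	rc = step_b();
	if (rc < 0)
		goto err_b;	/* undo step_a only */

	return 0;

err_b:
	undo_a();
err:
	return rc;
}
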
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 77439b645044..e0f056c1d1f5 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -69,7 +69,11 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->src_addr = addr_f1;
hw->dst_addr = addr_f2;
hw->xfer_size = len;
- hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+	/*
+	 * For a dedicated WQ, this field is ignored and HW uses the
+	 * WQCFG.priv field instead. Set it to 1 for kernel descriptors.
+	 */
+ hw->priv = 1;
hw->completion_addr = compl;
}
@@ -149,10 +153,8 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
rc = idxd_submit_desc(wq, desc);
- if (rc < 0) {
- idxd_free_desc(wq, desc);
+ if (rc < 0)
return rc;
- }
return cookie;
}
@@ -245,7 +247,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
- get_device(&wq->conf_dev);
+ get_device(wq_confdev(wq));
return 0;
}
@@ -260,5 +262,87 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq)
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
- put_device(&wq->conf_dev);
+ put_device(wq_confdev(wq));
}
+
+static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
+{
+ struct device *dev = &idxd_dev->conf_dev;
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ mutex_lock(&wq->wq_lock);
+ wq->type = IDXD_WQT_KERNEL;
+ rc = __drv_enable_wq(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
+ rc = -ENXIO;
+ goto err;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+ dev_dbg(dev, "WQ resource alloc failed\n");
+ goto err_res_alloc;
+ }
+
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ goto err_ref;
+ }
+
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
+ dev_dbg(dev, "Failed to register dma channel\n");
+ goto err_dma;
+ }
+
+ idxd->cmd_status = 0;
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+
+err_dma:
+ idxd_wq_quiesce(wq);
+err_ref:
+ idxd_wq_free_resources(wq);
+err_res_alloc:
+ __drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+}
+
+static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+
+ mutex_lock(&wq->wq_lock);
+ idxd_wq_quiesce(wq);
+ idxd_unregister_dma_channel(wq);
+ __drv_disable_wq(wq);
+ idxd_wq_free_resources(wq);
+ wq->type = IDXD_WQT_NONE;
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+struct idxd_device_driver idxd_dmaengine_drv = {
+ .probe = idxd_dmaengine_drv_probe,
+ .remove = idxd_dmaengine_drv_remove,
+ .name = "dmaengine",
+ .type = dev_types,
+};
+EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index fc708be7ad9a..bfcb03329f77 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -11,14 +11,32 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
+#include <uapi/linux/idxd.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
+extern bool tc_override;
-struct idxd_device;
struct idxd_wq;
+struct idxd_dev;
+
+enum idxd_dev_type {
+ IDXD_DEV_NONE = -1,
+ IDXD_DEV_DSA = 0,
+ IDXD_DEV_IAX,
+ IDXD_DEV_WQ,
+ IDXD_DEV_GROUP,
+ IDXD_DEV_ENGINE,
+ IDXD_DEV_CDEV,
+ IDXD_DEV_MAX_TYPE,
+};
+
+struct idxd_dev {
+ struct device conf_dev;
+ enum idxd_dev_type type;
+};
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
@@ -34,9 +52,18 @@ enum idxd_type {
#define IDXD_PMU_EVENT_MAX 64
struct idxd_device_driver {
+ const char *name;
+ enum idxd_dev_type *type;
+ int (*probe)(struct idxd_dev *idxd_dev);
+ void (*remove)(struct idxd_dev *idxd_dev);
struct device_driver drv;
};
+extern struct idxd_device_driver dsa_drv;
+extern struct idxd_device_driver idxd_drv;
+extern struct idxd_device_driver idxd_dmaengine_drv;
+extern struct idxd_device_driver idxd_user_drv;
+
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
@@ -51,7 +78,7 @@ struct idxd_irq_entry {
};
struct idxd_group {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_device *idxd;
struct grpcfg grpcfg;
int id;
@@ -110,7 +137,7 @@ enum idxd_wq_type {
struct idxd_cdev {
struct idxd_wq *wq;
struct cdev cdev;
- struct device dev;
+ struct idxd_dev idxd_dev;
int minor;
};
@@ -136,9 +163,10 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
+ u32 portal_offset;
struct percpu_ref wq_active;
struct completion wq_dead;
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
@@ -153,7 +181,6 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
- u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
@@ -174,7 +201,7 @@ struct idxd_wq {
};
struct idxd_engine {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
int id;
struct idxd_group *group;
struct idxd_device *idxd;
@@ -194,7 +221,6 @@ struct idxd_hw {
enum idxd_device_state {
IDXD_DEV_HALTED = -1,
IDXD_DEV_DISABLED = 0,
- IDXD_DEV_CONF_READY,
IDXD_DEV_ENABLED,
};
@@ -218,7 +244,7 @@ struct idxd_driver_data {
};
struct idxd_device {
- struct device conf_dev;
+ struct idxd_dev idxd_dev;
struct idxd_driver_data *data;
struct list_head list;
struct idxd_hw hw;
@@ -226,7 +252,7 @@ struct idxd_device {
unsigned long flags;
int id;
int major;
- u8 cmd_status;
+ u32 cmd_status;
struct pci_dev *pdev;
void __iomem *reg_base;
@@ -290,7 +316,6 @@ struct idxd_desc {
struct list_head list;
int id;
int cpu;
- unsigned int vector;
struct idxd_wq *wq;
};
@@ -302,11 +327,62 @@ enum idxd_completion_status {
IDXD_COMP_DESC_ABORT = 0xff,
};
-#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
-#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
+#define wq_confdev(wq) &wq->idxd_dev.conf_dev
+#define engine_confdev(engine) &engine->idxd_dev.conf_dev
+#define group_confdev(group) &group->idxd_dev.conf_dev
+#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
+
+#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
+#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
+#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
+
+static inline struct idxd_device *confdev_to_idxd(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_dev_to_idxd(idxd_dev);
+}
+
+static inline struct idxd_wq *confdev_to_wq(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return idxd_dev_to_wq(idxd_dev);
+}
+
+static inline struct idxd_engine *confdev_to_engine(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_engine, idxd_dev);
+}
+
+static inline struct idxd_group *confdev_to_group(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_group, idxd_dev);
+}
+
+static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
+{
+ struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+ return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
+}
+
+static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
+{
+ if (type >= IDXD_DEV_MAX_TYPE) {
+ idev->type = IDXD_DEV_NONE;
+ return;
+ }
+
+ idev->type = type;
+}
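
The accessors above are a two-step container_of() walk: struct device to the embedded struct idxd_dev, then on to the enclosing object. A userspace sketch of the round trip with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int dummy; };
struct idxd_dev { struct device conf_dev; int type; };
struct idxd_wq { struct idxd_dev idxd_dev; int id; };

int main(void)
{
	struct idxd_wq wq = { .idxd_dev.type = 3, .id = 7 };
	struct device *dev = &wq.idxd_dev.conf_dev;

	struct idxd_dev *idev = container_of(dev, struct idxd_dev, conf_dev);
	struct idxd_wq *back = container_of(idev, struct idxd_wq, idxd_dev);

	printf("wq id %d\n", back->id);	/* prints 7 */
	return 0;
}
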
extern struct bus_type dsa_bus_type;
-extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
@@ -316,24 +392,24 @@ extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;
-static inline bool is_dsa_dev(struct device *dev)
+static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &dsa_device_type;
+ return idxd_dev->type == IDXD_DEV_DSA;
}
-static inline bool is_iax_dev(struct device *dev)
+static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &iax_device_type;
+ return idxd_dev->type == IDXD_DEV_IAX;
}
-static inline bool is_idxd_dev(struct device *dev)
+static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
- return is_dsa_dev(dev) || is_iax_dev(dev);
+ return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}
-static inline bool is_idxd_wq_dev(struct device *dev)
+static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
- return dev->type == &idxd_wq_device_type;
+ return idxd_dev->type == IDXD_DEV_WQ;
}
static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
@@ -343,11 +419,16 @@ static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
return false;
}
-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
return wq->type == IDXD_WQT_USER;
}
+static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
+{
+ return wq->type == IDXD_WQT_KERNEL;
+}
+
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
@@ -389,6 +470,24 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
+#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)
+
+/*
+ * Even though this function can be accessed by multiple threads, it is safe
+ * to use. At worst the address gets used more than once before it gets
+ * incremented, and that does not matter until iops reaches many millions per
+ * second. The occasional reuse of the same address is tolerable compared to
+ * the cost of an atomic variable. This is safe on any system with atomic
+ * loads and stores for 32-bit integers, which an Intel iEP device has.
+ */
+static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
+{
+ int ofs = wq->portal_offset;
+
+ wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
+ return wq->portal + ofs;
+}
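
A sketch of the wrapping arithmetic in idxd_wq_portal_addr(): submissions step through one page in descriptor-sized strides and wrap at the page boundary. The 64-byte stride assumes sizeof(struct dsa_raw_desc) and a 4K page.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PORTAL_MASK	(PAGE_SIZE - 1)
#define DESC_SIZE	64u

int main(void)
{
	uint32_t ofs = 0;

	for (int i = 0; i < 66; i++) {
		uint32_t cur = ofs;

		ofs = (ofs + DESC_SIZE) & PORTAL_MASK;
		if (i > 62)	/* prints offsets 4032, 0, 64 */
			printf("submit %d at offset %u\n", i, cur);
	}
	return 0;
}
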
+
static inline void idxd_wq_get(struct idxd_wq *wq)
{
wq->client_count++;
@@ -404,6 +503,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
+int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
+ struct module *module, const char *mod_name);
+#define idxd_driver_register(driver) \
+ __idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+
+void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
+
+#define module_idxd_driver(__idxd_driver) \
+ module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
+
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
@@ -424,13 +533,20 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
+int idxd_register_idxd_drv(void);
+void idxd_unregister_idxd_drv(void);
+int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
+int drv_enable_wq(struct idxd_wq *wq);
+int __drv_enable_wq(struct idxd_wq *wq);
+void drv_disable_wq(struct idxd_wq *wq);
+void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
-void idxd_device_cleanup(struct idxd_device *idxd);
+void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
-void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
@@ -443,12 +559,11 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
-int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
-void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index c0f4c0422f32..eb09bc591c31 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -26,11 +26,16 @@
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+MODULE_IMPORT_NS(IDXD);
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
+bool tc_override;
+module_param(tc_override, bool, 0644);
+MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
+
#define DRV_NAME "idxd"
bool support_enqcmd;
@@ -200,6 +205,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct idxd_wq *wq;
+ struct device *conf_dev;
int i, rc;
idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
@@ -214,15 +220,17 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+ conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
- device_initialize(&wq->conf_dev);
- wq->conf_dev.parent = &idxd->conf_dev;
- wq->conf_dev.bus = &dsa_bus_type;
- wq->conf_dev.type = &idxd_wq_device_type;
- rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ device_initialize(wq_confdev(wq));
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_wq_device_type;
+ rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
- put_device(&wq->conf_dev);
+ put_device(conf_dev);
goto err;
}
@@ -233,7 +241,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
- put_device(&wq->conf_dev);
+ put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -243,8 +251,11 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->wqs[i]->conf_dev);
+ while (--i >= 0) {
+ wq = idxd->wqs[i];
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ }
return rc;
}
@@ -252,6 +263,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *dev = &idxd->pdev->dev;
+ struct device *conf_dev;
int i, rc;
idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
@@ -266,15 +278,17 @@ static int idxd_setup_engines(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
+ conf_dev = engine_confdev(engine);
engine->id = i;
engine->idxd = idxd;
- device_initialize(&engine->conf_dev);
- engine->conf_dev.parent = &idxd->conf_dev;
- engine->conf_dev.bus = &dsa_bus_type;
- engine->conf_dev.type = &idxd_engine_device_type;
- rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_engine_device_type;
+ rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
- put_device(&engine->conf_dev);
+ put_device(conf_dev);
goto err;
}
@@ -284,14 +298,18 @@ static int idxd_setup_engines(struct idxd_device *idxd)
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->engines[i]->conf_dev);
+ while (--i >= 0) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ }
return rc;
}
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ struct device *conf_dev;
struct idxd_group *group;
int i, rc;
@@ -307,28 +325,37 @@ static int idxd_setup_groups(struct idxd_device *idxd)
goto err;
}
+ idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
+ conf_dev = group_confdev(group);
group->id = i;
group->idxd = idxd;
- device_initialize(&group->conf_dev);
- group->conf_dev.parent = &idxd->conf_dev;
- group->conf_dev.bus = &dsa_bus_type;
- group->conf_dev.type = &idxd_group_device_type;
- rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = idxd_confdev(idxd);
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = &idxd_group_device_type;
+ rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
- put_device(&group->conf_dev);
+ put_device(conf_dev);
goto err;
}
idxd->groups[i] = group;
- group->tc_a = -1;
- group->tc_b = -1;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ group->tc_a = 1;
+ group->tc_b = 1;
+ } else {
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
}
return 0;
err:
- while (--i >= 0)
- put_device(&idxd->groups[i]->conf_dev);
+ while (--i >= 0) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ }
return rc;
}
@@ -337,11 +364,11 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
int i;
for (i = 0; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ put_device(wq_confdev(idxd->wqs[i]));
destroy_workqueue(idxd->wq);
}
@@ -381,13 +408,13 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ put_device(group_confdev(idxd->groups[i]));
err_group:
for (i = 0; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ put_device(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
kfree(idxd->int_handles);
return rc;
@@ -469,6 +496,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
+ struct device *conf_dev;
struct idxd_device *idxd;
int rc;
@@ -476,19 +504,21 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (!idxd)
return NULL;
+ conf_dev = idxd_confdev(idxd);
idxd->pdev = pdev;
idxd->data = data;
+ idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
return NULL;
- device_initialize(&idxd->conf_dev);
- idxd->conf_dev.parent = dev;
- idxd->conf_dev.bus = &dsa_bus_type;
- idxd->conf_dev.type = idxd->data->dev_type;
- rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
+ device_initialize(conf_dev);
+ conf_dev->parent = dev;
+ conf_dev->bus = &dsa_bus_type;
+ conf_dev->type = idxd->data->dev_type;
+ rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
- put_device(&idxd->conf_dev);
+ put_device(conf_dev);
return NULL;
}
@@ -639,15 +669,9 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev_dbg(dev, "Set DMA masks\n");
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc)
- goto err;
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err;
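
The hunk above folds four legacy pci_set_*_dma_mask() calls into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one step. Conceptually the helper behaves like this sketch (my_set_masks is a hypothetical name for illustration):

    /* Roughly what dma_set_mask_and_coherent() does internally. */
    static int my_set_masks(struct device *dev, u64 mask)
    {
            int rc = dma_set_mask(dev, mask);

            if (!rc)
                    rc = dma_set_coherent_mask(dev, mask);
            return rc;
    }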
@@ -668,8 +692,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dev_register;
}
- idxd->state = IDXD_DEV_CONF_READY;
-
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
@@ -680,7 +702,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(&idxd->conf_dev);
+ put_device(idxd_confdev(idxd));
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -793,7 +815,7 @@ static void idxd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
- device_unregister(&idxd->conf_dev);
+ device_unregister(idxd_confdev(idxd));
}
static struct pci_driver idxd_pci_driver = {
@@ -824,13 +846,17 @@ static int __init idxd_init_module(void)
perfmon_init();
- err = idxd_register_bus_type();
+ err = idxd_driver_register(&idxd_drv);
if (err < 0)
- return err;
+ goto err_idxd_driver_register;
- err = idxd_register_driver();
+ err = idxd_driver_register(&idxd_dmaengine_drv);
if (err < 0)
- goto err_idxd_driver_register;
+ goto err_idxd_dmaengine_driver_register;
+
+ err = idxd_driver_register(&idxd_user_drv);
+ if (err < 0)
+ goto err_idxd_user_driver_register;
err = idxd_cdev_register();
if (err)
@@ -845,19 +871,23 @@ static int __init idxd_init_module(void)
err_pci_register:
idxd_cdev_remove();
err_cdev_register:
- idxd_unregister_driver();
+ idxd_driver_unregister(&idxd_user_drv);
+err_idxd_user_driver_register:
+ idxd_driver_unregister(&idxd_dmaengine_drv);
+err_idxd_dmaengine_driver_register:
+ idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
- idxd_unregister_bus_type();
return err;
}
module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
- idxd_unregister_driver();
+ idxd_driver_unregister(&idxd_user_drv);
+ idxd_driver_unregister(&idxd_dmaengine_drv);
+ idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
- idxd_unregister_bus_type();
perfmon_exit();
}
module_exit(idxd_exit_module);
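
init/exit now register three sub-drivers (idxd_drv, idxd_dmaengine_drv, idxd_user_drv) on the dsa bus instead of one catch-all driver plus a private bus type. A hedged sketch of the registration wrapper implied by these calls, with the struct layout assumed from the dsa_drv definition removed later in this series:

    /* Sketch: register an idxd sub-driver on the shared dsa bus. */
    int __idxd_driver_register(struct idxd_device_driver *idxd_drv,
                               struct module *owner, const char *mod_name)
    {
            struct device_driver *drv = &idxd_drv->drv;

            drv->name = idxd_drv->name;
            drv->bus = &dsa_bus_type;
            drv->owner = owner;
            drv->mod_name = mod_name;
            return driver_register(drv);
    }

    #define idxd_driver_register(driver) \
            __idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)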
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 4e3a7198c0ca..ca88fa7a328e 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -22,13 +22,6 @@ struct idxd_fault {
struct idxd_device *idxd;
};
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data);
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data);
-
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -51,7 +44,7 @@ static void idxd_device_reinit(struct work_struct *work)
rc = idxd_wq_enable(wq);
if (rc < 0) {
dev_warn(dev, "Unable to re-enable wq %s\n",
- dev_name(&wq->conf_dev));
+ dev_name(wq_confdev(wq)));
}
}
}
@@ -59,47 +52,7 @@ static void idxd_device_reinit(struct work_struct *work)
return;
out:
- idxd_device_wqs_clear_state(idxd);
-}
-
-static void idxd_device_fault_work(struct work_struct *work)
-{
- struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
- struct idxd_irq_entry *ie;
- int i;
- int processed;
- int irqcnt = fault->idxd->num_wq_irqs + 1;
-
- for (i = 1; i < irqcnt; i++) {
- ie = &fault->idxd->irq_entries[i];
- irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
- &processed, fault->addr);
- if (processed)
- break;
-
- irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
- &processed, fault->addr);
- if (processed)
- break;
- }
-
- kfree(fault);
-}
-
-static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
- u64 fault_addr)
-{
- struct idxd_fault *fault;
-
- fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
- if (!fault)
- return -ENOMEM;
-
- fault->addr = fault_addr;
- fault->idxd = idxd;
- INIT_WORK(&fault->work, idxd_device_fault_work);
- queue_work(idxd->wq, &fault->work);
- return 0;
+ idxd_device_clear_state(idxd);
}

static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
@@ -111,7 +64,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
bool err = false;
if (cause & IDXD_INTC_ERR) {
- spin_lock_bh(&idxd->dev_lock);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
@@ -136,7 +89,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
}
}
- spin_unlock_bh(&idxd->dev_lock);
+ spin_unlock(&idxd->dev_lock);
val |= IDXD_INTC_ERR;
for (i = 0; i < 4; i++)
@@ -168,15 +121,6 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (!err)
return 0;
- /*
- * This case should rarely happen and typically is due to software
- * programming error by the driver.
- */
- if (idxd->sw_err.valid &&
- idxd->sw_err.desc_valid &&
- idxd->sw_err.fault_addr)
- idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
-
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
idxd->state = IDXD_DEV_HALTED;
@@ -189,15 +133,15 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
- spin_lock_bh(&idxd->dev_lock);
+ spin_lock(&idxd->dev_lock);
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
- idxd_device_wqs_clear_state(idxd);
+ idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
- spin_unlock_bh(&idxd->dev_lock);
+ spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}
@@ -228,127 +172,79 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
-static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
-{
- /*
- * Completion address can be bad as well. Check fault address match for descriptor
- * and completion address.
- */
- if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
- struct idxd_device *idxd = desc->wq->idxd;
- struct device *dev = &idxd->pdev->dev;
-
- dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
- return true;
- }
-
- return false;
-}
-
-static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data)
+static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
- int queued = 0;
- unsigned long flags;
- enum idxd_complete_type reason;
- *processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
- goto out;
-
- if (wtype == IRQ_WORK_NORMAL)
- reason = IDXD_COMPLETE_NORMAL;
- else
- reason = IDXD_COMPLETE_DEV_FAIL;
+ return;
llist_for_each_entry_safe(desc, t, head, llnode) {
u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
if (status) {
- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ /*
+ * Check against the original status: ABORT is software defined as
+ * 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ */
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
- (*processed)++;
continue;
}
- if (unlikely(status != DSA_COMP_SUCCESS))
- match_fault(desc, data);
- complete_desc(desc, reason);
- (*processed)++;
+ complete_desc(desc, IDXD_COMPLETE_NORMAL);
} else {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
&irq_entry->work_list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- queued++;
+ spin_unlock(&irq_entry->list_lock);
}
}
-
- out:
- return queued;
}
-static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- enum irq_work_type wtype,
- int *processed, u64 data)
+static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
- int queued = 0;
- unsigned long flags;
LIST_HEAD(flist);
struct idxd_desc *desc, *n;
- enum idxd_complete_type reason;
-
- *processed = 0;
- if (wtype == IRQ_WORK_NORMAL)
- reason = IDXD_COMPLETE_NORMAL;
- else
- reason = IDXD_COMPLETE_DEV_FAIL;
/*
* This lock protects the list from corruption by accesses outside of the irq handler
* thread.
*/
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ spin_lock(&irq_entry->list_lock);
if (list_empty(&irq_entry->work_list)) {
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- return 0;
+ spin_unlock(&irq_entry->list_lock);
+ return;
}
list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
if (desc->completion->status) {
list_del(&desc->list);
- (*processed)++;
list_add_tail(&desc->list, &flist);
- } else {
- queued++;
}
}
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ spin_unlock(&irq_entry->list_lock);
list_for_each_entry(desc, &flist, list) {
- u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
-
- if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+ /*
+ * Check against the original status: ABORT is software defined as
+ * 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ */
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
continue;
}
- if (unlikely(status != DSA_COMP_SUCCESS))
- match_fault(desc, data);
- complete_desc(desc, reason);
+ complete_desc(desc, IDXD_COMPLETE_NORMAL);
}
-
- return queued;
}
-static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
+irqreturn_t idxd_wq_thread(int irq, void *data)
{
- int rc, processed, total = 0;
+ struct idxd_irq_entry *irq_entry = data;
/*
* There are two lists we are processing. The pending_llist is where
@@ -367,31 +263,9 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* and process the completed entries.
* 4. If the entry is still waiting on hardware, list_add_tail() to
* the work_list.
- * 5. Repeat until no more descriptors.
*/
- do {
- rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
- &processed, 0);
- total += processed;
- if (rc != 0)
- continue;
-
- rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
- &processed, 0);
- total += processed;
- } while (rc != 0);
-
- return total;
-}
-
-irqreturn_t idxd_wq_thread(int irq, void *data)
-{
- struct idxd_irq_entry *irq_entry = data;
- int processed;
-
- processed = idxd_desc_process(irq_entry);
- if (processed == 0)
- return IRQ_NONE;
+ irq_process_work_list(irq_entry);
+ irq_process_pending_llist(irq_entry);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index c970c3f025f0..ffc7550a77ee 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -7,6 +7,9 @@
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
+#define DEVICE_VERSION_1 0x100
+#define DEVICE_VERSION_2 0x200
+
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
#define IDXD_PORTAL_SIZE PAGE_SIZE
@@ -349,6 +352,9 @@ union wqcfg {
} __packed;
#define WQCFG_PASID_IDX 2
+#define WQCFG_OCCUP_IDX 6
+
+#define WQCFG_OCCUP_MASK 0xffff
/*
* This macro calculates the offset into the WQCFG register
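
WQCFG_OCCUP_IDX and WQCFG_OCCUP_MASK locate the 16-bit occupancy field within a WQ's configuration registers. Combined with the WQCFG_OFFSET() macro described in the comment above, a read boils down to this sketch, mirroring the sysfs handler added later in this series:

    u32 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
    u32 occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;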
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 36c9c1a89b7e..de76fb4abac2 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -22,21 +22,13 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
desc->hw->pasid = idxd->pasid;
/*
- * Descriptor completion vectors are 1...N for MSIX. We will round
- * robin through the N vectors.
+ * On host, MSIX vector 0 is used for misc interrupts. Therefore when we match
+ * vectors 1:1 to the WQ id, we need to add 1.
*/
- wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- if (!idxd->int_handles) {
- desc->hw->int_handle = wq->vec_ptr;
- } else {
- /*
- * int_handles are only for descriptor completion. However for device
- * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
- * though we are rotating through 1...N for descriptor interrupts, we
- * need to acqurie the int_handles from 0..N-1.
- */
- desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
- }
+ if (!idxd->int_handles)
+ desc->hw->int_handle = wq->id + 1;
+ else
+ desc->hw->int_handle = idxd->int_handles[wq->id];
return desc;
}
@@ -67,7 +59,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
if (signal_pending_state(TASK_INTERRUPTIBLE, current))
break;
idx = sbitmap_queue_get(sbq, &cpu);
- if (idx > 0)
+ if (idx >= 0)
break;
schedule();
}
@@ -114,14 +106,13 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
- unsigned long flags;
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
* Grab the list lock so it will block the irq thread handler. This allows the
* abort code to locate the descriptor that needs to be aborted.
*/
- spin_lock_irqsave(&ie->list_lock, flags);
+ spin_lock(&ie->list_lock);
head = llist_del_all(&ie->pending_llist);
if (head) {
llist_for_each_entry_safe(d, t, head, llnode) {
@@ -135,7 +126,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (!found)
found = list_abort_desc(wq, ie, desc);
- spin_unlock_irqrestore(&ie->list_lock, flags);
+ spin_unlock(&ie->list_lock);
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
@@ -148,13 +139,17 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
void __iomem *portal;
int rc;
- if (idxd->state != IDXD_DEV_ENABLED)
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ idxd_free_desc(wq, desc);
return -EIO;
+ }
- if (!percpu_ref_tryget_live(&wq->wq_active))
+ if (!percpu_ref_tryget_live(&wq->wq_active)) {
+ idxd_free_desc(wq, desc);
return -ENXIO;
+ }
- portal = wq->portal;
+ portal = idxd_wq_portal_addr(wq);
/*
* The wmb() flushes writes to coherent DMA data before
@@ -168,7 +163,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* that we designated the descriptor to.
*/
if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- ie = &idxd->irq_entries[desc->vector];
+ ie = &idxd->irq_entries[wq->id + 1];
llist_add(&desc->llnode, &ie->pending_llist);
}
@@ -183,8 +178,12 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
*/
rc = enqcmds(portal, desc->hw);
if (rc < 0) {
+ percpu_ref_put(&wq->wq_active);
+ /* abort operation frees the descriptor */
if (ie)
llist_abort_desc(wq, ie, desc);
+ else
+ idxd_free_desc(wq, desc);
return rc;
}
}
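
After this change idxd_submit_desc() consumes the descriptor on every failure path, either freeing it directly or handing it to the abort path, which frees it. Callers therefore must not touch desc once a non-zero code is returned; a caller sketch under that assumption:

    desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
    if (IS_ERR(desc))
            return PTR_ERR(desc);

    /* ... fill in desc->hw ... */

    rc = idxd_submit_desc(wq, desc);
    if (rc)
            return rc;  /* desc already freed; do not reuse it */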
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 26d8ff97d13d..a9025be940db 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -16,336 +16,11 @@ static char *idxd_wq_type_names[] = {
[IDXD_WQT_USER] = "user",
};
-static int idxd_config_bus_match(struct device *dev,
- struct device_driver *drv)
-{
- int matched = 0;
-
- if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- if (idxd->state != IDXD_DEV_CONF_READY)
- return 0;
- matched = 1;
- } else if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
- struct idxd_device *idxd = wq->idxd;
-
- if (idxd->state < IDXD_DEV_CONF_READY)
- return 0;
-
- if (wq->state != IDXD_WQ_DISABLED) {
- dev_dbg(dev, "%s not disabled\n", dev_name(dev));
- return 0;
- }
- matched = 1;
- }
-
- if (matched)
- dev_dbg(dev, "%s matched\n", dev_name(dev));
-
- return matched;
-}
-
-static int enable_wq(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
- unsigned long flags;
- int rc;
-
- mutex_lock(&wq->wq_lock);
-
- if (idxd->state != IDXD_DEV_ENABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Enabling while device not enabled.\n");
- return -EPERM;
- }
-
- if (wq->state != IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d already enabled.\n", wq->id);
- return -EBUSY;
- }
-
- if (!wq->group) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ not attached to group.\n");
- return -EINVAL;
- }
-
- if (strlen(wq->name) == 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ name not set.\n");
- return -EINVAL;
- }
-
- /* Shared WQ checks */
- if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
- dev_warn(dev, "PASID not enabled and shared WQ.\n");
- mutex_unlock(&wq->wq_lock);
- return -ENXIO;
- }
- /*
- * Shared wq with the threshold set to 0 means the user
- * did not set the threshold or transitioned from a
- * dedicated wq but did not set threshold. A value
- * of 0 would effectively disable the shared wq. The
- * driver does not allow a value of 0 to be set for
- * threshold via sysfs.
- */
- if (wq->threshold == 0) {
- dev_warn(dev, "Shared WQ and threshold 0.\n");
- mutex_unlock(&wq->wq_lock);
- return -EINVAL;
- }
- }
-
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ resource alloc failed\n");
- return rc;
- }
-
- spin_lock_irqsave(&idxd->dev_lock, flags);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_enable(wq);
- if (rc < 0) {
- mutex_unlock(&wq->wq_lock);
- dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
- return rc;
- }
-
- rc = idxd_wq_map_portal(wq);
- if (rc < 0) {
- dev_warn(dev, "wq portal mapping failed: %d\n", rc);
- rc = idxd_wq_disable(wq);
- if (rc < 0)
- dev_warn(dev, "IDXD wq disable failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
-
- wq->client_count = 0;
-
- if (wq->type == IDXD_WQT_KERNEL) {
- rc = idxd_wq_init_percpu_ref(wq);
- if (rc < 0) {
- dev_dbg(dev, "percpu_ref setup failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- if (is_idxd_wq_dmaengine(wq)) {
- rc = idxd_register_dma_channel(wq);
- if (rc < 0) {
- dev_dbg(dev, "DMA channel register failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- } else if (is_idxd_wq_cdev(wq)) {
- rc = idxd_wq_add_cdev(wq);
- if (rc < 0) {
- dev_dbg(dev, "Cdev creation failed\n");
- mutex_unlock(&wq->wq_lock);
- return rc;
- }
- }
-
- mutex_unlock(&wq->wq_lock);
- dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
-
- return 0;
-}
-
-static int idxd_config_bus_probe(struct device *dev)
-{
- int rc = 0;
- unsigned long flags;
-
- dev_dbg(dev, "%s called\n", __func__);
-
- if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- if (idxd->state != IDXD_DEV_CONF_READY) {
- dev_warn(dev, "Device not ready for config\n");
- return -EBUSY;
- }
-
- if (!try_module_get(THIS_MODULE))
- return -ENXIO;
-
- /* Perform IDXD configuration and enabling */
- spin_lock_irqsave(&idxd->dev_lock, flags);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
- rc = idxd_device_config(idxd);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_warn(dev, "Device config failed: %d\n", rc);
- return rc;
- }
-
- /* start device */
- rc = idxd_device_enable(idxd);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_warn(dev, "Device enable failed: %d\n", rc);
- return rc;
- }
-
- dev_info(dev, "Device %s enabled\n", dev_name(dev));
-
- rc = idxd_register_dma_device(idxd);
- if (rc < 0) {
- module_put(THIS_MODULE);
- dev_dbg(dev, "Failed to register dmaengine device\n");
- return rc;
- }
- return 0;
- } else if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
-
- return enable_wq(wq);
- }
-
- return -ENODEV;
-}
-
-static void disable_wq(struct idxd_wq *wq)
-{
- struct idxd_device *idxd = wq->idxd;
- struct device *dev = &idxd->pdev->dev;
-
- mutex_lock(&wq->wq_lock);
- dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
- if (wq->state == IDXD_WQ_DISABLED) {
- mutex_unlock(&wq->wq_lock);
- return;
- }
-
- if (wq->type == IDXD_WQT_KERNEL)
- idxd_wq_quiesce(wq);
-
- if (is_idxd_wq_dmaengine(wq))
- idxd_unregister_dma_channel(wq);
- else if (is_idxd_wq_cdev(wq))
- idxd_wq_del_cdev(wq);
-
- if (idxd_wq_refcount(wq))
- dev_warn(dev, "Clients has claim on wq %d: %d\n",
- wq->id, idxd_wq_refcount(wq));
-
- idxd_wq_unmap_portal(wq);
-
- idxd_wq_drain(wq);
- idxd_wq_reset(wq);
-
- idxd_wq_free_resources(wq);
- wq->client_count = 0;
- mutex_unlock(&wq->wq_lock);
-
- dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
-}
-
-static void idxd_config_bus_remove(struct device *dev)
-{
- int rc;
-
- dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
-
- /* disable workqueue here */
- if (is_idxd_wq_dev(dev)) {
- struct idxd_wq *wq = confdev_to_wq(dev);
-
- disable_wq(wq);
- } else if (is_idxd_dev(dev)) {
- struct idxd_device *idxd = confdev_to_idxd(dev);
- int i;
-
- dev_dbg(dev, "%s removing dev %s\n", __func__,
- dev_name(&idxd->conf_dev));
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- if (wq->state == IDXD_WQ_DISABLED)
- continue;
- dev_warn(dev, "Active wq %d on disable %s.\n", i,
- dev_name(&idxd->conf_dev));
- device_release_driver(&wq->conf_dev);
- }
-
- idxd_unregister_dma_device(idxd);
- rc = idxd_device_disable(idxd);
- if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- mutex_lock(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
- mutex_unlock(&wq->wq_lock);
- }
- }
- module_put(THIS_MODULE);
- if (rc < 0)
- dev_warn(dev, "Device disable failed\n");
- else
- dev_info(dev, "Device %s disabled\n", dev_name(dev));
-
- }
-}
-
-static void idxd_config_bus_shutdown(struct device *dev)
-{
- dev_dbg(dev, "%s called\n", __func__);
-}
-
-struct bus_type dsa_bus_type = {
- .name = "dsa",
- .match = idxd_config_bus_match,
- .probe = idxd_config_bus_probe,
- .remove = idxd_config_bus_remove,
- .shutdown = idxd_config_bus_shutdown,
-};
-
-static struct idxd_device_driver dsa_drv = {
- .drv = {
- .name = "dsa",
- .bus = &dsa_bus_type,
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
-};
-
-/* IDXD generic driver setup */
-int idxd_register_driver(void)
-{
- return driver_register(&dsa_drv.drv);
-}
-
-void idxd_unregister_driver(void)
-{
- driver_unregister(&dsa_drv.drv);
-}
-
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_engine *engine =
- container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
if (engine->group)
return sysfs_emit(buf, "%d\n", engine->group->id);
@@ -357,8 +32,7 @@ static ssize_t engine_group_id_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_engine *engine =
- container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
struct idxd_device *idxd = engine->idxd;
long id;
int rc;
@@ -412,7 +86,7 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
static void idxd_conf_engine_release(struct device *dev)
{
- struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_engine *engine = confdev_to_engine(dev);
kfree(engine);
}
@@ -442,8 +116,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}
@@ -452,8 +125,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -490,8 +162,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}
@@ -500,8 +171,7 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -535,8 +205,7 @@ static ssize_t group_use_token_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%u\n", group->use_token_limit);
}
@@ -545,8 +214,7 @@ static ssize_t group_use_token_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
unsigned long val;
int rc;
@@ -578,8 +246,7 @@ static struct device_attribute dev_attr_group_use_token_limit =
static ssize_t group_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
int i, rc = 0;
struct idxd_device *idxd = group->idxd;
@@ -607,8 +274,7 @@ static struct device_attribute dev_attr_group_engines =
static ssize_t group_work_queues_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
int i, rc = 0;
struct idxd_device *idxd = group->idxd;
@@ -637,8 +303,7 @@ static ssize_t group_traffic_class_a_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->tc_a);
}
@@ -647,8 +312,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
long val;
int rc;
@@ -663,6 +327,9 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ return -EPERM;
+
if (val < 0 || val > 7)
return -EINVAL;
@@ -678,8 +345,7 @@ static ssize_t group_traffic_class_b_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
return sysfs_emit(buf, "%d\n", group->tc_b);
}
@@ -688,8 +354,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_group *group =
- container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
long val;
int rc;
@@ -704,6 +369,9 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ return -EPERM;
+
if (val < 0 || val > 7)
return -EINVAL;
@@ -737,7 +405,7 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
static void idxd_conf_group_release(struct device *dev)
{
- struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_group *group = confdev_to_group(dev);
kfree(group);
}
@@ -752,7 +420,7 @@ struct device_type idxd_group_device_type = {
static ssize_t wq_clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%d\n", wq->client_count);
}
@@ -763,7 +431,7 @@ static struct device_attribute dev_attr_wq_clients =
static ssize_t wq_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
switch (wq->state) {
case IDXD_WQ_DISABLED:
@@ -781,7 +449,7 @@ static struct device_attribute dev_attr_wq_state =
static ssize_t wq_group_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
if (wq->group)
return sysfs_emit(buf, "%u\n", wq->group->id);
@@ -793,7 +461,7 @@ static ssize_t wq_group_id_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
long id;
int rc;
@@ -836,7 +504,7 @@ static struct device_attribute dev_attr_wq_group_id =
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}
@@ -845,7 +513,7 @@ static ssize_t wq_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -872,7 +540,7 @@ static struct device_attribute dev_attr_wq_mode =
static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->size);
}
@@ -895,7 +563,7 @@ static ssize_t wq_size_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
unsigned long size;
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -923,7 +591,7 @@ static struct device_attribute dev_attr_wq_size =
static ssize_t wq_priority_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->priority);
}
@@ -932,7 +600,7 @@ static ssize_t wq_priority_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
unsigned long prio;
struct idxd_device *idxd = wq->idxd;
int rc;
@@ -960,7 +628,7 @@ static struct device_attribute dev_attr_wq_priority =
static ssize_t wq_block_on_fault_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
@@ -969,11 +637,14 @@ static ssize_t wq_block_on_fault_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
bool bof;
int rc;
+ if (!idxd->hw.gen_cap.block_on_fault)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -999,7 +670,7 @@ static struct device_attribute dev_attr_wq_block_on_fault =
static ssize_t wq_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->threshold);
}
@@ -1008,7 +679,7 @@ static ssize_t wq_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
unsigned int val;
int rc;
@@ -1040,7 +711,7 @@ static struct device_attribute dev_attr_wq_threshold =
static ssize_t wq_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
switch (wq->type) {
case IDXD_WQT_KERNEL:
@@ -1059,7 +730,7 @@ static ssize_t wq_type_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
enum idxd_wq_type old_type;
if (wq->state != IDXD_WQ_DISABLED)
@@ -1088,7 +759,7 @@ static struct device_attribute dev_attr_wq_type =
static ssize_t wq_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%s\n", wq->name);
}
@@ -1097,7 +768,7 @@ static ssize_t wq_name_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -1124,7 +795,7 @@ static struct device_attribute dev_attr_wq_name =
static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
int minor = -1;
mutex_lock(&wq->wq_lock);
@@ -1158,7 +829,7 @@ static int __get_sysfs_u64(const char *buf, u64 *val)
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}
@@ -1166,7 +837,7 @@ static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attri
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
u64 xfer_size;
int rc;
@@ -1192,7 +863,7 @@ static struct device_attribute dev_attr_wq_max_transfer_size =
static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}
@@ -1200,7 +871,7 @@ static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribut
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
u64 batch_size;
int rc;
@@ -1225,7 +896,7 @@ static struct device_attribute dev_attr_wq_max_batch_size =
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
return sysfs_emit(buf, "%u\n", wq->ats_dis);
}
@@ -1233,7 +904,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
bool ats_dis;
int rc;
@@ -1256,6 +927,24 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
static struct device_attribute dev_attr_wq_ats_disable =
__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
+static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+ u32 occup, offset;
+
+ if (!idxd->hw.wq_cap.occupancy)
+ return -EOPNOTSUPP;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
+ occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
+
+ return sysfs_emit(buf, "%u\n", occup);
+}
+
+static struct device_attribute dev_attr_wq_occupancy =
+ __ATTR(occupancy, 0444, wq_occupancy_show, NULL);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1271,6 +960,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_max_transfer_size.attr,
&dev_attr_wq_max_batch_size.attr,
&dev_attr_wq_ats_disable.attr,
+ &dev_attr_wq_occupancy.attr,
NULL,
};
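
The new occupancy attribute is read-only (mode 0444) and reports the live count from WQCFG. From userspace it shows up under the wq's config device on the dsa bus; a small C reader, with the sysfs path assumed purely for illustration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[16];
            ssize_t n;
            /* Illustrative path; actual naming depends on the device/wq ids. */
            int fd = open("/sys/bus/dsa/devices/wq0.0/occupancy", O_RDONLY);

            if (fd < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    printf("occupancy: %s", buf);
            }
            close(fd);
            return 0;
    }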
@@ -1285,7 +975,7 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
static void idxd_conf_wq_release(struct device *dev)
{
- struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_wq *wq = confdev_to_wq(dev);
kfree(wq->wqcfg);
kfree(wq);
@@ -1301,8 +991,7 @@ struct device_type idxd_wq_device_type = {
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
@@ -1312,8 +1001,7 @@ static ssize_t max_work_queues_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
@@ -1322,8 +1010,7 @@ static DEVICE_ATTR_RO(max_work_queues_size);
static ssize_t max_groups_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
@@ -1332,8 +1019,7 @@ static DEVICE_ATTR_RO(max_groups);
static ssize_t max_work_queues_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
@@ -1342,8 +1028,7 @@ static DEVICE_ATTR_RO(max_work_queues);
static ssize_t max_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
@@ -1352,8 +1037,7 @@ static DEVICE_ATTR_RO(max_engines);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
@@ -1362,8 +1046,7 @@ static DEVICE_ATTR_RO(numa_node);
static ssize_t max_batch_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
@@ -1373,8 +1056,7 @@ static ssize_t max_transfer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
@@ -1383,8 +1065,7 @@ static DEVICE_ATTR_RO(max_transfer_size);
static ssize_t op_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int i, rc = 0;
for (i = 0; i < 4; i++)
@@ -1399,8 +1080,7 @@ static DEVICE_ATTR_RO(op_cap);
static ssize_t gen_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
@@ -1409,8 +1089,7 @@ static DEVICE_ATTR_RO(gen_cap);
static ssize_t configurable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
@@ -1419,18 +1098,16 @@ static DEVICE_ATTR_RO(configurable);
static ssize_t clients_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
- unsigned long flags;
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int count = 0, i;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
count += wq->client_count;
}
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
return sysfs_emit(buf, "%d\n", count);
}
@@ -1439,8 +1116,7 @@ static DEVICE_ATTR_RO(clients);
static ssize_t pasid_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
@@ -1449,12 +1125,10 @@ static DEVICE_ATTR_RO(pasid_enabled);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
switch (idxd->state) {
case IDXD_DEV_DISABLED:
- case IDXD_DEV_CONF_READY:
return sysfs_emit(buf, "disabled\n");
case IDXD_DEV_ENABLED:
return sysfs_emit(buf, "enabled\n");
@@ -1469,15 +1143,13 @@ static DEVICE_ATTR_RO(state);
static ssize_t errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
int i, out = 0;
- unsigned long flags;
- spin_lock_irqsave(&idxd->dev_lock, flags);
+ spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
- spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ spin_unlock(&idxd->dev_lock);
out--;
out += sysfs_emit_at(buf, out, "\n");
return out;
@@ -1487,8 +1159,7 @@ static DEVICE_ATTR_RO(errors);
static ssize_t max_tokens_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
@@ -1497,8 +1168,7 @@ static DEVICE_ATTR_RO(max_tokens);
static ssize_t token_limit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->token_limit);
}
@@ -1507,8 +1177,7 @@ static ssize_t token_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
unsigned long val;
int rc;
@@ -1536,8 +1205,7 @@ static DEVICE_ATTR_RW(token_limit);
static ssize_t cdev_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd =
- container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%u\n", idxd->major);
}
@@ -1546,11 +1214,20 @@ static DEVICE_ATTR_RO(cdev_major);
static ssize_t cmd_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
-static DEVICE_ATTR_RO(cmd_status);
+
+static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ idxd->cmd_status = 0;
+ return count;
+}
+static DEVICE_ATTR_RW(cmd_status);
static struct attribute *idxd_device_attributes[] = {
&dev_attr_version.attr,
@@ -1586,7 +1263,7 @@ static const struct attribute_group *idxd_attribute_groups[] = {
static void idxd_conf_device_release(struct device *dev)
{
- struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+ struct idxd_device *idxd = confdev_to_idxd(dev);
kfree(idxd->groups);
kfree(idxd->wqs);
@@ -1611,12 +1288,12 @@ struct device_type iax_device_type = {
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
+ struct idxd_engine *engine;
int i, j, rc;
for (i = 0; i < idxd->max_engines; i++) {
- struct idxd_engine *engine = idxd->engines[i];
-
- rc = device_add(&engine->conf_dev);
+ engine = idxd->engines[i];
+ rc = device_add(engine_confdev(engine));
if (rc < 0)
goto cleanup;
}
@@ -1625,22 +1302,26 @@ static int idxd_register_engine_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_engines; i++)
- put_device(&idxd->engines[i]->conf_dev);
+ for (; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ put_device(engine_confdev(engine));
+ }
- while (j--)
- device_unregister(&idxd->engines[j]->conf_dev);
+ while (j--) {
+ engine = idxd->engines[j];
+ device_unregister(engine_confdev(engine));
+ }
return rc;
}
static int idxd_register_group_devices(struct idxd_device *idxd)
{
+ struct idxd_group *group;
int i, j, rc;
for (i = 0; i < idxd->max_groups; i++) {
- struct idxd_group *group = idxd->groups[i];
-
- rc = device_add(&group->conf_dev);
+ group = idxd->groups[i];
+ rc = device_add(group_confdev(group));
if (rc < 0)
goto cleanup;
}
@@ -1649,22 +1330,26 @@ static int idxd_register_group_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_groups; i++)
- put_device(&idxd->groups[i]->conf_dev);
+ for (; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ }
- while (j--)
- device_unregister(&idxd->groups[j]->conf_dev);
+ while (j--) {
+ group = idxd->groups[j];
+ device_unregister(group_confdev(group));
+ }
return rc;
}
static int idxd_register_wq_devices(struct idxd_device *idxd)
{
+ struct idxd_wq *wq;
int i, rc, j;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
-
- rc = device_add(&wq->conf_dev);
+ wq = idxd->wqs[i];
+ rc = device_add(wq_confdev(wq));
if (rc < 0)
goto cleanup;
}
@@ -1673,11 +1358,15 @@ static int idxd_register_wq_devices(struct idxd_device *idxd)
cleanup:
j = i - 1;
- for (; i < idxd->max_wqs; i++)
- put_device(&idxd->wqs[i]->conf_dev);
+ for (; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ put_device(wq_confdev(wq));
+ }
- while (j--)
- device_unregister(&idxd->wqs[j]->conf_dev);
+ while (j--) {
+ wq = idxd->wqs[j];
+ device_unregister(wq_confdev(wq));
+ }
return rc;
}
@@ -1686,7 +1375,7 @@ int idxd_register_devices(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
int rc, i;
- rc = device_add(&idxd->conf_dev);
+ rc = device_add(idxd_confdev(idxd));
if (rc < 0)
return rc;
@@ -1712,12 +1401,12 @@ int idxd_register_devices(struct idxd_device *idxd)
err_group:
for (i = 0; i < idxd->max_engines; i++)
- device_unregister(&idxd->engines[i]->conf_dev);
+ device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
- device_unregister(&idxd->wqs[i]->conf_dev);
+ device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
- device_del(&idxd->conf_dev);
+ device_del(idxd_confdev(idxd));
return rc;
}
@@ -1728,19 +1417,19 @@ void idxd_unregister_devices(struct idxd_device *idxd)
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
- device_unregister(&wq->conf_dev);
+ device_unregister(wq_confdev(wq));
}
for (i = 0; i < idxd->max_engines; i++) {
struct idxd_engine *engine = idxd->engines[i];
- device_unregister(&engine->conf_dev);
+ device_unregister(engine_confdev(engine));
}
for (i = 0; i < idxd->max_groups; i++) {
struct idxd_group *group = idxd->groups[i];
- device_unregister(&group->conf_dev);
+ device_unregister(group_confdev(group));
}
}
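
Every container_of(dev, struct idxd_*, conf_dev) open-coding in sysfs.c collapses into the confdev_to_*() helpers. With conf_dev embedded in struct idxd_dev as sketched earlier, such a helper plausibly resolves in two steps (field names assumed):

    static inline struct idxd_device *confdev_to_idxd(struct device *dev)
    {
            struct idxd_dev *idxd_dev =
                    container_of(dev, struct idxd_dev, conf_dev);

            return container_of(idxd_dev, struct idxd_device, idxd_dev);
    }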
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index df7704053d91..e2b5129c5f84 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4319,6 +4319,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long val;
+ int err;
if (!count || count > 11)
return -EINVAL;
@@ -4327,7 +4328,10 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
return -EFAULT;
/* Write a key */
- sscanf(buf, "%lx", &val);
+ err = kstrtoul(buf, 16, &val);
+ if (err)
+ return err;
+
dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
isync();
@@ -4368,7 +4372,7 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long reg, val;
-
+ int err;
#ifdef CONFIG_440SP
/* 440SP uses default 0x14D polynomial only */
return -EINVAL;
@@ -4378,7 +4382,9 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
return -EINVAL;
/* e.g., 0x14D or 0x11D */
- sscanf(buf, "%lx", &val);
+ err = kstrtoul(buf, 16, &val);
+ if (err)
+ return err;
if (val & ~0x1FF)
return -EINVAL;
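
Both hunks replace sscanf() with kstrtoul(), which actually reports bad input: it returns 0 on success, -EINVAL on malformed strings and -ERANGE on overflow, whereas sscanf() would silently leave val untouched. In miniature:

    unsigned long val;
    int err = kstrtoul("0x14d", 16, &val);  /* base 16 accepts a 0x prefix */

    if (err)  /* -EINVAL on junk, -ERANGE on overflow */
            return err;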
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
new file mode 100644
index 000000000000..b430edd709f9
--- /dev/null
+++ b/drivers/dma/ptdma/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities to perform high bandwidth memory to
+ memory and IO copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is intended
+ to be used with AMD Non-Transparent Bridge devices and not for
+ general purpose peripheral DMA.
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/ptdma/Makefile
new file mode 100644
index 000000000000..ce5410268a9a
--- /dev/null
+++ b/drivers/dma/ptdma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD Passthru DMA driver
+#
+
+obj-$(CONFIG_AMD_PTDMA) += ptdma.o
+
+ptdma-objs := ptdma-dev.o ptdma-dmaengine.o ptdma-debugfs.o
+
+ptdma-$(CONFIG_PCI) += ptdma-pci.o
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/ptdma/ptdma-debugfs.c
new file mode 100644
index 000000000000..c8307d3044a3
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-debugfs.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "ptdma.h"
+
+/* DebugFS helpers */
+#define RI_VERSION_NUM 0x0000003F
+
+#define RI_NUM_VQM 0x00078000
+#define RI_NVQM_SHIFT 15
+
+static int pt_debugfs_info_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+ unsigned int regval;
+
+ seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+
+ regval = ioread32(pt->io_regs + CMD_PT_VERSION);
+
+ seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
+ seq_puts(s, " Engines:");
+ seq_puts(s, "\n");
+ seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
+
+ return 0;
+}
+
+/*
+ * Return a formatted buffer containing the current
+ * statistics of the PTDMA queue.
+ */
+static int pt_debugfs_stats_show(struct seq_file *s, void *p)
+{
+ struct pt_device *pt = s->private;
+
+ seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);
+
+ return 0;
+}
+
+static int pt_debugfs_queue_show(struct seq_file *s, void *p)
+{
+ struct pt_cmd_queue *cmd_q = s->private;
+ unsigned int regval;
+
+ if (!cmd_q)
+ return 0;
+
+ seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
+
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
+DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
+
+void ptdma_debugfs_setup(struct pt_device *pt)
+{
+ struct pt_cmd_queue *cmd_q;
+ struct dentry *debugfs_q_instance;
+
+ if (!debugfs_initialized())
+ return;
+
+ debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_info_fops);
+
+ debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
+ &pt_debugfs_stats_fops);
+
+ cmd_q = &pt->cmd_q;
+
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+}
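ptdma_debugfs_setup() above hangs "info" and "stats" entries, plus a per-queue "q/stats", off the directory the dmaengine core creates for the device (dbg_dev_root). A minimal userspace sketch for dumping them follows; the debugfs mount point and the device directory name are assumptions for one particular host and will differ in practice.

#include <stdio.h>

int main(void)
{
	/* Assumed paths; the device directory name is host-specific */
	const char *files[] = {
		"/sys/kernel/debug/dmaengine/dma0/info",
		"/sys/kernel/debug/dmaengine/dma0/stats",
		"/sys/kernel/debug/dmaengine/dma0/q/stats",
	};
	char line[256];

	for (int i = 0; i < 3; i++) {
		FILE *f = fopen(files[i], "r");

		if (!f) {
			perror(files[i]);
			continue;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}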
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
new file mode 100644
index 000000000000..8a6bf291a73f
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptdma.h"
+
+/* Human-readable error strings */
+static char *pt_error_codes[] = {
+ "",
+ "ERR 01: ILLEGAL_ENGINE",
+ "ERR 03: ILLEGAL_FUNCTION_TYPE",
+ "ERR 04: ILLEGAL_FUNCTION_MODE",
+ "ERR 06: ILLEGAL_FUNCTION_SIZE",
+ "ERR 08: ILLEGAL_FUNCTION_RSVD",
+ "ERR 09: ILLEGAL_BUFFER_LENGTH",
+ "ERR 10: VLSB_FAULT",
+ "ERR 11: ILLEGAL_MEM_ADDR",
+ "ERR 12: ILLEGAL_MEM_SEL",
+ "ERR 13: ILLEGAL_CONTEXT_ID",
+ "ERR 15: 0xF Reserved",
+ "ERR 18: CMD_TIMEOUT",
+ "ERR 19: IDMA0_AXI_SLVERR",
+ "ERR 20: IDMA0_AXI_DECERR",
+ "ERR 21: 0x15 Reserved",
+ "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+ "ERR 23: IDMA1_AIXI_DECERR",
+ "ERR 24: 0x18 Reserved",
+ "ERR 27: 0x1B Reserved",
+ "ERR 38: ODMA0_AXI_SLVERR",
+ "ERR 39: ODMA0_AXI_DECERR",
+ "ERR 40: 0x28 Reserved",
+ "ERR 41: ODMA1_AXI_SLVERR",
+ "ERR 42: ODMA1_AXI_DECERR",
+ "ERR 43: LSB_PARITY_ERR",
+};
+
+static void pt_log_error(struct pt_device *d, int e)
+{
+ /* The hardware error field is wider than the table; bound the index */
+ if (e < ARRAY_SIZE(pt_error_codes))
+ dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
+ else
+ dev_err(d->dev, "PTDMA error: (0x%x)\n", e);
+}
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn on the run bit */
+ iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
+}
+
+void pt_stop_queue(struct pt_cmd_queue *cmd_q)
+{
+ /* Turn off the run bit */
+ iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
+}
+
+static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
+ u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
+ u32 tail;
+
+ if (soc) {
+ desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
+ desc->dw0 &= ~DWORD0_SOC;
+ }
+ mutex_lock(&cmd_q->q_mutex);
+
+ /* Copy 32-byte command descriptor to hw queue. */
+ memcpy(q_desc, desc, 32);
+ cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;
+
+ /* The data used by this command must be flushed to memory */
+ wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ iowrite32(tail, cmd_q->reg_control + 0x0004);
+
+ /* Turn the queue back on using our cached control register */
+ pt_start_queue(cmd_q);
+ mutex_unlock(&cmd_q->q_mutex);
+
+ return 0;
+}
+
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ptdma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dw0 = CMD_DESC_DW0_VAL;
+ desc.length = pt_engine->src_len;
+ desc.src_lo = lower_32_bits(pt_engine->src_dma);
+ desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
+ desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
+ desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
+
+ return pt_core_execute_cmd(&desc, cmd_q);
+}
+
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
+
+static void pt_do_cmd_complete(unsigned long data)
+{
+ struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
+ struct pt_cmd *cmd = tdata->cmd;
+ struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
+ u32 tail;
+
+ if (cmd_q->cmd_error) {
+ /*
+ * Log the error and flush the queue by
+ * moving the head pointer
+ */
+ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ pt_log_error(cmd_q->pt, cmd_q->cmd_error);
+ iowrite32(tail, cmd_q->reg_control + 0x0008);
+ }
+
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+ struct pt_device *pt = data;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 status;
+
+ pt_core_disable_queue_interrupts(pt);
+ pt->total_interrupts++;
+ status = ioread32(cmd_q->reg_control + 0x0010);
+ if (status) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);
+
+ /* On error, only save the first error value */
+ if ((status & INT_ERROR) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ /* Acknowledge the interrupt */
+ iowrite32(status, cmd_q->reg_control + 0x0010);
+ pt_core_enable_queue_interrupts(pt);
+ pt_do_cmd_complete((ulong)&pt->tdata);
+ }
+ return IRQ_HANDLED;
+}
+
+int pt_core_init(struct pt_device *pt)
+{
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ u32 dma_addr_lo, dma_addr_hi;
+ struct device *dev = pt->dev;
+ struct dma_pool *dma_pool;
+ int ret;
+
+ /* Allocate a dma pool for the queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));
+
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ PT_DMAPOOL_MAX_SIZE,
+ PT_DMAPOOL_ALIGN, 0);
+ if (!dma_pool)
+ return -ENOMEM;
+
+ /* ptdma core initialisation */
+ iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
+ iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
+ iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
+ iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
+ iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);
+
+ cmd_q->pt = pt;
+ cmd_q->dma_pool = dma_pool;
+ mutex_init(&cmd_q->q_mutex);
+
+ /* Page alignment satisfies our needs for N <= 128 */
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+ cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase) {
+ dev_err(dev, "unable to allocate command queue\n");
+ ret = -ENOMEM;
+ goto e_destroy_pool;
+ }
+
+ cmd_q->qidx = 0;
+
+ /* Preset some register values */
+ cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;
+
+ /* Turn off the queues and disable interrupts until ready */
+ pt_core_disable_queue_interrupts(pt);
+
+ cmd_q->qcontrol = 0; /* Start with nothing */
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
+ /* Request an irq */
+ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_free_dma;
+ }
+
+ /* Update the device registers with queue information. */
+ cmd_q->qcontrol &= ~CMD_Q_SIZE;
+ cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);
+
+ dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ pt_core_enable_queue_interrupts(pt);
+
+ /* Register the DMA engine support */
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ goto e_free_irq;
+
+ /* Set up debugfs entries */
+ ptdma_debugfs_setup(pt);
+
+ return 0;
+
+e_free_irq:
+ free_irq(pt->pt_irq, pt);
+
+e_free_dma:
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
+
+e_destroy_pool:
+ dma_pool_destroy(pt->cmd_q.dma_pool);
+
+ return ret;
+}
+
+void pt_core_destroy(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_cmd *cmd;
+
+ /* Unregister the DMA engine */
+ pt_dmaengine_unregister(pt);
+
+ /* Disable and clear interrupts */
+ pt_core_disable_queue_interrupts(pt);
+
+ /* Turn off the run bit */
+ pt_stop_queue(cmd_q);
+
+ /* Clear the interrupt status */
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ ioread32(cmd_q->reg_control + 0x0104);
+ ioread32(cmd_q->reg_control + 0x0100);
+
+ free_irq(pt->pt_irq, pt);
+
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+ cmd_q->qbase_dma);
+
+ /* Flush the cmd queue */
+ while (!list_empty(&pt->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->pt_cmd_callback(cmd->data, -ENODEV);
+ }
+}
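pt_core_execute_cmd() above implements a simple producer ring: each 32-byte descriptor is copied into slot qidx, the index wraps modulo CMD_Q_LEN, and the bus address of the next free slot is written to the tail register. The standalone sketch below reproduces just that arithmetic with a made-up base address, so the wrap behaviour can be checked in isolation; the constants mirror the driver.

#include <stdint.h>
#include <stdio.h>

#define CMD_Q_LEN	32	/* mirrors the driver */
#define Q_DESC_SIZE	32	/* sizeof(struct ptdma_desc) */

int main(void)
{
	uint64_t qdma_tail = 0x1000;	/* example bus address of qbase */
	unsigned int qidx = 0;

	for (int cmd = 0; cmd < 34; cmd++) {
		/* Advance past the slot just filled, wrapping at CMD_Q_LEN */
		qidx = (qidx + 1) % CMD_Q_LEN;
		uint32_t tail = (uint32_t)(qdma_tail + qidx * Q_DESC_SIZE);

		if (cmd < 2 || cmd > 30)	/* print around the wrap */
			printf("cmd %2d: qidx=%2u tail=0x%04x\n",
			       cmd, qidx, tail);
	}
	return 0;
}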
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
new file mode 100644
index 000000000000..c9e52f6f2f50
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthrough DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include "ptdma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct pt_dma_chan, vc.chan);
+}
+
+static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct pt_dma_desc, vd);
+}
+
+static void pt_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static void pt_synchronize(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static void pt_do_cleanup(struct virt_dma_desc *vd)
+{
+ struct pt_dma_desc *desc = to_pt_desc(vd);
+ struct pt_device *pt = desc->pt;
+
+ kmem_cache_free(pt->dma_desc_cache, desc);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc)
+{
+ struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt;
+ struct pt_cmd *pt_cmd;
+ struct pt_cmd_queue *cmd_q;
+
+ desc->issued_to_hw = 1;
+
+ pt_cmd = &desc->pt_cmd;
+ pt = pt_cmd->pt;
+ cmd_q = &pt->cmd_q;
+ pt_engine = &pt_cmd->passthru;
+
+ pt->tdata.cmd = pt_cmd;
+
+ /* Execute the command */
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+
+ return 0;
+}
+
+static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
+{
+ /* Get the next DMA descriptor on the active list */
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ return vd ? to_pt_desc(vd) : NULL;
+}
+
+static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
+ struct pt_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ if (!desc->issued_to_hw) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+ }
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (desc) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ }
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ vchan_vdesc_fini(vd);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
+static void pt_cmd_callback(void *data, int err)
+{
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct pt_dma_chan *chan;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ /* Check for DMA descriptor completion */
+ desc = pt_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc)
+ break;
+
+ ret = pt_dma_start_desc(desc);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+}
+
+static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
+ unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ vchan_tx_prep(&chan->vc, &desc->vd, flags);
+
+ desc->pt = chan->pt;
+ desc->issued_to_hw = 0;
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ unsigned int len,
+ unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_passthru_engine *pt_engine;
+ struct pt_dma_desc *desc;
+ struct pt_cmd *pt_cmd;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ pt_cmd = &desc->pt_cmd;
+ pt_cmd->pt = chan->pt;
+ pt_engine = &pt_cmd->passthru;
+ pt_cmd->engine = PT_ENGINE_PASSTHRU;
+ pt_engine->src_dma = src;
+ pt_engine->dst_dma = dst;
+ pt_engine->src_len = len;
+ pt_cmd->pt_cmd_callback = pt_cmd_callback;
+ pt_cmd->data = desc;
+
+ desc->len = len;
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct pt_dma_desc *desc;
+
+ desc = pt_create_desc(dma_chan, dst, src, len, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static struct dma_async_tx_descriptor *
+pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+
+ desc = pt_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->vd.tx;
+}
+
+static void pt_issue_pending(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ vchan_issue_pending(&chan->vc);
+
+ desc = pt_next_dma_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+}
+
+static int pt_pause(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_stop_queue(&chan->pt->cmd_q);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static int pt_resume(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_dma_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ pt_start_queue(&chan->pt->cmd_q);
+ desc = pt_next_dma_desc(chan);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* If there was something active, re-start */
+ if (desc)
+ pt_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int pt_terminate_all(struct dma_chan *dma_chan)
+{
+ struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ vchan_get_all_descriptors(&chan->vc, &head);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ vchan_free_chan_resources(&chan->vc);
+
+ return 0;
+}
+
+int pt_dmaengine_register(struct pt_device *pt)
+{
+ struct pt_dma_chan *chan;
+ struct dma_device *dma_dev = &pt->dma_dev;
+ char *cmd_cache_name;
+ char *desc_cache_name;
+ int ret;
+
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
+ if (!pt->pt_dma_chan)
+ return -ENOMEM;
+
+ cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-cmd-cache",
+ dev_name(pt->dev));
+ if (!cmd_cache_name)
+ return -ENOMEM;
+
+ /* Cache backing pt_cmd allocations; torn down in the error paths */
+ pt->dma_cmd_cache = kmem_cache_create(cmd_cache_name,
+ sizeof(struct pt_cmd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_cmd_cache)
+ return -ENOMEM;
+
+ desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ dev_name(pt->dev));
+ if (!desc_cache_name) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
+ sizeof(struct pt_dma_desc), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!pt->dma_desc_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ dma_dev->dev = pt->dev;
+ dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /*
+ * PTDMA is intended to be used with the AMD NTB devices, hence
+ * marking it as DMA_PRIVATE.
+ */
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+
+ /* Set base and prep routines */
+ dma_dev->device_free_chan_resources = pt_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
+ dma_dev->device_issue_pending = pt_issue_pending;
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_pause = pt_pause;
+ dma_dev->device_resume = pt_resume;
+ dma_dev->device_terminate_all = pt_terminate_all;
+ dma_dev->device_synchronize = pt_synchronize;
+
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+
+ dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(pt->dma_desc_cache);
+
+err_cache:
+ kmem_cache_destroy(pt->dma_cmd_cache);
+
+ return ret;
+}
+
+void pt_dmaengine_unregister(struct pt_device *pt)
+{
+ struct dma_device *dma_dev = &pt->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(pt->dma_desc_cache);
+ kmem_cache_destroy(pt->dma_cmd_cache);
+}
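With the capabilities registered above, a client drives the engine through the usual dmaengine prep/submit/issue sequence. The sketch below shows one plausible flow, assuming 'chan' was obtained as in the earlier sketch and that src/dst are already DMA-mapped bus addresses; a real user would typically rely on the completion callback rather than polling.

#include <linux/dmaengine.h>

/*
 * Hypothetical client routine; src/dst are assumed to be mapped
 * for 'len' bytes before this is called.
 */
static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/* Simplest possible completion: poll until done or timed out */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -ETIMEDOUT;
}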
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/ptdma/ptdma-pci.c
new file mode 100644
index 000000000000..22739ff0c3c5
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include "ptdma.h"
+
+struct pt_msix {
+ int msix_count;
+ struct msix_entry msix_entry;
+};
+
+/*
+ * pt_alloc_struct - allocate and initialize the pt_device struct
+ *
+ * @dev: device struct of the PTDMA
+ */
+static struct pt_device *pt_alloc_struct(struct device *dev)
+{
+ struct pt_device *pt;
+
+ pt = devm_kzalloc(dev, sizeof(*pt), GFP_KERNEL);
+
+ if (!pt)
+ return NULL;
+ pt->dev = dev;
+
+ INIT_LIST_HEAD(&pt->cmd);
+
+ return pt;
+}
+
+static int pt_get_msix_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pt_msix->msix_entry.entry = 0;
+
+ ret = pci_enable_msix_range(pdev, &pt_msix->msix_entry, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ pt_msix->msix_count = ret;
+
+ pt->pt_irq = pt_msix->msix_entry.vector;
+
+ return 0;
+}
+
+static int pt_get_msi_irq(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ pt->pt_irq = pdev->irq;
+
+ return 0;
+}
+
+static int pt_get_irqs(struct pt_device *pt)
+{
+ struct device *dev = pt->dev;
+ int ret;
+
+ ret = pt_get_msix_irqs(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pt_get_msi_irq(pt);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void pt_free_irqs(struct pt_device *pt)
+{
+ struct pt_msix *pt_msix = pt->pt_msix;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pt_msix->msix_count)
+ pci_disable_msix(pdev);
+ else if (pt->pt_irq)
+ pci_disable_msi(pdev);
+
+ pt->pt_irq = 0;
+}
+
+static int pt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pt_device *pt;
+ struct pt_msix *pt_msix;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret = -ENOMEM;
+
+ pt = pt_alloc_struct(dev);
+ if (!pt)
+ goto e_err;
+
+ pt_msix = devm_kzalloc(dev, sizeof(*pt_msix), GFP_KERNEL);
+ if (!pt_msix)
+ goto e_err;
+
+ pt->pt_msix = pt_msix;
+ pt->dev_vdata = (struct pt_dev_vdata *)id->driver_data;
+ if (!pt->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ptdma");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ pt->io_regs = iomap_table[pt->dev_vdata->bar];
+ if (!pt->io_regs) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = pt_get_irqs(pt);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, pt);
+
+ ret = pt_core_init(pt);
+ if (ret)
+ goto e_err;
+
+ return 0;
+
+e_err:
+ dev_err(dev, "initialization failed ret = %d\n", ret);
+
+ return ret;
+}
+
+static void pt_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pt_device *pt = dev_get_drvdata(dev);
+
+ if (!pt)
+ return;
+
+ if (pt->dev_vdata)
+ pt_core_destroy(pt);
+
+ pt_free_irqs(pt);
+}
+
+static const struct pt_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+ },
+};
+
+static const struct pci_device_id pt_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1498), (kernel_ulong_t)&dev_vdata[0] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pt_pci_table);
+
+static struct pci_driver pt_pci_driver = {
+ .name = "ptdma",
+ .id_table = pt_pci_table,
+ .probe = pt_pci_probe,
+ .remove = pt_pci_remove,
+};
+
+module_pci_driver(pt_pci_driver);
+
+MODULE_AUTHOR("Sanjay R Mehta <sanju.mehta@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD PassThru DMA driver");
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
new file mode 100644
index 000000000000..afbf192c9230
--- /dev/null
+++ b/drivers/dma/ptdma/ptdma.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Passthru DMA device driver
+ * -- Based on the CCP driver
+ *
+ * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Sanjay R Mehta <sanju.mehta@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ */
+
+#ifndef __PT_DEV_H__
+#define __PT_DEV_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+
+#include "../virt-dma.h"
+
+#define MAX_PT_NAME_LEN 16
+#define MAX_DMAPOOL_NAME_LEN 32
+
+#define MAX_HW_QUEUES 1
+#define MAX_CMD_QLEN 100
+
+#define PT_ENGINE_PASSTHRU 5
+
+/* Register Mappings */
+#define IRQ_MASK_REG 0x040
+#define IRQ_STATUS_REG 0x200
+
+#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
+
+#define CMD_QUEUE_PRIO_OFFSET 0x00
+#define CMD_REQID_CONFIG_OFFSET 0x04
+#define CMD_TIMEOUT_OFFSET 0x08
+#define CMD_PT_VERSION 0x10
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+#define CMD_Q_DMA_STATUS_BASE 0x0108
+#define CMD_Q_DMA_READ_STATUS_BASE 0x010C
+#define CMD_Q_DMA_WRITE_STATUS_BASE 0x0110
+#define CMD_Q_ABORT_BASE 0x0114
+#define CMD_Q_AX_CACHE_BASE 0x0118
+
+#define CMD_CONFIG_OFFSET 0x1120
+#define CMD_CLK_GATE_CTL_OFFSET 0x6004
+
+#define CMD_DESC_DW0_VAL 0x500012
+
+/* Address offset for virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_CONFIG_REQID 0
+#define CMD_TIMEOUT_DISABLE 0
+#define CMD_CLK_DYN_GATING_DIS 0
+#define CMD_CLK_SW_GATE_MODE 0
+#define CMD_CLK_GATE_CTL 0
+#define CMD_QUEUE_PRIO GENMASK(2, 1)
+#define CMD_CONFIG_VHB_EN BIT(0)
+#define CMD_CLK_DYN_GATING_EN BIT(0)
+#define CMD_CLK_HW_GATE_MODE BIT(0)
+#define CMD_CLK_GATE_ON_DELAY BIT(12)
+#define CMD_CLK_GATE_OFF_DELAY BIT(12)
+
+#define CMD_CLK_GATE_CONFIG (CMD_CLK_GATE_CTL | \
+ CMD_CLK_HW_GATE_MODE | \
+ CMD_CLK_GATE_ON_DELAY | \
+ CMD_CLK_DYN_GATING_EN | \
+ CMD_CLK_GATE_OFF_DELAY)
+
+#define CMD_Q_LEN 32
+#define CMD_Q_RUN BIT(0)
+#define CMD_Q_HALT BIT(1)
+#define CMD_Q_MEM_LOCATION BIT(2)
+#define CMD_Q_SIZE_MASK GENMASK(4, 0)
+#define CMD_Q_SIZE GENMASK(7, 3)
+#define CMD_Q_SHIFT GENMASK(1, 0)
+#define QUEUE_SIZE_VAL ((ffs(CMD_Q_LEN) - 2) & \
+ CMD_Q_SIZE_MASK)
+#define Q_PTR_MASK ((2 << (QUEUE_SIZE_VAL + 5)) - 1)
+#define Q_DESC_SIZE sizeof(struct ptdma_desc)
+#define Q_SIZE(n) (CMD_Q_LEN * (n))
+
+#define INT_COMPLETION BIT(0)
+#define INT_ERROR BIT(1)
+#define INT_QUEUE_STOPPED BIT(2)
+#define INT_EMPTY_QUEUE BIT(3)
+#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
+
+/****** Local Storage Block ******/
+#define LSB_START 0
+#define LSB_END 127
+#define LSB_COUNT (LSB_END - LSB_START + 1)
+
+#define PT_DMAPOOL_MAX_SIZE 64
+#define PT_DMAPOOL_ALIGN BIT(5)
+
+#define PT_PASSTHRU_BLOCKSIZE 512
+
+struct pt_device;
+
+struct pt_tasklet_data {
+ struct completion completion;
+ struct pt_cmd *cmd;
+};
+
+/*
+ * struct pt_passthru_engine - pass-through operation
+ * without performing DMA mapping
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src_dma: data to be used for this operation
+ * @dst_dma: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * Variables required to be set before submitting the operation:
+ * - src_dma, dst_dma, src_len
+ * - mask, mask_len if a mask is to be applied
+ */
+struct pt_passthru_engine {
+ dma_addr_t mask;
+ u32 mask_len; /* In bytes */
+
+ dma_addr_t src_dma, dst_dma;
+ u64 src_len; /* In bytes */
+};
+
+/*
+ * struct pt_cmd - PTDMA operation request
+ * @entry: list element
+ * @work: work element used for callbacks
+ * @pt: PT device to be run on
+ * @ret: operation return code
+ * @flags: cmd processing flags
+ * @engine: PTDMA operation to perform (passthru)
+ * @engine_error: PT engine return code
+ * @passthru: engine specific structure, see struct pt_passthru_engine above
+ * @pt_cmd_callback: operation completion callback function
+ * @data: parameter value to be supplied to the callback function
+ *
+ * Variables required to be set before submitting the command:
+ * - engine, pt_cmd_callback
+ * - See the operation structure above for what is required for each
+ * operation.
+ */
+struct pt_cmd {
+ struct list_head entry;
+ struct work_struct work;
+ struct pt_device *pt;
+ int ret;
+ u32 engine;
+ u32 engine_error;
+ struct pt_passthru_engine passthru;
+ /* Completion callback support */
+ void (*pt_cmd_callback)(void *data, int err);
+ void *data;
+};
+
+struct pt_dma_desc {
+ struct virt_dma_desc vd;
+ struct pt_device *pt;
+ enum dma_status status;
+ size_t len;
+ bool issued_to_hw;
+ struct pt_cmd pt_cmd;
+};
+
+struct pt_dma_chan {
+ struct virt_dma_chan vc;
+ struct pt_device *pt;
+};
+
+struct pt_cmd_queue {
+ struct pt_device *pt;
+
+ /* Queue dma pool */
+ struct dma_pool *dma_pool;
+
+ /* Queue base address (not necessarily aligned) */
+ struct ptdma_desc *qbase;
+
+ /* Protects queue descriptor insertion and the queue index */
+ struct mutex q_mutex ____cacheline_aligned;
+ unsigned int qidx;
+
+ unsigned int qsize;
+ dma_addr_t qbase_dma;
+ dma_addr_t qdma_tail;
+
+ unsigned int active;
+ unsigned int suspended;
+
+ /* Register addresses for queue */
+ void __iomem *reg_control;
+ u32 qcontrol; /* Cached control register */
+
+ /* Status values from job */
+ u32 int_status;
+ u32 q_status;
+ u32 q_int_status;
+ u32 cmd_error;
+ /* Queue Statistics */
+ unsigned long total_pt_ops;
+} ____cacheline_aligned;
+
+struct pt_device {
+ struct list_head entry;
+
+ unsigned int ord;
+ char name[MAX_PT_NAME_LEN];
+
+ struct device *dev;
+
+ /* Bus specific device information */
+ struct pt_msix *pt_msix;
+
+ struct pt_dev_vdata *dev_vdata;
+
+ unsigned int pt_irq;
+
+ /* I/O area used for device communication */
+ void __iomem *io_regs;
+
+ spinlock_t cmd_lock ____cacheline_aligned;
+ unsigned int cmd_count;
+ struct list_head cmd;
+
+ /*
+ * The command queue. This represents the single queue available on
+ * the PTDMA for processing cmds.
+ */
+ struct pt_cmd_queue cmd_q;
+
+ /* Support for the DMA Engine capabilities */
+ struct dma_device dma_dev;
+ struct pt_dma_chan *pt_dma_chan;
+ struct kmem_cache *dma_cmd_cache;
+ struct kmem_cache *dma_desc_cache;
+
+ wait_queue_head_t lsb_queue;
+
+ /* Device Statistics */
+ unsigned long total_interrupts;
+
+ struct pt_tasklet_data tdata;
+};
+
+/*
+ * descriptor for PTDMA commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: reserved 32 bits
+ * word 7: reserved 32 bits
+ */
+
+#define DWORD0_SOC BIT(0)
+#define DWORD0_IOC BIT(1)
+
+struct dword3 {
+ unsigned int src_hi:16;
+ unsigned int src_mem:2;
+ unsigned int lsb_cxt_id:8;
+ unsigned int rsvd1:5;
+ unsigned int fixed:1;
+};
+
+struct dword5 {
+ unsigned int dst_hi:16;
+ unsigned int dst_mem:2;
+ unsigned int rsvd1:13;
+ unsigned int fixed:1;
+};
+
+struct ptdma_desc {
+ u32 dw0;
+ u32 length;
+ u32 src_lo;
+ struct dword3 dw3;
+ u32 dst_lo;
+ struct dword5 dw5;
+ __le32 rsvd1;
+ __le32 rsvd2;
+};
+
+/* Structure to hold PT device data */
+struct pt_dev_vdata {
+ const unsigned int bar;
+};
+
+int pt_dmaengine_register(struct pt_device *pt);
+void pt_dmaengine_unregister(struct pt_device *pt);
+
+void ptdma_debugfs_setup(struct pt_device *pt);
+int pt_core_init(struct pt_device *pt);
+void pt_core_destroy(struct pt_device *pt);
+
+int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine);
+
+void pt_start_queue(struct pt_cmd_queue *cmd_q);
+void pt_stop_queue(struct pt_cmd_queue *cmd_q);
+
+#endif
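The layout comment above pins the descriptor at eight 32-bit words. The standalone snippet below restates that as compile-time checks; the structs are duplicated so it builds outside the kernel tree, and note that C itself gives no portable guarantee of bitfield ordering, so the src_hi/dst_hi placement relies on the ABI the kernel already targets.

#include <stdint.h>

struct dword3 {
	unsigned int src_hi:16;
	unsigned int src_mem:2;
	unsigned int lsb_cxt_id:8;
	unsigned int rsvd1:5;
	unsigned int fixed:1;
};

struct dword5 {
	unsigned int dst_hi:16;
	unsigned int dst_mem:2;
	unsigned int rsvd1:13;
	unsigned int fixed:1;
};

struct ptdma_desc {
	uint32_t dw0;
	uint32_t length;
	uint32_t src_lo;
	struct dword3 dw3;
	uint32_t dst_lo;
	struct dword5 dw5;
	uint32_t rsvd1;
	uint32_t rsvd2;
};

/* Each bitfield group must pack into exactly one 32-bit word */
_Static_assert(sizeof(struct dword3) == 4, "dword3 must be one word");
_Static_assert(sizeof(struct dword5) == 4, "dword5 must be one word");
/* Eight 32-bit words, as documented: 32 bytes per descriptor */
_Static_assert(sizeof(struct ptdma_desc) == 32, "descriptor is 8 words");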
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 13437323a85b..a46296285307 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,3 +47,12 @@ config RENESAS_USB_DMAC
help
This driver supports the USB-DMA controller found in the Renesas
SoCs.
+
+config RZ_DMAC
+ tristate "Renesas RZ/G2L DMA Controller"
+ depends on ARCH_R9A07G044 || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This driver supports the general purpose DMA controller found in the
+ Renesas RZ/G2L SoC variants.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index abdf10341725..360ab6d25e76 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_RZ_DMAC) += rz-dmac.o
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
new file mode 100644
index 000000000000..f9f30cbeccbe
--- /dev/null
+++ b/drivers/dma/sh/rz-dmac.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L DMA Controller Driver
+ *
+ * Based on imx-dma.c
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+enum rz_dmac_prep_type {
+ RZ_DMAC_DESC_MEMCPY,
+ RZ_DMAC_DESC_SLAVE_SG,
+};
+
+struct rz_lmdesc {
+ u32 header;
+ u32 sa;
+ u32 da;
+ u32 tb;
+ u32 chcfg;
+ u32 chitvl;
+ u32 chext;
+ u32 nxla;
+};
+
+struct rz_dmac_desc {
+ struct virt_dma_desc vd;
+ dma_addr_t src;
+ dma_addr_t dest;
+ size_t len;
+ struct list_head node;
+ enum dma_transfer_direction direction;
+ enum rz_dmac_prep_type type;
+ /* For slave sg */
+ struct scatterlist *sg;
+ unsigned int sgcount;
+};
+
+#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)
+
+struct rz_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *ch_base;
+ void __iomem *ch_cmn_base;
+ unsigned int index;
+ int irq;
+ struct rz_dmac_desc *desc;
+ int descs_allocated;
+
+ enum dma_slave_buswidth src_word_size;
+ enum dma_slave_buswidth dst_word_size;
+ dma_addr_t src_per_address;
+ dma_addr_t dst_per_address;
+
+ u32 chcfg;
+ u32 chctrl;
+ int mid_rid;
+
+ struct list_head ld_free;
+ struct list_head ld_queue;
+ struct list_head ld_active;
+
+ struct {
+ struct rz_lmdesc *base;
+ struct rz_lmdesc *head;
+ struct rz_lmdesc *tail;
+ dma_addr_t base_dma;
+ } lmdesc;
+};
+
+#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+
+struct rz_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *ext_base;
+
+ unsigned int n_channels;
+ struct rz_dmac_chan *channels;
+
+ DECLARE_BITMAP(modules, 1024);
+};
+
+#define to_rz_dmac(d) container_of(d, struct rz_dmac, engine)
+
+/*
+ * -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define CHSTAT 0x0024
+#define CHCTRL 0x0028
+#define CHCFG 0x002c
+#define NXLA 0x0038
+
+#define DCTRL 0x0000
+
+#define EACH_CHANNEL_OFFSET 0x0040
+#define CHANNEL_0_7_OFFSET 0x0000
+#define CHANNEL_0_7_COMMON_BASE 0x0300
+#define CHANNEL_8_15_OFFSET 0x0400
+#define CHANNEL_8_15_COMMON_BASE 0x0700
+
+#define CHSTAT_ER BIT(4)
+#define CHSTAT_EN BIT(0)
+
+#define CHCTRL_CLRINTMSK BIT(17)
+#define CHCTRL_CLRSUS BIT(9)
+#define CHCTRL_CLRTC BIT(6)
+#define CHCTRL_CLREND BIT(5)
+#define CHCTRL_CLRRQ BIT(4)
+#define CHCTRL_SWRST BIT(3)
+#define CHCTRL_STG BIT(2)
+#define CHCTRL_CLREN BIT(1)
+#define CHCTRL_SETEN BIT(0)
+#define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
+ CHCTRL_CLRTC | CHCTRL_CLREND | \
+ CHCTRL_CLRRQ | CHCTRL_SWRST | \
+ CHCTRL_CLREN)
+
+#define CHCFG_DMS BIT(31)
+#define CHCFG_DEM BIT(24)
+#define CHCFG_DAD BIT(21)
+#define CHCFG_SAD BIT(20)
+#define CHCFG_REQD BIT(3)
+#define CHCFG_SEL(bits) ((bits) & 0x07)
+#define CHCFG_MEM_COPY (0x80400008)
+#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
+#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
+#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
+#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
+#define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5)
+
+#define MID_RID_MASK GENMASK(9, 0)
+#define CHCFG_MASK GENMASK(15, 10)
+#define CHCFG_DS_INVALID 0xFF
+#define DCTRL_LVINT BIT(1)
+#define DCTRL_PR BIT(0)
+#define DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR)
+
+/* LINK MODE DESCRIPTOR */
+#define HEADER_LV BIT(0)
+
+#define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16
+#define RZ_DMAC_MAX_CHANNELS 16
+#define DMAC_NR_LMDESC 64
+
+/*
+ * -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->base + offset);
+}
+
+static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
+ unsigned int offset)
+{
+ writel(val, dmac->ext_base + offset);
+}
+
+static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
+{
+ return readl(dmac->ext_base + offset);
+}
+
+static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
+ unsigned int offset, int which)
+{
+ if (which)
+ writel(val, channel->ch_base + offset);
+ else
+ writel(val, channel->ch_cmn_base + offset);
+}
+
+static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
+ unsigned int offset, int which)
+{
+ if (which)
+ return readl(channel->ch_base + offset);
+ else
+ return readl(channel->ch_cmn_base + offset);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
+ struct rz_lmdesc *lmdesc)
+{
+ u32 nxla;
+
+ channel->lmdesc.base = lmdesc;
+ channel->lmdesc.head = lmdesc;
+ channel->lmdesc.tail = lmdesc;
+ nxla = channel->lmdesc.base_dma;
+ while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
+ lmdesc->header = 0;
+ nxla += sizeof(*lmdesc);
+ lmdesc->nxla = nxla;
+ lmdesc++;
+ }
+
+ lmdesc->header = 0;
+ lmdesc->nxla = channel->lmdesc.base_dma;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
+{
+ struct rz_lmdesc *lmdesc = channel->lmdesc.head;
+
+ while (!(lmdesc->header & HEADER_LV)) {
+ lmdesc->header = 0;
+ lmdesc++;
+ if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+ channel->lmdesc.head = lmdesc;
+}
+
+static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+ u32 nxla;
+ u32 chctrl;
+ u32 chstat;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+
+ rz_dmac_lmdesc_recycle(channel);
+
+ nxla = channel->lmdesc.base_dma +
+ (sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
+ channel->lmdesc.base));
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (!(chstat & CHSTAT_EN)) {
+ chctrl = (channel->chctrl | CHCTRL_SETEN);
+ rz_dmac_ch_writel(channel, nxla, NXLA, 1);
+ rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
+ rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
+ }
+
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ unsigned long flags;
+
+ dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);
+
+ local_irq_save(flags);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ local_irq_restore(flags);
+}
+
+static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
+{
+ u32 dmars_offset = (nr / 2) * 4;
+ u32 shift = (nr % 2) * 16;
+ u32 dmars32;
+
+ dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
+ dmars32 &= ~(0xffff << shift);
+ dmars32 |= dmars << shift;
+
+ rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
+}
+
+static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
+ struct rz_dmac_desc *d = channel->desc;
+ u32 chcfg = CHCFG_MEM_COPY;
+
+ /* prepare descriptor */
+ lmdesc->sa = d->src;
+ lmdesc->da = d->dest;
+ lmdesc->tb = d->len;
+ lmdesc->chcfg = chcfg;
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ lmdesc->header = HEADER_LV;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+
+ channel->chcfg = chcfg;
+ channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
+}
+
+static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *d = channel->desc;
+ struct scatterlist *sg, *sgl = d->sg;
+ struct rz_lmdesc *lmdesc;
+ unsigned int i, sg_len = d->sgcount;
+
+ channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;
+
+ if (d->direction == DMA_DEV_TO_MEM) {
+ channel->chcfg |= CHCFG_SAD;
+ channel->chcfg &= ~CHCFG_REQD;
+ } else {
+ channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
+ }
+
+ lmdesc = channel->lmdesc.tail;
+
+ for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
+ if (d->direction == DMA_DEV_TO_MEM) {
+ lmdesc->sa = channel->src_per_address;
+ lmdesc->da = sg_dma_address(sg);
+ } else {
+ lmdesc->sa = sg_dma_address(sg);
+ lmdesc->da = channel->dst_per_address;
+ }
+
+ lmdesc->tb = sg_dma_len(sg);
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ lmdesc->header = HEADER_LV;
+ if (i == (sg_len - 1))
+ lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
+ else
+ lmdesc->chcfg = channel->chcfg;
+ if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+
+ channel->lmdesc.tail = lmdesc;
+
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ channel->chctrl = CHCTRL_SETEN;
+}
+
+static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
+{
+ struct rz_dmac_desc *d = chan->desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd)
+ return 0;
+
+ list_del(&vd->node);
+
+ switch (d->type) {
+ case RZ_DMAC_DESC_MEMCPY:
+ rz_dmac_prepare_desc_for_memcpy(chan);
+ break;
+
+ case RZ_DMAC_DESC_SLAVE_SG:
+ rz_dmac_prepare_descs_for_slave_sg(chan);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ rz_dmac_enable_hw(chan);
+
+ return 0;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+
+ while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
+ struct rz_dmac_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+
+ list_add_tail(&desc->node, &channel->ld_free);
+ channel->descs_allocated++;
+ }
+
+ if (!channel->descs_allocated)
+ return -ENOMEM;
+
+ return channel->descs_allocated;
+}
+
+static void rz_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_lmdesc *lmdesc = channel->lmdesc.base;
+ struct rz_dmac_desc *desc, *_desc;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ for (i = 0; i < DMAC_NR_LMDESC; i++)
+ lmdesc[i].header = 0;
+
+ rz_dmac_disable_hw(channel);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+
+ if (channel->mid_rid >= 0) {
+ clear_bit(channel->mid_rid, dmac->modules);
+ channel->mid_rid = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
+ kfree(desc);
+ channel->descs_allocated--;
+ }
+
+ INIT_LIST_HEAD(&channel->ld_free);
+ vchan_free_chan_resources(&channel->vc);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+
+ dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
+ __func__, channel->index, &src, &dest, len);
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ desc->type = RZ_DMAC_DESC_MEMCPY;
+ desc->src = src;
+ desc->dest = dest;
+ desc->len = len;
+ desc->direction = DMA_MEM_TO_MEM;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac_desc *desc;
+ struct scatterlist *sg;
+ unsigned int dma_length = 0;
+ int i = 0;
+
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ for_each_sg(sgl, sg, sg_len, i)
+ dma_length += sg_dma_len(sg);
+
+ desc->type = RZ_DMAC_DESC_SLAVE_SG;
+ desc->sg = sgl;
+ desc->sgcount = sg_len;
+ desc->len = dma_length;
+ desc->direction = direction;
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->src = channel->src_per_address;
+ else
+ desc->dest = channel->dst_per_address;
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
+static int rz_dmac_terminate_all(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ rz_dmac_disable_hw(channel);
+ spin_lock_irqsave(&channel->vc.lock, flags);
+ list_splice_tail_init(&channel->ld_active, &channel->ld_free);
+ list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+ vchan_get_all_descriptors(&channel->vc, &head);
+ vchan_dma_desc_free_list(&channel->vc, &head);
+
+ return 0;
+}
+
+static void rz_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue,
+ struct rz_dmac_desc, node);
+ channel->desc = desc;
+ if (vchan_issue_pending(&channel->vc)) {
+ if (rz_dmac_xfer_desc(channel) < 0)
+ dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
+ channel->index);
+ else
+ list_move_tail(channel->ld_queue.next,
+ &channel->ld_active);
+ }
+ }
+
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+}
+
+static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
+{
+ u8 i;
+ const enum dma_slave_buswidth ds_lut[] = {
+ DMA_SLAVE_BUSWIDTH_1_BYTE,
+ DMA_SLAVE_BUSWIDTH_2_BYTES,
+ DMA_SLAVE_BUSWIDTH_4_BYTES,
+ DMA_SLAVE_BUSWIDTH_8_BYTES,
+ DMA_SLAVE_BUSWIDTH_16_BYTES,
+ DMA_SLAVE_BUSWIDTH_32_BYTES,
+ DMA_SLAVE_BUSWIDTH_64_BYTES,
+ DMA_SLAVE_BUSWIDTH_128_BYTES,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
+ if (ds_lut[i] == ds)
+ return i;
+ }
+
+ return CHCFG_DS_INVALID;
+}
+
+static int rz_dmac_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ u32 val;
+
+ channel->src_per_address = config->src_addr;
+ channel->src_word_size = config->src_addr_width;
+ channel->dst_per_address = config->dst_addr;
+ channel->dst_word_size = config->dst_addr_width;
+
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= CHCFG_FILL_DDS(val);
+
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= CHCFG_FILL_SDS(val);
+
+ return 0;
+}
+
+static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ /*
+ * Placeholder: descriptors are allocated in alloc_chan_resources and
+ * freed in free_chan_resources. Lists are used to manage them, which
+ * avoids any memory allocation/free inside the DMA read/write path.
+ */
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat, chctrl;
+
+ chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
+ if (chstat & CHSTAT_ER) {
+ dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
+ channel->index, chstat);
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+ return;
+ }
+
+ chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
+ rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
+}
+
+static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+
+ if (channel) {
+ rz_dmac_irq_handle_channel(channel);
+ return IRQ_WAKE_THREAD;
+ }
+ /* handle DMAERR irq */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
+{
+ struct rz_dmac_chan *channel = dev_id;
+ struct rz_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->vc.lock, flags);
+
+ if (list_empty(&channel->ld_active)) {
+ /* Someone might have called terminate all */
+ goto out;
+ }
+
+ desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
+ vchan_cookie_complete(&desc->vd);
+ list_move_tail(channel->ld_active.next, &channel->ld_free);
+ if (!list_empty(&channel->ld_queue)) {
+ desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
+ node);
+ channel->desc = desc;
+ if (rz_dmac_xfer_desc(channel) == 0)
+ list_move_tail(channel->ld_queue.next, &channel->ld_active);
+ }
+out:
+ spin_unlock_irqrestore(&channel->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct of_phandle_args *dma_spec = arg;
+ u32 ch_cfg;
+
+ channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
+ ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
+ channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
+ CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);
+
+ return !test_and_set_bit(channel->mid_rid, dmac->modules);
+}
+
+static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+}
+
+/*
+ * -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rz_dmac_chan_probe(struct rz_dmac *dmac,
+ struct rz_dmac_chan *channel,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ struct rz_lmdesc *lmdesc;
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ channel->index = index;
+ channel->mid_rid = -EINVAL;
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (channel->irq < 0)
+ return channel->irq;
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dmac->dev, channel->irq,
+ rz_dmac_irq_handler,
+ rz_dmac_irq_handler_thread, 0,
+ irqname, channel);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ channel->irq, ret);
+ return ret;
+ }
+
+ /* Set io base address for each channel */
+ if (index < 8) {
+ channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
+ EACH_CHANNEL_OFFSET * index;
+ channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
+ } else {
+ channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
+ EACH_CHANNEL_OFFSET * (index - 8);
+ channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
+ }
+
+ /* Allocate descriptors */
+ lmdesc = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ &channel->lmdesc.base_dma, GFP_KERNEL);
+ if (!lmdesc) {
+ dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
+ return -ENOMEM;
+ }
+ rz_lmdesc_setup(channel, lmdesc);
+
+ /* Initialize register for each channel */
+ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
+
+ channel->vc.desc_free = rz_dmac_virt_desc_free;
+ vchan_init(&channel->vc, &dmac->engine);
+ INIT_LIST_HEAD(&channel->ld_queue);
+ INIT_LIST_HEAD(&channel->ld_free);
+ INIT_LIST_HEAD(&channel->ld_active);
+
+ return 0;
+}
+
+static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
+ dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rz_dmac_probe(struct platform_device *pdev)
+{
+ const char *irqname = "error";
+ struct dma_device *engine;
+ struct rz_dmac *dmac;
+ int channel_num;
+ unsigned int i;
+ int ret;
+ int irq;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = rz_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources */
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmac->base))
+ return PTR_ERR(dmac->base);
+
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+
+ /* Register interrupt handler for error */
+ irq = platform_get_irq_byname(pdev, irqname);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
+ irqname, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Register the DMA engine device. */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+ dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
+ rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
+
+ engine->dev = &pdev->dev;
+
+ engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = rz_dmac_free_chan_resources;
+ engine->device_tx_status = dma_cookie_status;
+ engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
+ engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
+ engine->device_config = rz_dmac_config;
+ engine->device_terminate_all = rz_dmac_terminate_all;
+ engine->device_issue_pending = rz_dmac_issue_pending;
+
+ engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma_set_max_seg_size(engine->dev, U32_MAX);
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register\n");
+ goto dma_register_err;
+ }
+ return 0;
+
+dma_register_err:
+ of_dma_controller_free(pdev->dev.of_node);
+err:
+ /* Channels 0..i-1 were fully initialized before the failure */
+ channel_num = i;
+ for (i = 0; i < channel_num; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+
+ return ret;
+}
+
+static int rz_dmac_remove(struct platform_device *pdev)
+{
+ struct rz_dmac *dmac = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < dmac->n_channels; i++) {
+ struct rz_dmac_chan *channel = &dmac->channels[i];
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
+ channel->lmdesc.base,
+ channel->lmdesc.base_dma);
+ }
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ return 0;
+}
+
+static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,rz-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_rz_dmac_match);
+
+static struct platform_driver rz_dmac_driver = {
+ .driver = {
+ .name = "rz-dmac",
+ .of_match_table = of_rz_dmac_match,
+ },
+ .probe = rz_dmac_probe,
+ .remove = rz_dmac_remove,
+};
+
+module_platform_driver(rz_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 1cc06900153e..5edaeb89d1e6 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -466,7 +466,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
struct usb_dmac_desc *desc,
- int sg_index)
+ unsigned int sg_index)
{
struct usb_dmac_sg *sg = desc->sg + sg_index;
u32 mem_addr = sg->mem_addr & 0xffffffff;
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 0ef5ca81ba4d..4357d2395e6b 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1265,6 +1265,7 @@ static const struct of_device_id sprd_dma_match[] = {
{ .compatible = "sprd,sc9860-dma", },
{},
};
+MODULE_DEVICE_TABLE(of, sprd_dma_match);
static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 7dd1d3d0bf06..9063c727962e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -60,6 +60,7 @@
#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
+#define STM32_DMA_SCR_TRBUFF BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */
@@ -138,8 +139,9 @@
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
-#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \
- >> 2)
+#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
+#define STM32_DMA_ALT_ACK_MODE_MASK BIT(4)
+#define STM32_DMA_ALT_ACK_MODE_GET(n) (((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -1252,6 +1254,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
+ if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
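The new TRBUFF bit is driven by the same client-supplied features word that already carries the threshold and direct-mode bits, arriving from the client's DT dma cells through stm32_dma_of_xlate. A decode sketch for a hypothetical features value with both bits set:

	/* Hypothetical features word carrying BIT(2) direct mode and BIT(4) alt ACK. */
	u32 features = BIT(2) | BIT(4);

	if (STM32_DMA_DIRECT_MODE_GET(features))	/* (features & BIT(2)) >> 2 */
		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
	if (STM32_DMA_ALT_ACK_MODE_GET(features))	/* (features & BIT(4)) >> 4 */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;	/* sets BIT(20) */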
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 4735742e826d..b1115a6d1935 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -655,9 +655,8 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
return ret;
}
- ret = pm_runtime_get_sync(tdc2dev(tdc));
+ ret = pm_runtime_resume_and_get(tdc2dev(tdc));
if (ret < 0) {
- pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@@ -869,10 +868,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ if (ret < 0)
goto rpm_disable;
- }
ret = tegra_adma_init(tdma);
if (ret)
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index 7580870ed746..34e3fc565a37 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -58,6 +58,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721e_src_ep_map[] = {
/* SA2UL */
@@ -138,6 +146,71 @@ static struct psil_ep j721e_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* CPSW9 */
PSIL_ETHERNET(0x4a00),
/* CPSW0 */
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4b9530a7bf65..a4450bc95466 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1420,8 +1420,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
chan->desc_submitcount++;
chan->desc_pendingcount--;
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
+ list_move_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
@@ -1659,6 +1658,17 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan)
}
/**
+ * xilinx_dma_device_config - Configure the DMA channel
+ * @dchan: DMA channel
+ * @config: channel configuration
+ */
+static int xilinx_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ return 0;
+}
+
+/**
* xilinx_dma_complete_descriptor - Mark the active descriptor as complete
* @chan : xilinx DMA channel
*
@@ -3077,7 +3087,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
+ dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3096,6 +3106,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_synchronize = xilinx_dma_synchronize;
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+ xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 5fecf5aa6e85..97f02f8eb03a 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -434,8 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
struct zynqmp_dma_desc_sw *child, *next;
chan->desc_free_cnt++;
- list_del(&sdesc->node);
- list_add_tail(&sdesc->node, &chan->free_list);
+ list_move_tail(&sdesc->node, &chan->free_list);
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
chan->desc_free_cnt++;
list_move_tail(&child->node, &chan->free_list);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 715e491dfbc3..4c3fd2eed1da 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -488,9 +488,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
- struct fwnet_device *dev;
int status;
- __be64 guid;
switch (ether_type) {
case ETH_P_ARP:
@@ -503,7 +501,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
goto err;
}
- dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
skb->ip_summed = CHECKSUM_NONE;
@@ -512,7 +509,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
* Parse the encapsulation header. This actually does the job of
* converting to an ethernet-like pseudo frame header.
*/
- guid = cpu_to_be64(dev->card->guid);
if (dev_hard_header(skb, net, ether_type,
is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index ced1964faf42..2ee97bab7440 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1147,6 +1147,64 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+bool qcom_scm_lmh_dcvsh_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
+}
+EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
+
+int qcom_scm_lmh_profile_change(u32 profile_id)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
+ .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
+ .args[0] = profile_id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version)
+{
+ dma_addr_t payload_phys;
+ u32 *payload_buf;
+ int ret, payload_size = 5 * sizeof(u32);
+
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
+ .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[1] = payload_size,
+ .args[2] = limit_node,
+ .args[3] = node_id,
+ .args[4] = version,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
+ if (!payload_buf)
+ return -ENOMEM;
+
+ payload_buf[0] = payload_fn;
+ payload_buf[1] = 0;
+ payload_buf[2] = payload_reg;
+ payload_buf[3] = 1;
+ payload_buf[4] = payload_val;
+
+ desc.args[0] = payload_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
+
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
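The three new exports form a small limits-hardware (LMH) interface; a caller would gate use on availability first. A minimal sketch, with the profile id purely illustrative:

	/* Hypothetical LMH consumer; profile id 1 is illustrative only. */
	#include <linux/qcom_scm.h>

	static int example_enable_lmh_profile(void)
	{
		if (!qcom_scm_lmh_dcvsh_available())
			return -EOPNOTSUPP;

		return qcom_scm_lmh_profile_change(1);
	}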
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 632fe3142462..d92156ceb3ac 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -114,6 +114,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
+#define QCOM_SCM_SVC_LMH 0x13
+#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01
+#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10
+
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 8f53837d4d3e..97178b307ed6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}
-/*
- * Helper function to query RAS EEPROM address
- *
- * @adev: amdgpu_device pointer
+/**
+ * amdgpu_atomfirmware_ras_rom_addr - Get the RAS EEPROM addr from VBIOS
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
+ * the RAS EEPROM address if the function returns true
*
- * Return true if vbios supports ras rom address reporting
+ * Return: true if the VBIOS supports RAS EEPROM address reporting,
+ * false otherwise. If true and @i2c_address is not NULL, it will
+ * contain the RAS EEPROM address.
*/
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
+ u8 *i2c_address)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;
@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
union firmware_info *firmware_info;
u8 frev, crev;
- if (i2c_address == NULL)
- return false;
-
- *i2c_address = 0;
-
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
+ firmwareinfo);
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
- index, &size, &frev, &crev, &data_offset)) {
+ index, &size, &frev, &crev,
+ &data_offset)) {
/* support firmware_info 3.4 + */
if ((frev == 3 && crev >=4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
- *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ /* The ras_rom_i2c_slave_addr should ideally
+ * be a 19-bit EEPROM address, which would be
+ * used as is by the driver; see top of
+ * amdgpu_eeprom.c.
+ *
+ * Once that is the case, 0 becomes a valid
+ * RAS EEPROM address, and the non-zero check
+ * below can be dropped, leaving only the
+ * pointer check.
+ *
+ * The reason this works right now is because
+ * ras_rom_i2c_slave_addr contains the EEPROM
+ * device type qualifier 1010b in the top 4
+ * bits.
+ */
+ if (firmware_info->v34.ras_rom_i2c_slave_addr) {
+ if (i2c_address)
+ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ return true;
+ }
}
}
- if (*i2c_address != 0)
- return true;
-
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8e5a7ac8c36f..7a7316731911 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RENOIR:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b6640291f980..f18240f87387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1181,7 +1181,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1197,6 +1202,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
/* DIMGREY_CAVEFISH */
@@ -1204,6 +1214,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
/* Aldebaran */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index d94c5419ec25..5a6857c44bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
struct drm_file *file = f->private_data;
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+ struct amdgpu_bo *root;
int ret;
ret = amdgpu_file_to_fpriv(f, &fpriv);
@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
- ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+ root = amdgpu_bo_ref(fpriv->vm.root.bo);
+ if (!root)
+ return;
+
+ ret = amdgpu_bo_reserve(root, false);
if (ret) {
DRM_ERROR("Fail to reserve bo\n");
return;
}
amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
- amdgpu_bo_unreserve(fpriv->vm.root.bo);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+
seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
dev, fn, fpriv->vm.pasid);
seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 14499f0de32d..8d682befe0d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -552,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler)
+ drm_sched_stop(&ring->sched, NULL);
+
/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev))
r = amdgpu_fence_wait_empty(ring);
@@ -611,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler) {
+ drm_sched_resubmit_jobs(&ring->sched);
+ drm_sched_start(&ring->sched, true);
+ }
+
/* enable the interrupt */
if (ring->fence_drv.irq_src)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index cb07cc3b06ed..d6aa032890ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -341,21 +341,18 @@ retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
+ if (r && r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
- size, initial_domain, args->in.alignment, r);
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
}
- return r;
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 543000304a1c..675a72ef305d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -118,7 +118,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Dummy, allocate the node but no space for it yet.
*/
@@ -182,7 +182,7 @@ err_out:
* amdgpu_gtt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 23efdc672502..9b41cb8c3de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -469,10 +469,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
- DRM_WARN("failed to load ucode (%s) ",
- amdgpu_ucode_name(ucode->ucode_id));
- DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ DRM_WARN("failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
if (!timeout) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9dc3b2d88176..dc44c946a244 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -114,27 +114,24 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
- uint8_t ras_rom_i2c_slave_addr;
+ u8 i2c_addr;
if (!control)
return false;
- control->i2c_address = 0;
-
- if (amdgpu_atomfirmware_ras_rom_addr(adev, &ras_rom_i2c_slave_addr))
- {
- switch (ras_rom_i2c_slave_addr) {
- case 0xA0:
- control->i2c_address = 0;
- return true;
- case 0xA8:
- control->i2c_address = 0x40000;
- return true;
- default:
- dev_warn(adev->dev, "RAS EEPROM I2C slave address %02x not supported",
- ras_rom_i2c_slave_addr);
- return false;
- }
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+
+ return true;
}
switch (adev->asic_type) {
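Worked through for the two wire-format addresses the removed switch handled, the normalization is equivalent:

	/* 0xA8: (0xA8 & 0x0F) >> 1 == 0x04, 0x04 << 16 == 0x40000 (old case 0xA8) */
	/* 0xA0: (0xA0 & 0x0F) >> 1 == 0x00, 0x00 << 16 == 0x00000 (old case 0xA0) */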
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 2fd77c36a1ff..7b2b0980ec41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -361,7 +361,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
@@ -487,7 +487,7 @@ error_sub:
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated VRAM again.
*/
@@ -522,7 +522,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
*
* @adev: amdgpu device pointer
- * @mem: TTM memory object
+ * @res: TTM memory object
* @offset: byte offset from the base of VRAM BO
* @length: number of bytes to export in sg_table
* @dev: the other device
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index ff2307d7ee0f..23b066bcffb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -258,6 +258,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
amdgpu_virt_fini_data_exchange(adev);
atomic_set(&adev->in_gpu_reset, 1);
+ xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 50572635d0f8..bd3b23171579 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -37,6 +37,7 @@ enum idh_request {
IDH_REQ_GPU_RESET_ACCESS,
IDH_LOG_VF_ERROR = 200,
+ IDH_READY_TO_RESET = 201,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index ba1d3ab869c1..f50045cebd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -85,11 +85,14 @@
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
-#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x3878
+#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
+#define mmBIF_INTR_CNTL_ALDE 0x0101
+#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -440,14 +443,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
}
return 0;
@@ -476,14 +488,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade
*/
uint32_t bif_intr_cntl;
- bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+ else
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
 * vector 1 for bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 42a35d9520f9..fe9a7cc8d9eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -904,14 +904,7 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_TOPAZ:
- /* Disable BACO support for the specific polaris12 SKU temporarily */
- if ((adev->pdev->device == 0x699F) &&
- (adev->pdev->revision == 0xC7) &&
- (adev->pdev->subsystem_vendor == 0x1028) &&
- (adev->pdev->subsystem_device == 0x0039))
- return false;
- else
- return amdgpu_dpm_is_baco_supported(adev);
+ return amdgpu_dpm_is_baco_supported(adev);
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 491373fcdb38..9fc8021bb0ab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2484,7 +2484,8 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
- return -EFAULT;
+ r = -EFAULT;
+ goto out;
}
svms = &p->svms;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 816723691d51..9b1fc54555ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1200,7 +1200,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->apu_flags) {
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
mmhub_read_system_context(adev, &pa_config);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cd025c12f17b..330edd666b7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1561,7 +1561,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct dc_link_settings *link_setting)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
dp_decide_training_settings(
link,
@@ -1707,7 +1707,7 @@ enum link_training_result dc_link_dp_perform_link_training(
bool skip_video_pattern)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum dp_link_encoding encoding =
dp_get_link_encoding_format(link_settings);
@@ -1923,7 +1923,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
struct dc_link_settings *link_settings,
struct dc_link_training_overrides *lt_overrides)
{
- struct link_training_settings lt_settings;
+ struct link_training_settings lt_settings = {0};
enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index dc7823d23ba8..dd38796ba30a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -510,8 +510,12 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id
vpg = dcn303_vpg_create(ctx, vpg_inst);
afmt = dcn303_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 7db268da6976..3b3721386571 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -109,7 +109,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_backlight_on;
}
@@ -119,7 +119,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)
union dmub_rb_cmd cmd;
if (!dcn31_query_backlight_info(panel_cntl, &cmd))
- return 0;
+ return false;
return cmd.panel_cntl.data.is_powered_on;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index fbed5304692d..63bbdf8b8678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2641,7 +2641,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 2d55627b05b1..249cb0aeb5ae 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2005,10 +2005,10 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 715b4225f5ee..8156729c370b 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -1335,6 +1335,30 @@ enum smu_cmn2asic_mapping_type {
#define WORKLOAD_MAP(profile, workload) \
[profile] = {1, (workload)}
+/**
+ * smu_memcpy_trailing - Copy the end of one structure into the middle of another
+ *
+ * @dst: Pointer to destination struct
+ * @first_dst_member: The member name in @dst where the overwrite begins
+ * @last_dst_member: The last member in @dst covered by the overwrite
+ * @src: Pointer to the source struct
+ * @first_src_member: The member name in @src where the copy begins
+ *
+ */
+#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \
+ src, first_src_member) \
+({ \
+ size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
+ size_t __src_size = sizeof(*(src)) - __src_offset; \
+ size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
+ size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
+ __dst_offset; \
+ BUILD_BUG_ON(__src_size != __dst_size); \
+ __builtin_memcpy((u8 *)(dst) + __dst_offset, \
+ (u8 *)(src) + __src_offset, \
+ __dst_size); \
+})
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
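Concretely, the arcturus hunk later in this diff collapses an open-coded memcpy into a single call; its expansion copies from maxvoltagestepgfx through the end of the source struct into the MaxVoltageStepGfx..BoardReserved span of the destination, with the BUILD_BUG_ON proving both spans are the same size:

	/* Call site from the arcturus hunk below (shown here for illustration). */
	smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,
			    smc_dpm_table, maxvoltagestepgfx);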
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 465ff8d2a01a..e7803ce8f67a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -27,6 +27,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/intel-family.h>
+#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
@@ -1733,6 +1736,17 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
+static bool intel_core_rkl_chk(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
+#else
+ return false;
+#endif
+}
+
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1758,7 +1772,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
- data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+ data->pcie_dpm_key_disabled =
+ intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 273df66cac14..e343cc218990 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -483,10 +483,8 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 6))
- memcpy(&smc_pptable->MaxVoltageStepGfx,
- &smc_dpm_table->maxvoltagestepgfx,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx));
-
+ smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,
+ smc_dpm_table, maxvoltagestepgfx);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index f96681700c41..a5fc5d7cb6c7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -431,16 +431,16 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
switch (smc_dpm_table->table_header.content_revision) {
case 5: /* nv10 and nv14 */
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table, I2cControllers);
break;
case 7: /* nv12 */
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
(uint8_t **)&smc_dpm_table_v4_7);
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
- sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
+ smc_dpm_table_v4_7, I2cControllers);
break;
default:
dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 6eb50b05a33c..3a3421452e57 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1869,7 +1869,7 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index b39138041141..5aa175e12a78 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -426,7 +426,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index ec8c30daf31c..ab652028e003 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -409,9 +409,8 @@ static int aldebaran_append_powerplay_table(struct smu_context *smu)
if ((smc_dpm_table->table_header.format_revision == 4) &&
(smc_dpm_table->table_header.content_revision == 10))
- memcpy(&smc_pptable->GfxMaxCurrent,
- &smc_dpm_table->GfxMaxCurrent,
- sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_10, GfxMaxCurrent));
+ smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
+ smc_dpm_table, GfxMaxCurrent);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 0f17c2522c85..627ba2eec7fd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -731,7 +731,7 @@ static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM
} else {
if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev,
- "The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq);
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index 51dbe0e3294e..d2969f68dd64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -6,7 +6,7 @@
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H
-#include <stddef.h>
+#include <linux/stddef.h>
struct intel_engine_cs;
struct intel_gt;
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
index 7c903cf19c0d..e9ae22b4f813 100644
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ b/drivers/gpu/drm/mgag200/mgag200_pll.c
@@ -124,6 +124,7 @@ static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clo
unsigned int computed;
m = n = p = s = 0;
+ delta = 0xffffffff;
permitteddelta = clock * 5 / 1000;
for (testp = 8; testp > 0; testp /= 2) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 0da5b3100ab1..dfe5f1d29763 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,25 +58,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
u8 region_width;
u64 region = iova & PAGE_MASK;
- /*
- * fls returns:
- * 1 .. 32
- *
- * 10 + fls(num_pages)
- * results in the range (11 .. 42)
- */
-
- size = round_up(size, PAGE_SIZE);
- region_width = 10 + fls(size >> PAGE_SHIFT);
- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
- /* not pow2, so must go up to the next pow2 */
- region_width += 1;
- }
+ /* The size is encoded as ceil(log2(size)) minus 1, which can be
+ * computed with fls64. The size must be clamped to hardware bounds.
+ */
+ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
+ region_width = fls64(size - 1) - 1;
region |= region_width;
/* Lock the region that needs to be updated */
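For instance, the clamped hardware minimum of 1ULL << 15 encodes as fls64(0x7fff) - 1 == 14, and a 4 GiB region (1ULL << 32) as fls64(0xffffffff) - 1 == 31, i.e. ceil(log2(size)) - 1 in both cases:

	/* Encoding sketch: region_width == ceil(log2(size)) - 1 */
	/* size == 1ULL << 15: fls64(size - 1) - 1 == 15 - 1 == 14 */
	/* size == 1ULL << 32: fls64(size - 1) - 1 == 32 - 1 == 31 */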
@@ -87,7 +78,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
if (as_nr < 0)
return 0;
@@ -104,7 +95,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size, u32 op)
+ u64 iova, u64 size, u32 op)
{
int ret;
@@ -121,7 +112,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -137,7 +128,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
@@ -251,7 +242,7 @@ static size_t get_pgsize(u64 addr, size_t size)
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+ u64 iova, u64 size)
{
if (mmu->as < 0)
return;
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 1940ff86e49a..6c5a11ef1ee8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -316,6 +316,8 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
+
#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define gpu_read(dev, reg) readl(dev->iomem + reg)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 8ab3247dbc4a..13c6b857158f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
return ret;
}
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea4add2b9717..bb9e02c31946 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1160,9 +1160,9 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
}
if (bo->deleted) {
- ttm_bo_cleanup_refs(bo, false, false, locked);
+ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
ttm_bo_put(bo);
- return 0;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
ttm_bo_del_from_lru(bo);
@@ -1216,7 +1216,7 @@ out:
if (locked)
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- return ret;
+ return ret == -EBUSY ? -ENOSPC : ret;
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 763fa6f4e07d..1c5ffe2935af 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -143,7 +143,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_resource *src_mem = bo->resource;
struct ttm_resource_manager *src_man =
ttm_manager_type(bdev, src_mem->mem_type);
- struct ttm_resource src_copy = *src_mem;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;
@@ -173,11 +172,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
- src_copy = *src_mem;
- ttm_bo_move_sync_cleanup(bo, dst_mem);
if (!src_iter->ops->maps_tt)
- ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
+
out_src_iter:
if (!dst_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cb38b1a17b09..82cbb29a05aa 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
bool apply_caching = false;
struct ttm_pool_type *pt;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 24031a8acd2d..d5cd8b5dc0bf 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -32,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
-#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index b7dc32a0c9bb..4a1115043114 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1462,7 +1462,7 @@ static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
.audio_startup = vc4_hdmi_audio_startup,
};
-struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
+static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.ops = &vc4_hdmi_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2aee356840a2..314015d9e912 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
mutex_unlock(&ring_info->ring_buffer_mutex);
kfree(ring_info->pkt_buffer);
+ ring_info->pkt_buffer = NULL;
ring_info->pkt_buffer_size = 0;
}
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 18da5a25e89a..868243dba1ee 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -17,6 +17,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/units.h>
/* PVT Common register */
#define PVT_IP_CONFIG 0x04
@@ -37,7 +38,6 @@
#define CLK_SYNTH_EN BIT(24)
#define CLK_SYS_CYCLES_MAX 514
#define CLK_SYS_CYCLES_MIN 2
-#define HZ_PER_MHZ 1000000L
#define SDIF_DISABLE 0x04
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 043f199e7bc6..9b279937a24e 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -6,12 +6,11 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
+#include <linux/units.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
-#define HZ_PER_MHZ 1000000L
-
static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 7b32dfaee9b3..3ba2378df3dd 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm.h>
-
-#define HZ_PER_KHZ 1000
+#include <linux/units.h>
#define AS73211_DRV_NAME "as73211"
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 3048862c961c..408dfbcc47b5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1309,7 +1309,7 @@ out:
static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
struct bnxt_re_pd *pd)
{
- struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_sqp_entries *sqp_tbl;
struct bnxt_re_dev *rdev;
struct bnxt_re_qp *sqp;
struct bnxt_re_ah *sah;
@@ -1317,7 +1317,7 @@ static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
rdev = qp->rdev;
/* Create a shadow QP to handle the QP1 traffic */
- sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
GFP_KERNEL);
if (!sqp_tbl)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index d9b5bbb2d011..8302469582c6 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -488,7 +488,7 @@ struct hfi1_ctxt_hist {
atomic_t data[255];
};
-struct hfi1_ctxt_hist hist = {
+static struct hfi1_ctxt_hist hist = {
.count = ATOMIC_INIT(0)
};
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a520ac8ab68c..3be36ebbf67a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -995,7 +995,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
const size_t xlt_chunk_align =
- MLX5_UMR_MTT_ALIGNMENT / sizeof(ent_size);
+ MLX5_UMR_MTT_ALIGNMENT / ent_size;
size_t size;
void *res = NULL;
@@ -1024,7 +1024,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
if (size > MLX5_SPARE_UMR_CHUNK) {
size = MLX5_SPARE_UMR_CHUNK;
- *nents = get_order(size) / ent_size;
+ *nents = size / ent_size;
res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
get_order(size));
if (res)
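To see the two bugs, take a hypothetical ent_size of 16 bytes and a 64 KiB spare chunk: sizeof(ent_size) is sizeof(size_t), 8 on 64-bit, regardless of the entry size, and get_order(64 KiB) is 4 with 4 KiB pages, so the old nents computation yielded 4 / 16 == 0 where the corrected one yields 4096:

	/* Hypothetical sizes: ent_size == 16 bytes, spare chunk == SZ_64K. */
	size_t nents_old = get_order(SZ_64K) / 16;	/* 4 / 16 == 0 entries */
	size_t nents_new = SZ_64K / 16;			/* 4096 entries */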
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d57e49de6650..452e2355d24e 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -403,9 +403,11 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
}
#define QIB_DIAGC_ATTR(N) \
+ static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
- .counter = &((struct qib_ibport *)0)->rvp.n_##N - (u64 *)0, \
+ .counter = \
+ offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64) \
}
QIB_DIAGC_ATTR(rc_resends);
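The old initializer derived the counter index by subtracting a null pointer, which is undefined behaviour; offsetof() expresses the same u64 index directly, and the new static_assert keeps the compile-time guarantee that the counter really is a u64 field. Expanded for the first counter, the macro now yields roughly:

	/* Expansion sketch for QIB_DIAGC_ATTR(rc_resends). */
	static_assert(&((struct qib_ibport *)0)->rvp.n_rc_resends != (u64 *)NULL);
	static struct qib_diagc_attr qib_diagc_attr_rc_resends = {
		.attr = __ATTR(rc_resends, 0664, diagc_attr_show, diagc_attr_store),
		.counter = offsetof(struct qib_ibport, rvp.n_rc_resends) / sizeof(u64)
	};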
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index f798922a4598..882c3c8ba399 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -28,10 +28,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static bool use_ktime = true;
-module_param(use_ktime, bool, 0400);
-MODULE_PARM_DESC(use_ktime, "Use ktime for measuring I/O speed");
-
/*
* Option parsing.
*/
@@ -110,7 +106,6 @@ struct analog_port {
char cooked;
int bads;
int reads;
- int speed;
int loop;
int fuzz;
int axes[4];
@@ -120,66 +115,6 @@ struct analog_port {
};
/*
- * Time macros.
- */
-
-#ifdef __i386__
-
-#include <linux/i8253.h>
-
-#define GET_TIME(x) do { if (boot_cpu_has(X86_FEATURE_TSC)) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
-#define DELTA(x,y) (boot_cpu_has(X86_FEATURE_TSC) ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
-#define TIME_NAME (boot_cpu_has(X86_FEATURE_TSC)?"TSC":"PIT")
-static unsigned int get_time_pit(void)
-{
- unsigned long flags;
- unsigned int count;
-
- raw_spin_lock_irqsave(&i8253_lock, flags);
- outb_p(0x00, 0x43);
- count = inb_p(0x40);
- count |= inb_p(0x40) << 8;
- raw_spin_unlock_irqrestore(&i8253_lock, flags);
-
- return count;
-}
-#elif defined(__x86_64__)
-#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_PPC) || defined(CONFIG_RISCV)
-#define GET_TIME(x) do { x = get_cycles(); } while (0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "get_cycles"
-#else
-#define FAKE_TIME
-static unsigned long analog_faketime = 0;
-#define GET_TIME(x) do { x = analog_faketime++; } while(0)
-#define DELTA(x,y) ((y)-(x))
-#define TIME_NAME "Unreliable"
-#warning Precise timer not defined for this architecture.
-#endif
-
-static inline u64 get_time(void)
-{
- if (use_ktime) {
- return ktime_get_ns();
- } else {
- unsigned int x;
- GET_TIME(x);
- return x;
- }
-}
-
-static inline unsigned int delta(u64 x, u64 y)
-{
- if (use_ktime)
- return y - x;
- else
- return DELTA((unsigned int)x, (unsigned int)y);
-}
-
-/*
* analog_decode() decodes analog joystick data and reports input events.
*/
@@ -234,18 +169,18 @@ static void analog_decode(struct analog *analog, int *axes, int *initial, int bu
static int analog_cooked_read(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
- u64 time[4], start, loop, now;
+ ktime_t time[4], start, loop, now;
unsigned int loopout, timeout;
unsigned char data[4], this, last;
unsigned long flags;
int i, j;
loopout = (ANALOG_LOOP_TIME * port->loop) / 1000;
- timeout = ANALOG_MAX_TIME * port->speed;
+ timeout = ANALOG_MAX_TIME * NSEC_PER_MSEC;
local_irq_save(flags);
gameport_trigger(gameport);
- now = get_time();
+ now = ktime_get();
local_irq_restore(flags);
start = now;
@@ -258,16 +193,16 @@ static int analog_cooked_read(struct analog_port *port)
local_irq_disable();
this = gameport_read(gameport) & port->mask;
- now = get_time();
+ now = ktime_get();
local_irq_restore(flags);
- if ((last ^ this) && (delta(loop, now) < loopout)) {
+ if ((last ^ this) && (ktime_sub(now, loop) < loopout)) {
data[i] = last ^ this;
time[i] = now;
i++;
}
- } while (this && (i < 4) && (delta(start, now) < timeout));
+ } while (this && (i < 4) && (ktime_sub(now, start) < timeout));
this <<= 4;
@@ -275,7 +210,7 @@ static int analog_cooked_read(struct analog_port *port)
this |= data[i];
for (j = 0; j < 4; j++)
if (data[i] & (1 << j))
- port->axes[j] = (delta(start, time[i]) << ANALOG_FUZZ_BITS) / port->loop;
+ port->axes[j] = ((u32)ktime_sub(time[i], start) << ANALOG_FUZZ_BITS) / port->loop;
}
return -(this != port->mask);
@@ -375,38 +310,22 @@ static void analog_calibrate_timer(struct analog_port *port)
{
struct gameport *gameport = port->gameport;
unsigned int i, t, tx;
- u64 t1, t2, t3;
+ ktime_t t1, t2, t3;
unsigned long flags;
- if (use_ktime) {
- port->speed = 1000000;
- } else {
- local_irq_save(flags);
- t1 = get_time();
-#ifdef FAKE_TIME
- analog_faketime += 830;
-#endif
- mdelay(1);
- t2 = get_time();
- t3 = get_time();
- local_irq_restore(flags);
-
- port->speed = delta(t1, t2) - delta(t2, t3);
- }
-
tx = ~0;
for (i = 0; i < 50; i++) {
local_irq_save(flags);
- t1 = get_time();
+ t1 = ktime_get();
for (t = 0; t < 50; t++) {
gameport_read(gameport);
- t2 = get_time();
+ t2 = ktime_get();
}
- t3 = get_time();
+ t3 = ktime_get();
local_irq_restore(flags);
udelay(i);
- t = delta(t1, t2) - delta(t2, t3);
+ t = ktime_sub(t2, t1) - ktime_sub(t3, t2);
if (t < tx) tx = t;
}
@@ -611,7 +530,7 @@ static int analog_init_port(struct gameport *gameport, struct gameport_driver *d
t = gameport_read(gameport);
msleep(ANALOG_MAX_TIME);
port->mask = (gameport_read(gameport) ^ t) & t & 0xf;
- port->fuzz = (port->speed * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS;
+ port->fuzz = (NSEC_PER_MSEC * ANALOG_FUZZ_MAGIC) / port->loop / 1000 + ANALOG_FUZZ_BITS;
for (i = 0; i < ANALOG_INIT_RETRIES; i++) {
if (!analog_cooked_read(port))
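With the per-architecture cycle-counter macros gone, every measurement above comes from ktime_get() in nanoseconds, so there is no calibrated `speed` factor to carry around. A minimal sketch of timing a polling loop this way; `poll_hw()` is a hypothetical stand-in for the gameport read:

```c
#include <linux/ktime.h>

static void poll_hw(void) { }		/* hypothetical device poll */

static u64 timed_poll(void)
{
	ktime_t start = ktime_get();	/* monotonic clock, ns units */
	ktime_t now;

	do {
		poll_hw();
		now = ktime_get();
	} while (ktime_sub(now, start) < 2 * NSEC_PER_MSEC);

	return ktime_to_ns(ktime_sub(now, start));
}
```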
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 40a070a2e7f5..e75650e98c9e 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -210,7 +210,7 @@ config KEYBOARD_LKKBD
select SERIO
help
Say Y here if you want to use a LK201 or LK401 style serial
- keyboard. This keyboard is also useable on PCs if you attach
+ keyboard. This keyboard is also usable on PCs if you attach
it with the inputattach program. The connector pinout is
described within lkkbd.c.
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index 6d5be48d1b3d..bf72ab8df817 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -193,7 +193,7 @@ static const struct of_device_id adc_keys_of_match[] = {
MODULE_DEVICE_TABLE(of, adc_keys_of_match);
#endif
-static struct platform_driver __refdata adc_keys_driver = {
+static struct platform_driver adc_keys_driver = {
.driver = {
.name = "adc_keys",
.of_match_table = of_match_ptr(adc_keys_of_match),
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 90a59b973d00..1592da4de336 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/platform_data/adp5588.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 654e0476406b..bdd264459a97 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/input/adp5589.h>
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8194333d612..e0e931e796fa 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -157,7 +157,7 @@ static int ep93xx_keypad_open(struct input_dev *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
- clk_enable(keypad->clk);
+ clk_prepare_enable(keypad->clk);
keypad->enabled = true;
}
@@ -169,7 +169,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
struct ep93xx_keypad *keypad = input_get_drvdata(pdev);
if (keypad->enabled) {
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
keypad->enabled = false;
}
}
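clk_enable() only gates an already-prepared clock; providers whose enable sequence can sleep (PLL lock, I2C access) do that work in the prepare step, which this driver never performed. The paired helpers keep the two stages balanced. A short sketch of the pattern, with hypothetical open/close paths:

```c
#include <linux/clk.h>

/* Hypothetical open path: prepare (may sleep) and then enable. */
static int dev_clk_on(struct clk *clk)
{
	return clk_prepare_enable(clk);
}

/* Hypothetical close path: must mirror the enable path exactly. */
static void dev_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);
}
```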
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 498cde376981..dd5227cf8696 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -309,18 +309,6 @@ config INPUT_GPIO_VIBRA
To compile this driver as a module, choose M here: the module will be
called gpio-vibra.
-config INPUT_IXP4XX_BEEPER
- tristate "IXP4XX Beeper support"
- depends on ARCH_IXP4XX
- help
- If you say yes here, you can connect a beeper to the
- ixp4xx gpio pins. This is used by the LinkSys NSLU2.
-
- If unsure, say Y.
-
- To compile this driver as a module, choose M here: the
- module will be called ixp4xx-beeper.
-
config INPUT_COBALT_BTNS
tristate "Cobalt button interface"
depends on MIPS_COBALT
@@ -811,16 +799,6 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
-config INPUT_SIRFSOC_ONKEY
- tristate "CSR SiRFSoC power on/off/suspend key support"
- depends on ARCH_SIRF && OF
- default y
- help
- Say Y here if you want to support for the SiRFSoC power on/off/suspend key
- in Linux, after you press the onkey, system will suspend.
-
- If unsure, say N.
-
config INPUT_IDEAPAD_SLIDEBAR
tristate "IdeaPad Laptop Slidebar"
depends on INPUT
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f593beed7e05..b92c53a6b5ae 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o
-obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
@@ -74,7 +73,6 @@ obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
-obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
deleted file mode 100644
index 05018d0c97c7..000000000000
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Generic IXP4xx beeper driver
- *
- * Copyright (C) 2005 Tower Technologies
- *
- * based on nslu2-io.c
- * Copyright (C) 2004 Karen Spearel
- *
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- * Maintainers: http://www.nslu2-linux.org/
- */
-
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <mach/hardware.h>
-
-MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
-MODULE_DESCRIPTION("ixp4xx beeper driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ixp4xx-beeper");
-
-static DEFINE_SPINLOCK(beep_lock);
-
-static int ixp4xx_timer2_irq;
-
-static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&beep_lock, flags);
-
- if (count) {
- gpio_direction_output(pin, 0);
- *IXP4XX_OSRT2 = (count & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE;
- } else {
- gpio_direction_output(pin, 1);
- gpio_direction_input(pin);
- *IXP4XX_OSRT2 = 0;
- }
-
- spin_unlock_irqrestore(&beep_lock, flags);
-}
-
-static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
- unsigned int pin = (unsigned int) input_get_drvdata(dev);
- unsigned int count = 0;
-
- if (type != EV_SND)
- return -1;
-
- switch (code) {
- case SND_BELL:
- if (value)
- value = 1000;
- case SND_TONE:
- break;
- default:
- return -1;
- }
-
- if (value > 20 && value < 32767)
- count = (ixp4xx_timer_freq / (value * 4)) - 1;
-
- ixp4xx_spkr_control(pin, count);
-
- return 0;
-}
-
-static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
-{
- unsigned int pin = (unsigned int) dev_id;
-
- /* clear interrupt */
- *IXP4XX_OSST = IXP4XX_OSST_TIMER_2_PEND;
-
- /* flip the beeper output */
- gpio_set_value(pin, !gpio_get_value(pin));
-
- return IRQ_HANDLED;
-}
-
-static int ixp4xx_spkr_probe(struct platform_device *dev)
-{
- struct input_dev *input_dev;
- int irq;
- int err;
-
- input_dev = input_allocate_device();
- if (!input_dev)
- return -ENOMEM;
-
- input_set_drvdata(input_dev, (void *) dev->id);
-
- input_dev->name = "ixp4xx beeper";
- input_dev->phys = "ixp4xx/gpio";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x001f;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &dev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_SND);
- input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
- input_dev->event = ixp4xx_spkr_event;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- err = irq;
- goto err_free_device;
- }
-
- err = gpio_request(dev->id, "ixp4-beeper");
- if (err)
- goto err_free_device;
-
- err = request_irq(irq, &ixp4xx_spkr_interrupt,
- IRQF_NO_SUSPEND, "ixp4xx-beeper",
- (void *) dev->id);
- if (err)
- goto err_free_gpio;
- ixp4xx_timer2_irq = irq;
-
- err = input_register_device(input_dev);
- if (err)
- goto err_free_irq;
-
- platform_set_drvdata(dev, input_dev);
-
- return 0;
-
- err_free_irq:
- free_irq(irq, (void *)dev->id);
- err_free_gpio:
- gpio_free(dev->id);
- err_free_device:
- input_free_device(input_dev);
-
- return err;
-}
-
-static int ixp4xx_spkr_remove(struct platform_device *dev)
-{
- struct input_dev *input_dev = platform_get_drvdata(dev);
- unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
-
- input_unregister_device(input_dev);
-
- /* turn the speaker off */
- disable_irq(ixp4xx_timer2_irq);
- ixp4xx_spkr_control(pin, 0);
-
- free_irq(ixp4xx_timer2_irq, (void *)dev->id);
- gpio_free(dev->id);
-
- return 0;
-}
-
-static void ixp4xx_spkr_shutdown(struct platform_device *dev)
-{
- struct input_dev *input_dev = platform_get_drvdata(dev);
- unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
-
- /* turn off the speaker */
- disable_irq(ixp4xx_timer2_irq);
- ixp4xx_spkr_control(pin, 0);
-}
-
-static struct platform_driver ixp4xx_spkr_platform_driver = {
- .driver = {
- .name = "ixp4xx-beeper",
- },
- .probe = ixp4xx_spkr_probe,
- .remove = ixp4xx_spkr_remove,
- .shutdown = ixp4xx_spkr_shutdown,
-};
-module_platform_driver(ixp4xx_spkr_platform_driver);
-
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 10e3fc0eac6e..33609603245d 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -284,7 +284,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
}
if (pwrkey->data->supports_ps_hold_poff_config) {
- pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify,
+ pwrkey->reboot_notifier.notifier_call = pm8941_reboot_notify;
error = register_reboot_notifier(&pwrkey->reboot_notifier);
if (error) {
dev_err(&pdev->dev, "failed to register reboot notifier: %d\n",
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
deleted file mode 100644
index 7982bf8fb839..000000000000
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Power key driver for SiRF PrimaII
- *
- * Copyright (c) 2013 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
- * company.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/rtc/sirfsoc_rtciobrg.h>
-#include <linux/of.h>
-#include <linux/workqueue.h>
-
-struct sirfsoc_pwrc_drvdata {
- u32 pwrc_base;
- struct input_dev *input;
- struct delayed_work work;
-};
-
-#define PWRC_ON_KEY_BIT (1 << 0)
-
-#define PWRC_INT_STATUS 0xc
-#define PWRC_INT_MASK 0x10
-#define PWRC_PIN_STATUS 0x14
-#define PWRC_KEY_DETECT_UP_TIME 20 /* ms*/
-
-static int sirfsoc_pwrc_is_on_key_down(struct sirfsoc_pwrc_drvdata *pwrcdrv)
-{
- u32 state = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
- PWRC_PIN_STATUS);
- return !(state & PWRC_ON_KEY_BIT); /* ON_KEY is active low */
-}
-
-static void sirfsoc_pwrc_report_event(struct work_struct *work)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv =
- container_of(work, struct sirfsoc_pwrc_drvdata, work.work);
-
- if (sirfsoc_pwrc_is_on_key_down(pwrcdrv)) {
- schedule_delayed_work(&pwrcdrv->work,
- msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
- } else {
- input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 0);
- input_sync(pwrcdrv->input);
- }
-}
-
-static irqreturn_t sirfsoc_pwrc_isr(int irq, void *dev_id)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_id;
- u32 int_status;
-
- int_status = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base +
- PWRC_INT_STATUS);
- sirfsoc_rtc_iobrg_writel(int_status & ~PWRC_ON_KEY_BIT,
- pwrcdrv->pwrc_base + PWRC_INT_STATUS);
-
- input_event(pwrcdrv->input, EV_KEY, KEY_POWER, 1);
- input_sync(pwrcdrv->input);
- schedule_delayed_work(&pwrcdrv->work,
- msecs_to_jiffies(PWRC_KEY_DETECT_UP_TIME));
-
- return IRQ_HANDLED;
-}
-
-static void sirfsoc_pwrc_toggle_interrupts(struct sirfsoc_pwrc_drvdata *pwrcdrv,
- bool enable)
-{
- u32 int_mask;
-
- int_mask = sirfsoc_rtc_iobrg_readl(pwrcdrv->pwrc_base + PWRC_INT_MASK);
- if (enable)
- int_mask |= PWRC_ON_KEY_BIT;
- else
- int_mask &= ~PWRC_ON_KEY_BIT;
- sirfsoc_rtc_iobrg_writel(int_mask, pwrcdrv->pwrc_base + PWRC_INT_MASK);
-}
-
-static int sirfsoc_pwrc_open(struct input_dev *input)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
-
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
-
- return 0;
-}
-
-static void sirfsoc_pwrc_close(struct input_dev *input)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = input_get_drvdata(input);
-
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
- cancel_delayed_work_sync(&pwrcdrv->work);
-}
-
-static const struct of_device_id sirfsoc_pwrc_of_match[] = {
- { .compatible = "sirf,prima2-pwrc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
-
-static int sirfsoc_pwrc_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct sirfsoc_pwrc_drvdata *pwrcdrv;
- int irq;
- int error;
-
- pwrcdrv = devm_kzalloc(&pdev->dev, sizeof(struct sirfsoc_pwrc_drvdata),
- GFP_KERNEL);
- if (!pwrcdrv) {
- dev_info(&pdev->dev, "Not enough memory for the device data\n");
- return -ENOMEM;
- }
-
- /*
- * We can't use of_iomap because pwrc is not mapped in memory,
- * the so-called base address is only offset in rtciobrg
- */
- error = of_property_read_u32(np, "reg", &pwrcdrv->pwrc_base);
- if (error) {
- dev_err(&pdev->dev,
- "unable to find base address of pwrc node in dtb\n");
- return error;
- }
-
- pwrcdrv->input = devm_input_allocate_device(&pdev->dev);
- if (!pwrcdrv->input)
- return -ENOMEM;
-
- pwrcdrv->input->name = "sirfsoc pwrckey";
- pwrcdrv->input->phys = "pwrc/input0";
- pwrcdrv->input->evbit[0] = BIT_MASK(EV_KEY);
- input_set_capability(pwrcdrv->input, EV_KEY, KEY_POWER);
-
- INIT_DELAYED_WORK(&pwrcdrv->work, sirfsoc_pwrc_report_event);
-
- pwrcdrv->input->open = sirfsoc_pwrc_open;
- pwrcdrv->input->close = sirfsoc_pwrc_close;
-
- input_set_drvdata(pwrcdrv->input, pwrcdrv);
-
- /* Make sure the device is quiesced */
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, false);
-
- irq = platform_get_irq(pdev, 0);
- error = devm_request_irq(&pdev->dev, irq,
- sirfsoc_pwrc_isr, 0,
- "sirfsoc_pwrc_int", pwrcdrv);
- if (error) {
- dev_err(&pdev->dev, "unable to claim irq %d, error: %d\n",
- irq, error);
- return error;
- }
-
- error = input_register_device(pwrcdrv->input);
- if (error) {
- dev_err(&pdev->dev,
- "unable to register input device, error: %d\n",
- error);
- return error;
- }
-
- dev_set_drvdata(&pdev->dev, pwrcdrv);
- device_init_wakeup(&pdev->dev, 1);
-
- return 0;
-}
-
-static int __maybe_unused sirfsoc_pwrc_resume(struct device *dev)
-{
- struct sirfsoc_pwrc_drvdata *pwrcdrv = dev_get_drvdata(dev);
- struct input_dev *input = pwrcdrv->input;
-
- /*
- * Do not mask pwrc interrupt as we want pwrc work as a wakeup source
- * if users touch X_ONKEY_B, see arch/arm/mach-prima2/pm.c
- */
- mutex_lock(&input->mutex);
- if (input_device_enabled(input))
- sirfsoc_pwrc_toggle_interrupts(pwrcdrv, true);
- mutex_unlock(&input->mutex);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sirfsoc_pwrc_pm_ops, NULL, sirfsoc_pwrc_resume);
-
-static struct platform_driver sirfsoc_pwrc_driver = {
- .probe = sirfsoc_pwrc_probe,
- .driver = {
- .name = "sirfsoc-pwrc",
- .pm = &sirfsoc_pwrc_pm_ops,
- .of_match_table = sirfsoc_pwrc_of_match,
- }
-};
-
-module_platform_driver(sirfsoc_pwrc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
-MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
-MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index dc4a240f4489..3c84deefa327 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -55,8 +55,9 @@
#define ETP_FW_PAGE_SIZE_512 512
#define ETP_FW_SIGNATURE_SIZE 6
-#define ETP_PRODUCT_ID_DELBIN 0x00C2
+#define ETP_PRODUCT_ID_WHITEBOX 0x00B8
#define ETP_PRODUCT_ID_VOXEL 0x00BF
+#define ETP_PRODUCT_ID_DELBIN 0x00C2
#define ETP_PRODUCT_ID_MAGPIE 0x0120
#define ETP_PRODUCT_ID_BOBBA 0x0121
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index dad22c1ea6a0..47af62c12267 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -105,6 +105,7 @@ static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
u32 quirks;
} elan_i2c_quirks[] = {
{ 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
{ 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
{ 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
{ 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 3ac57a91ede4..51b68501896c 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -220,16 +220,4 @@ static struct parport_driver parkbd_parport_driver = {
.detach = parkbd_detach,
.devmodel = true,
};
-
-static int __init parkbd_init(void)
-{
- return parport_register_driver(&parkbd_parport_driver);
-}
-
-static void __exit parkbd_exit(void)
-{
- parport_unregister_driver(&parkbd_parport_driver);
-}
-
-module_init(parkbd_init);
-module_exit(parkbd_exit);
+module_parport_driver(parkbd_parport_driver);
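module_parport_driver() generates the init/exit pair that was deleted above; most bus types offer an equivalent helper for drivers whose module hooks only register and unregister. Roughly what the macro expands to here (the exact generated names differ):

```c
#include <linux/module.h>
#include <linux/parport.h>

/* Approximate expansion of module_parport_driver(parkbd_parport_driver). */
static int __init parkbd_parport_driver_init(void)
{
	return parport_register_driver(&parkbd_parport_driver);
}
module_init(parkbd_parport_driver_init);

static void __exit parkbd_parport_driver_exit(void)
{
	parport_unregister_driver(&parkbd_parport_driver);
}
module_exit(parkbd_parport_driver_exit);
```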
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index ad454cd2855a..d4e74738c5a8 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -932,7 +932,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- JASTEC USB Touch Controller/DigiTech DTR-02U
- Zytronic controllers
- Elo TouchSystems 2700 IntelliTouch
- - EasyTouch USB Touch Controller from Data Modul
+ - EasyTouch USB Touch Controller from Data Module
- e2i (Mimo monitors)
Have a look at <http://linux.chapter7.ch/touchkit/> for
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 263de3bfb6cd..bb2e1cbffba7 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -899,6 +899,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
* the identification registers.
*/
switch (rdbuf[0]) {
+ case 0x11: /* EDT EP0110M09 */
case 0x35: /* EDT EP0350M09 */
case 0x43: /* EDT EP0430M09 */
case 0x50: /* EDT EP0500M09 */
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 0efd1a1bb192..9fa3b0e421be 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -54,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
+ TYPE_MMS134S = 134,
TYPE_MMS136 = 136,
TYPE_MMS152 = 152,
TYPE_MMS345L = 345,
@@ -212,7 +213,7 @@ static irqreturn_t mms114_interrupt(int irq, void *dev_id)
goto out;
/* MMS136 has slightly different event size */
- if (data->type == TYPE_MMS136)
+ if (data->type == TYPE_MMS134S || data->type == TYPE_MMS136)
touch_size = packet_size / MMS136_EVENT_SIZE;
else
touch_size = packet_size / MMS114_EVENT_SIZE;
@@ -281,6 +282,7 @@ static int mms114_get_version(struct mms114_data *data)
break;
case TYPE_MMS114:
+ case TYPE_MMS134S:
case TYPE_MMS136:
error = __mms114_read_reg(data, MMS114_TSP_REV, 6, buf);
if (error)
@@ -304,8 +306,9 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* Only MMS114 and MMS136 have configuration and power on registers */
- if (data->type != TYPE_MMS114 && data->type != TYPE_MMS136)
+ /* MMS114, MMS134S and MMS136 have configuration and power on registers */
+ if (data->type != TYPE_MMS114 && data->type != TYPE_MMS134S &&
+ data->type != TYPE_MMS136)
return 0;
error = mms114_set_active(data, true);
@@ -487,7 +490,8 @@ static int mms114_probe(struct i2c_client *client,
0, data->props.max_y, 0, 0);
}
- if (data->type == TYPE_MMS114 || data->type == TYPE_MMS136) {
+ if (data->type == TYPE_MMS114 || data->type == TYPE_MMS134S ||
+ data->type == TYPE_MMS136) {
/*
* The firmware handles movement and pressure fuzz, so
* don't duplicate that in software.
@@ -612,6 +616,9 @@ static const struct of_device_id mms114_dt_match[] = {
.compatible = "melfas,mms114",
.data = (void *)TYPE_MMS114,
}, {
+ .compatible = "melfas,mms134s",
+ .data = (void *)TYPE_MMS134S,
+ }, {
.compatible = "melfas,mms136",
.data = (void *)TYPE_MMS136,
}, {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ad8618b3530..124c41adeca1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -82,7 +82,7 @@ config IOMMU_DEBUGFS
choice
prompt "IOMMU default domain type"
depends on IOMMU_API
- default IOMMU_DEFAULT_DMA_LAZY if AMD_IOMMU || INTEL_IOMMU
+ default IOMMU_DEFAULT_DMA_LAZY if X86 || IA64
default IOMMU_DEFAULT_DMA_STRICT
help
Choose the type of IOMMU domain used to manage DMA API usage by
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index bdcf167b4afe..2a822b229bd0 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -297,6 +297,22 @@ int amd_iommu_get_num_iommus(void)
return amd_iommus_present;
}
+#ifdef CONFIG_IRQ_REMAP
+static bool check_feature_on_all_iommus(u64 mask)
+{
+ bool ret = false;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ ret = iommu_feature(iommu, mask);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+#endif
+
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
* Default to IVHD EFR since it is available sooner
@@ -813,9 +829,9 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
return 0;
}
-#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
+#ifdef CONFIG_IRQ_REMAP
u64 entry;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
@@ -845,25 +861,9 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
err_out:
free_ga_log(iommu);
return -EINVAL;
-}
-#endif /* CONFIG_IRQ_REMAP */
-
-static int iommu_init_ga(struct amd_iommu *iommu)
-{
- int ret = 0;
-
-#ifdef CONFIG_IRQ_REMAP
- /* Note: We have already checked GASup from IVRS table.
- * Now, we need to make sure that GAMSup is set.
- */
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- !iommu_feature(iommu, FEATURE_GAM_VAPIC))
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
-
- ret = iommu_init_ga_log(iommu);
+#else
+ return 0;
#endif /* CONFIG_IRQ_REMAP */
-
- return ret;
}
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
@@ -1845,7 +1845,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
- ret = iommu_init_ga(iommu);
+ ret = iommu_init_ga_log(iommu);
if (ret)
return ret;
@@ -2479,6 +2479,14 @@ static void early_enable_iommus(void)
}
#ifdef CONFIG_IRQ_REMAP
+ /*
+ * Note: We have already checked GASup from IVRS table.
+ * Now, we need to make sure that GAMSup is set.
+ */
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !check_feature_on_all_iommus(FEATURE_GAM_VAPIC))
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
+
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 2014fe8695ac..0c228787704f 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -514,9 +514,6 @@ static void load_pasid(struct mm_struct *mm, u32 pasid)
{
mutex_lock(&mm->context.lock);
- /* Synchronize with READ_ONCE in update_pasid(). */
- smp_store_release(&mm->pasid, pasid);
-
/* Update PASID MSR on all CPUs running the mm's tasks. */
on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
@@ -792,7 +789,19 @@ prq_retry:
goto prq_retry;
}
+ /*
+ * A work item in the IO page fault workqueue may try to lock
+ * pasid_mutex now. Holding pasid_mutex while waiting in
+ * iopf_queue_flush_dev() for all work items in the workqueue to
+ * finish may cause a deadlock.
+ *
+ * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
+ * Unlock it to allow the work items to be handled while waiting
+ * for them to finish.
+ */
+ lockdep_assert_held(&pasid_mutex);
+ mutex_unlock(&pasid_mutex);
iopf_queue_flush_dev(dev);
+ mutex_lock(&pasid_mutex);
/*
* Perform steps described in VT-d spec CH7.10 to drain page
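The fix above is the classic unlock-around-flush shape: a thread holding a mutex waits for work items that themselves need the same mutex, so the lock is dropped for the duration of the flush. A generic sketch of the pattern, with hypothetical names:

```c
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(obj_mutex);

/* Hypothetical: work items queued elsewhere also take obj_mutex. */
static void drain_object_work(struct workqueue_struct *wq)
{
	mutex_lock(&obj_mutex);
	/* ... detach the object so no new work can reference it ... */

	/*
	 * Deadlock hazard: flush_workqueue() waits for work items that
	 * may be blocked on obj_mutex, so drop the lock around the flush.
	 */
	mutex_unlock(&obj_mutex);
	flush_workqueue(wq);
	mutex_lock(&obj_mutex);

	/* ... finish teardown under the lock ... */
	mutex_unlock(&obj_mutex);
}
```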
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 0af42fb93a49..9e8bc802ac05 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -519,6 +519,7 @@ retry:
return new_iova->pfn_lo;
}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
/**
* free_iova_fast - free iova pfn range into rcache
@@ -536,6 +537,7 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
free_iova(iovad, pfn);
}
+EXPORT_SYMBOL_GPL(free_iova_fast);
#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 94fb63a7b357..fe63d5ee201b 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -570,7 +570,7 @@ fail_msg_node:
fail_db_node:
of_node_put(smu->db_node);
fail_bootmem:
- memblock_free(__pa(smu), sizeof(struct smu_device));
+ memblock_free_ptr(smu, sizeof(struct smu_device));
smu = NULL;
fail_np:
of_node_put(np);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b03eabc1ed7c..2111daaacaba 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -809,14 +809,9 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- int blocksize = *(int *) data, id;
- bool rc;
+ int blocksize = *(int *) data;
- id = dax_read_lock();
- rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
- dax_read_unlock(id);
-
- return rc;
+ return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
}
/* Check devices support synchronous DAX */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 84e9145b1714..a011d09cb0fa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -654,7 +654,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
return 0;
}
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index a3ce5500d355..0f08c05333ea 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/units.h>
#include <media/media-entity.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
@@ -64,7 +65,6 @@
/* Test pattern control */
#define OV02A10_REG_TEST_PATTERN 0xb6
-#define HZ_PER_MHZ 1000000L
#define OV02A10_LINK_FREQ_390MHZ (390 * HZ_PER_MHZ)
#define OV02A10_ECLK_FREQ (24 * HZ_PER_MHZ)
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 5d8b48288cf4..6ebe3c7001ff 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -10,4 +10,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
- common/command_submission.o common/firmware_if.o
+ common/command_submission.o common/firmware_if.o \
+ common/state_dump.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 719168c980a4..8132a84698d5 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -314,8 +314,6 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
- if (rc < 0)
- rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
spin_unlock(&mgr->cb_lock);
if (rc < 0) {
@@ -552,7 +550,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma->vm_private_data = cb;
- rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
+ rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
cb->bus_address, cb->size);
if (rc) {
spin_lock(&cb->lock);
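The deleted fallback retried idr_alloc() with GFP_KERNEL while still holding a spinlock, where a sleeping allocation is not allowed; under a spinlock only atomic allocation is legal. A sketch of the two correct shapes, using a hypothetical handle table:

```c
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(handles);			/* hypothetical handle table */
static DEFINE_SPINLOCK(handles_lock);

/* Inside a spinlock, only an atomic allocation is permitted. */
static int add_handle_atomic(void *obj)
{
	int id;

	spin_lock(&handles_lock);
	id = idr_alloc(&handles, obj, 1, 0, GFP_ATOMIC);
	spin_unlock(&handles_lock);
	return id;
}

/* Or preload outside the lock so the locked step can't sleep. */
static int add_handle_preload(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep, lock not held */
	spin_lock(&handles_lock);
	id = idr_alloc(&handles, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&handles_lock);
	idr_preload_end();
	return id;
}
```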
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 80c60fb41bbc..7b0516cf808b 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -38,7 +38,11 @@ static void hl_sob_reset(struct kref *ref)
kref);
struct hl_device *hdev = hw_sob->hdev;
+ dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
+
hdev->asic_funcs->reset_sob(hdev, hw_sob);
+
+ hw_sob->need_reset = false;
}
void hl_sob_reset_error(struct kref *ref)
@@ -52,6 +56,24 @@ void hl_sob_reset_error(struct kref *ref)
hw_sob->q_idx, hw_sob->sob_id);
}
+void hw_sob_put(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_put(&hw_sob->kref, hl_sob_reset);
+}
+
+static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_put(&hw_sob->kref, hl_sob_reset_error);
+}
+
+void hw_sob_get(struct hl_hw_sob *hw_sob)
+{
+ if (hw_sob)
+ kref_get(&hw_sob->kref);
+}
+
/**
* hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
* @sob_base: sob base id
@@ -84,76 +106,29 @@ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
return 0;
}
-static void sob_reset_work(struct work_struct *work)
-{
- struct hl_cs_compl *hl_cs_cmpl =
- container_of(work, struct hl_cs_compl, sob_reset_work);
- struct hl_device *hdev = hl_cs_cmpl->hdev;
-
- /*
- * A signal CS can get completion while the corresponding wait
- * for signal CS is on its way to the PQ. The wait for signal CS
- * will get stuck if the signal CS incremented the SOB to its
- * max value and there are no pending (submitted) waits on this
- * SOB.
- * We do the following to void this situation:
- * 1. The wait for signal CS must get a ref for the signal CS as
- * soon as possible in cs_ioctl_signal_wait() and put it
- * before being submitted to the PQ but after it incremented
- * the SOB refcnt in init_signal_wait_cs().
- * 2. Signal/Wait for signal CS will decrement the SOB refcnt
- * here.
- * These two measures guarantee that the wait for signal CS will
- * reset the SOB upon completion rather than the signal CS and
- * hence the above scenario is avoided.
- */
- kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
-
- if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->reset_sob_group(hdev,
- hl_cs_cmpl->sob_group);
-
- kfree(hl_cs_cmpl);
-}
-
static void hl_fence_release(struct kref *kref)
{
struct hl_fence *fence =
container_of(kref, struct hl_fence, refcount);
struct hl_cs_compl *hl_cs_cmpl =
container_of(fence, struct hl_cs_compl, base_fence);
- struct hl_device *hdev = hl_cs_cmpl->hdev;
- /* EBUSY means the CS was never submitted and hence we don't have
- * an attached hw_sob object that we should handle here
- */
- if (fence->error == -EBUSY)
- goto free;
-
- if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
- (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
- (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {
-
- dev_dbg(hdev->dev,
- "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
- hl_cs_cmpl->cs_seq,
- hl_cs_cmpl->type,
- hl_cs_cmpl->hw_sob->sob_id,
- hl_cs_cmpl->sob_val);
-
- queue_work(hdev->sob_reset_wq, &hl_cs_cmpl->sob_reset_work);
-
- return;
- }
-
-free:
kfree(hl_cs_cmpl);
}
void hl_fence_put(struct hl_fence *fence)
{
- if (fence)
- kref_put(&fence->refcount, hl_fence_release);
+ if (IS_ERR_OR_NULL(fence))
+ return;
+ kref_put(&fence->refcount, hl_fence_release);
+}
+
+void hl_fences_put(struct hl_fence **fence, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, fence++)
+ hl_fence_put(*fence);
}
void hl_fence_get(struct hl_fence *fence)
@@ -473,11 +448,139 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
spin_unlock(&hdev->cs_mirror_lock);
}
+/*
+ * force_complete_multi_cs - complete all contexts that wait on multi-CS
+ *
+ * @hdev: pointer to habanalabs device structure
+ */
+static void force_complete_multi_cs(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ struct multi_cs_completion *mcs_compl;
+
+ mcs_compl = &hdev->multi_cs_completion[i];
+
+ spin_lock(&mcs_compl->lock);
+
+ if (!mcs_compl->used) {
+ spin_unlock(&mcs_compl->lock);
+ continue;
+ }
+
+ /* when calling force complete, no context should be waiting on
+ * multi-CS. We are calling the function as a protection for such
+ * a case, to free any pending context and print an error message.
+ */
+ dev_err(hdev->dev,
+ "multi-CS completion context %d still waiting when calling force completion\n",
+ i);
+ complete_all(&mcs_compl->completion);
+ spin_unlock(&mcs_compl->lock);
+ }
+}
+
+/*
+ * complete_multi_cs - complete all waiting entities on multi-CS
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @cs: CS structure
+ * The function signals a waiting entity that has overlapping stream masters
+ * with the completed CS.
+ * For example:
+ * - a completed CS worked on stream master QID 4, multi CS completion
+ * is actively waiting on stream master QIDs 3, 5. don't send signal as no
+ * common stream master QID
+ * - a completed CS worked on stream master QID 4, multi CS completion
+ * is actively waiting on stream master QIDs 3, 4. send signal as stream
+ * master QID 4 is common
+ */
+static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_fence *fence = cs->fence;
+ int i;
+
+ /* in case of multi CS check for completion only for the first CS */
+ if (cs->staged_cs && !cs->staged_first)
+ return;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ struct multi_cs_completion *mcs_compl;
+
+ mcs_compl = &hdev->multi_cs_completion[i];
+ if (!mcs_compl->used)
+ continue;
+
+ spin_lock(&mcs_compl->lock);
+
+ /*
+ * complete if:
+ * 1. still waiting for completion
+ * 2. the completed CS has at least one overlapping stream
+ * master with the stream masters in the completion
+ */
+ if (mcs_compl->used &&
+ (fence->stream_master_qid_map &
+ mcs_compl->stream_master_qid_map)) {
+ /* extract the timestamp only of first completed CS */
+ if (!mcs_compl->timestamp)
+ mcs_compl->timestamp =
+ ktime_to_ns(fence->timestamp);
+ complete_all(&mcs_compl->completion);
+ }
+
+ spin_unlock(&mcs_compl->lock);
+ }
+}
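complete_multi_cs() reduces the wake-up decision to a bitmap intersection: each stream-master QID is assigned one bit, so a waiter is signalled only when the completed CS's map and the waiter's map share a bit. A self-contained sketch of that test; the QID table is illustrative, not the driver's:

```c
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical table of stream-master queue ids, one bit per entry. */
static const u32 stream_master_qids[] = { 3, 4, 5, 8 };

static u32 qid_to_mask(u32 qid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(stream_master_qids); i++)
		if (qid == stream_master_qids[i])
			return BIT(i);
	return 0;
}

/* Wake the waiter only if the completed CS touched a QID it waits on. */
static bool should_complete(u32 cs_qid_map, u32 waiter_qid_map)
{
	return (cs_qid_map & waiter_qid_map) != 0;
}
```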
+
+static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
+ struct hl_cs *cs,
+ struct hl_cs_compl *hl_cs_cmpl)
+{
+ /* Skip this handler if the cs wasn't submitted, to avoid putting
+ * the hw_sob twice, since this case is already handled at this
+ * point; also skip if the hw_sob pointer wasn't set.
+ */
+ if (!hl_cs_cmpl->hw_sob || !cs->submitted)
+ return;
+
+ spin_lock(&hl_cs_cmpl->lock);
+
+ /*
+ * we get refcount upon reservation of signals or signal/wait cs for the
+ * hw_sob object, and need to put it when the first staged cs
+ * (which contains the encaps signals) or cs signal/wait is completed.
+ */
+ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
+ (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
+ (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
+ (!!hl_cs_cmpl->encaps_signals)) {
+ dev_dbg(hdev->dev,
+ "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
+ hl_cs_cmpl->cs_seq,
+ hl_cs_cmpl->type,
+ hl_cs_cmpl->hw_sob->sob_id,
+ hl_cs_cmpl->sob_val);
+
+ hw_sob_put(hl_cs_cmpl->hw_sob);
+
+ if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+ hdev->asic_funcs->reset_sob_group(hdev,
+ hl_cs_cmpl->sob_group);
+ }
+
+ spin_unlock(&hl_cs_cmpl->lock);
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
struct hl_device *hdev = cs->ctx->hdev;
struct hl_cs_job *job, *tmp;
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(cs->fence, struct hl_cs_compl, base_fence);
cs->completed = true;
@@ -493,8 +596,9 @@ static void cs_do_release(struct kref *ref)
complete_job(hdev, job);
if (!cs->submitted) {
- /* In case the wait for signal CS was submitted, the put occurs
- * in init_signal_wait_cs() or collective_wait_init_cs()
+ /*
+ * In case the wait for signal CS was submitted, the fence put
+ * occurs in init_signal_wait_cs() or collective_wait_init_cs()
* right before hanging on the PQ.
*/
if (cs->type == CS_TYPE_WAIT ||
@@ -535,8 +639,20 @@ static void cs_do_release(struct kref *ref)
list_del(&cs->staged_cs_node);
spin_unlock(&hdev->cs_mirror_lock);
}
+
+ /* decrement the refcount when the first staged cs
+ * with encaps signals is completed.
+ */
+ if (hl_cs_cmpl->encaps_signals)
+ kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
}
+ if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ && cs->encaps_signals)
+ kref_put(&cs->encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
+
out:
/* Must be called before hl_ctx_put because inside we use ctx to get
* the device
@@ -566,6 +682,10 @@ out:
if (cs->timestamp)
cs->fence->timestamp = ktime_get();
complete_all(&cs->fence->completion);
+ complete_multi_cs(hdev, cs);
+
+ cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
+
hl_fence_put(cs->fence);
kfree(cs->jobs_in_queue_cnt);
@@ -621,6 +741,10 @@ static void cs_timedout(struct work_struct *work)
break;
}
+ rc = hl_state_dump(hdev);
+ if (rc)
+ dev_err(hdev->dev, "Error during system state dump %d\n", rc);
+
cs_put(cs);
if (likely(!skip_reset_on_timeout)) {
@@ -661,6 +785,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->completed = false;
cs->type = cs_type;
cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
+ cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
cs->timeout_jiffies = timeout;
cs->skip_reset_on_timeout =
hdev->skip_reset_on_timeout ||
@@ -671,9 +796,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
- cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+ cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl)
- cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_KERNEL);
+ cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
if (!cs_cmpl) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
@@ -698,7 +823,6 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl->hdev = hdev;
cs_cmpl->type = cs->type;
spin_lock_init(&cs_cmpl->lock);
- INIT_WORK(&cs_cmpl->sob_reset_work, sob_reset_work);
cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
@@ -791,31 +915,22 @@ void hl_cs_rollback_all(struct hl_device *hdev)
cs_rollback(hdev, cs);
cs_put(cs);
}
-}
-
-void hl_pending_cb_list_flush(struct hl_ctx *ctx)
-{
- struct hl_pending_cb *pending_cb, *tmp;
- list_for_each_entry_safe(pending_cb, tmp,
- &ctx->pending_cb_list, cb_node) {
- list_del(&pending_cb->cb_node);
- hl_cb_put(pending_cb->cb);
- kfree(pending_cb);
- }
+ force_complete_multi_cs(hdev);
}
static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
struct hl_user_pending_interrupt *pend;
+ unsigned long flags;
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
pend->fence.error = -EIO;
complete_all(&pend->fence.completion);
}
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
}
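Switching to spin_lock_irqsave() here implies the wait-list lock is now also taken from interrupt context; if the process-context side kept interrupts enabled, an IRQ arriving on the same CPU could spin on the held lock forever. The general shape, sketched with hypothetical names:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(wait_lock);	/* shared with an IRQ handler */

/* Process context: disable local IRQs for the critical section. */
static void walk_waiters(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wait_lock, flags);
	/* ... traverse or modify the wait list ... */
	spin_unlock_irqrestore(&wait_lock, flags);
}

/* IRQ context already runs with IRQs off; a plain spin_lock suffices. */
static void irq_side(void)
{
	spin_lock(&wait_lock);
	/* ... complete one waiter ... */
	spin_unlock(&wait_lock);
}
```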
void hl_release_pending_user_interrupts(struct hl_device *hdev)
@@ -981,6 +1096,10 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_TYPE_WAIT;
else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
return CS_TYPE_COLLECTIVE_WAIT;
+ else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
+ return CS_RESERVE_SIGNALS;
+ else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
+ return CS_UNRESERVE_SIGNALS;
else
return CS_TYPE_DEFAULT;
}
@@ -1081,7 +1200,8 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
}
static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
- u64 sequence, u32 flags)
+ u64 sequence, u32 flags,
+ u32 encaps_signal_handle)
{
if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
return 0;
@@ -1093,6 +1213,9 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
/* Staged CS sequence is the first CS sequence */
INIT_LIST_HEAD(&cs->staged_cs_node);
cs->staged_sequence = cs->sequence;
+
+ if (cs->encaps_signals)
+ cs->encaps_sig_hdl_id = encaps_signal_handle;
} else {
/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
* under the cs_mirror_lock
@@ -1108,9 +1231,20 @@ static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
return 0;
}
+static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
+{
+ int i;
+
+ for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
+ if (qid == hdev->stream_master_qid_arr[i])
+ return BIT(i);
+
+ return 0;
+}
+
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 num_chunks, u64 *cs_seq, u32 flags,
- u32 timeout)
+ u32 encaps_signals_handle, u32 timeout)
{
bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
@@ -1121,6 +1255,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs *cs;
struct hl_cb *cb;
u64 user_sequence;
+ u8 stream_master_qid_map = 0;
int rc, i;
cntr = &hdev->aggregated_cs_counters;
@@ -1148,7 +1283,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
hl_debugfs_add_cs(cs);
- rc = cs_staged_submission(hdev, cs, user_sequence, flags);
+ rc = cs_staged_submission(hdev, cs, user_sequence, flags,
+ encaps_signals_handle);
if (rc)
goto free_cs_object;
@@ -1179,9 +1315,20 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ if (queue_type == QUEUE_TYPE_EXT ||
+ queue_type == QUEUE_TYPE_HW) {
int_queues_only = false;
+ /*
+ * store which streams are being used for the external/HW
+ * queues of this CS
+ */
+ if (hdev->supports_wait_for_multi_cs)
+ stream_master_qid_map |=
+ get_stream_master_qid_mask(hdev,
+ chunk->queue_index);
+ }
+
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
@@ -1242,6 +1389,13 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ /*
+ * store the (external/HW queues) streams used by the CS in the
+ * fence object for multi-CS completion
+ */
+ if (hdev->supports_wait_for_multi_cs)
+ cs->fence->stream_master_qid_map = stream_master_qid_map;
+
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
if (rc != -EAGAIN)
@@ -1270,130 +1424,6 @@ out:
return rc;
}
-static int pending_cb_create_job(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cs *cs, struct hl_cb *cb, u32 size, u32 hw_queue_id)
-{
- struct hw_queue_properties *hw_queue_prop;
- struct hl_cs_counters_atomic *cntr;
- struct hl_cs_job *job;
-
- hw_queue_prop = &hdev->asic_prop.hw_queues_props[hw_queue_id];
- cntr = &hdev->aggregated_cs_counters;
-
- job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
- if (!job) {
- atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
- atomic64_inc(&cntr->out_of_mem_drop_cnt);
- dev_err(hdev->dev, "Failed to allocate a new job\n");
- return -ENOMEM;
- }
-
- job->id = 0;
- job->cs = cs;
- job->user_cb = cb;
- atomic_inc(&job->user_cb->cs_cnt);
- job->user_cb_size = size;
- job->hw_queue_id = hw_queue_id;
- job->patched_cb = job->user_cb;
- job->job_cb_size = job->user_cb_size;
-
- /* increment refcount as for external queues we get completion */
- cs_get(cs);
-
- cs->jobs_in_queue_cnt[job->hw_queue_id]++;
-
- list_add_tail(&job->cs_node, &cs->job_list);
-
- hl_debugfs_add_job(hdev, job);
-
- return 0;
-}
-
-static int hl_submit_pending_cb(struct hl_fpriv *hpriv)
-{
- struct hl_device *hdev = hpriv->hdev;
- struct hl_ctx *ctx = hpriv->ctx;
- struct hl_pending_cb *pending_cb, *tmp;
- struct list_head local_cb_list;
- struct hl_cs *cs;
- struct hl_cb *cb;
- u32 hw_queue_id;
- u32 cb_size;
- int process_list, rc = 0;
-
- if (list_empty(&ctx->pending_cb_list))
- return 0;
-
- process_list = atomic_cmpxchg(&ctx->thread_pending_cb_token, 1, 0);
-
- /* Only a single thread is allowed to process the list */
- if (!process_list)
- return 0;
-
- if (list_empty(&ctx->pending_cb_list))
- goto free_pending_cb_token;
-
- /* move all list elements to a local list */
- INIT_LIST_HEAD(&local_cb_list);
- spin_lock(&ctx->pending_cb_lock);
- list_for_each_entry_safe(pending_cb, tmp, &ctx->pending_cb_list,
- cb_node)
- list_move_tail(&pending_cb->cb_node, &local_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT, ULLONG_MAX, &cs, 0,
- hdev->timeout_jiffies);
- if (rc)
- goto add_list_elements;
-
- hl_debugfs_add_cs(cs);
-
- /* Iterate through pending cb list, create jobs and add to CS */
- list_for_each_entry(pending_cb, &local_cb_list, cb_node) {
- cb = pending_cb->cb;
- cb_size = pending_cb->cb_size;
- hw_queue_id = pending_cb->hw_queue_id;
-
- rc = pending_cb_create_job(hdev, ctx, cs, cb, cb_size,
- hw_queue_id);
- if (rc)
- goto free_cs_object;
- }
-
- rc = hl_hw_queue_schedule_cs(cs);
- if (rc) {
- if (rc != -EAGAIN)
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu (%d)\n",
- ctx->asid, cs->sequence, rc);
- goto free_cs_object;
- }
-
- /* pending cb was scheduled successfully */
- list_for_each_entry_safe(pending_cb, tmp, &local_cb_list, cb_node) {
- list_del(&pending_cb->cb_node);
- kfree(pending_cb);
- }
-
- cs_put(cs);
-
- goto free_pending_cb_token;
-
-free_cs_object:
- cs_rollback(hdev, cs);
- cs_put(cs);
-add_list_elements:
- spin_lock(&ctx->pending_cb_lock);
- list_for_each_entry_safe_reverse(pending_cb, tmp, &local_cb_list,
- cb_node)
- list_move(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-free_pending_cb_token:
- atomic_set(&ctx->thread_pending_cb_token, 1);
-
- return rc;
-}
-
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
u64 *cs_seq)
{
@@ -1443,7 +1473,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, 0, hdev->timeout_jiffies);
+ cs_seq, 0, 0, hdev->timeout_jiffies);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1501,10 +1531,17 @@ out:
* hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
* if the SOB value reaches the max value, move to the other SOB reserved
* to the queue.
+ * @hdev: pointer to device structure
+ * @q_idx: stream queue index
+ * @hw_sob: the H/W SOB used in this signal CS.
+ * @count: signals count
+ * @encaps_sig: tells whether it's reservation for encaps signals or not.
+ *
* Note that this function must be called while hw_queues_lock is taken.
*/
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
- struct hl_hw_sob **hw_sob, u32 count)
+ struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
+
{
struct hl_sync_stream_properties *prop;
struct hl_hw_sob *sob = *hw_sob, *other_sob;
@@ -1512,7 +1549,7 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
- kref_get(&sob->kref);
+ hw_sob_get(sob);
/* check for wraparound */
if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
@@ -1522,7 +1559,7 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
* just incremented the refcount right before calling this
* function.
*/
- kref_put(&sob->kref, hl_sob_reset_error);
+ hw_sob_put_err(sob);
/*
* check the other sob value, if it still in use then fail
@@ -1537,12 +1574,42 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
return -EINVAL;
}
- prop->next_sob_val = 1;
+ /*
+ * next_sob_val always points to the next available signal
+ * in the sob, so in encaps signals it will be the next one
+ * after reserving the required amount.
+ */
+ if (encaps_sig)
+ prop->next_sob_val = count + 1;
+ else
+ prop->next_sob_val = count;
/* only two SOBs are currently in use */
prop->curr_sob_offset = other_sob_offset;
*hw_sob = other_sob;
+ /*
+ * Check whether other_sob needs a reset, and if so do it before
+ * using it for the reservation or the next signal CS.
+ * We do it here, for both the encaps and the regular signal CS
+ * cases, to avoid a possible race of two kref_put calls on the
+ * sob, which could occur at the same time if the sob reset
+ * (kref_put) were moved to the cs_do_release function.
+ * In addition, with a combination of signal CS and encaps, if at
+ * the point where the sob must be reset there are no more
+ * reservations and only signal CSes keep coming, the signal CS
+ * has to put the refcount and reset the sob.
+ */
+ if (other_sob->need_reset)
+ hw_sob_put(other_sob);
+
+ if (encaps_sig) {
+ /* set reset indication for the sob */
+ sob->need_reset = true;
+ hw_sob_get(other_sob);
+ }
+
dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
prop->curr_sob_offset, q_idx);
} else {
@@ -1553,12 +1620,18 @@ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
}
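The arithmetic behind the wraparound handler: next_sob_val is the next free signal value on the current SOB, a reservation hands out the values [first, first + count), and the handler switches SOBs before the hardware counter can saturate. A worked sketch, assuming a 15-bit SOB counter; the constant and names are illustrative, not the driver's:

```c
#include <linux/types.h>

#define MAX_SOB_VAL	(1 << 15)	/* assumed 15-bit HW counter */

/* Reserve 'count' signal values; 0 means the caller must switch SOBs. */
static u32 reserve_signals(u32 *next_sob_val, u32 count)
{
	u32 first = *next_sob_val;

	if (first + count >= MAX_SOB_VAL)
		return 0;			/* would wrap the HW counter */

	*next_sob_val = first + count;		/* next free value */
	return first;		/* reserved range: first..first+count-1 */
}
```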
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
+ bool encaps_signals)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
int rc = 0;
+ if (encaps_signals) {
+ *signal_seq = chunk->encaps_signal_seq;
+ return 0;
+ }
+
signal_seq_arr_len = chunk->num_signal_seq_arr;
/* currently only one signal seq is supported */
@@ -1583,7 +1656,7 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
return -ENOMEM;
}
- size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
+ size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
@@ -1605,8 +1678,8 @@ out:
}
static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
- u32 q_idx)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
{
struct hl_cs_counters_atomic *cntr;
struct hl_cs_job *job;
@@ -1644,6 +1717,9 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
job->user_cb_size = cb_size;
job->hw_queue_id = q_idx;
+ if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
+ && cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
/*
* No need in parsing, user CB is the patched CB.
* We call hl_cb_destroy() out of two reasons - we don't need the CB in
@@ -1666,11 +1742,196 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
return 0;
}
+static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
+ u32 q_idx, u32 count,
+ u32 *handle_id, u32 *sob_addr,
+ u32 *signals_count)
+{
+ struct hw_queue_properties *hw_queue_prop;
+ struct hl_sync_stream_properties *prop;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_cs_encaps_sig_handle *handle;
+ struct hl_encaps_signals_mgr *mgr;
+ struct hl_hw_sob *hw_sob;
+ int hdl_id;
+ int rc = 0;
+
+ if (count >= HL_MAX_SOB_VAL) {
+ dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
+ count);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (q_idx >= hdev->asic_prop.max_queues) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
+ q_idx);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+
+ if (!hw_queue_prop->supports_sync_stream) {
+ dev_err(hdev->dev,
+ "Queue index %d does not support sync stream operations\n",
+ q_idx);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ handle->count = count;
+ mgr = &hpriv->ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
+ spin_unlock(&mgr->lock);
+
+ if (hdl_id < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ handle->id = hdl_id;
+ handle->q_idx = q_idx;
+ handle->hdev = hdev;
+ kref_init(&handle->refcount);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+
+ /*
+ * Increment the SOB value by count by user request
+ * to reserve those signals
+ * check if the signals amount to reserve is not exceeding the max sob
+ * value, if yes then switch sob.
+ */
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
+ true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to switch SOB\n");
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+ rc = -EINVAL;
+ goto remove_idr;
+ }
+ /* store the hw_sob in the handle after calling the sob wraparound handler
+ * since sob could have changed.
+ */
+ handle->hw_sob = hw_sob;
+
+ /* store the current sob value for unreserve validity check, and
+ * signal offset support
+ */
+ handle->pre_sob_val = prop->next_sob_val - handle->count;
+
+ *signals_count = prop->next_sob_val;
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ *sob_addr = handle->hw_sob->sob_addr;
+ *handle_id = hdl_id;
+
+ dev_dbg(hdev->dev,
+ "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
+ hw_sob->sob_id, handle->hw_sob->sob_addr,
+ prop->next_sob_val - 1, q_idx, hdl_id);
+ goto out;
+
+remove_idr:
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, hdl_id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+out:
+ return rc;
+}
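The reservation path above pre-increments the SOB counter for the whole requested count and leaves overflow handling to hl_cs_signal_sob_wraparound_handler(), which is not part of this hunk. A minimal, compilable userspace sketch of the decision that handler has to make; the two-SOB ping-pong and the HL_MAX_SOB_VAL value are assumptions taken from the surrounding hunks, not the driver's actual helper:

/* Userspace model of the SOB wraparound decision described above. */
#include <stdbool.h>
#include <stdio.h>

#define HL_MAX_SOB_VAL (1 << 15) /* assumed limit, mirrors the check above */

struct sob_model {
	unsigned int next_sob_val;    /* counter value after pending signals */
	unsigned int curr_sob_offset; /* which SOB of the pair is active */
};

static bool reserve_signals(struct sob_model *s, unsigned int count)
{
	if (count >= HL_MAX_SOB_VAL)
		return false; /* can never fit in a single SOB */

	if (s->next_sob_val + count > HL_MAX_SOB_VAL) {
		/* switch to the other SOB and restart its counter */
		s->curr_sob_offset ^= 1;
		s->next_sob_val = 1;
	}
	s->next_sob_val += count; /* reserve by pre-incrementing */
	return true;
}

int main(void)
{
	struct sob_model s = { .next_sob_val = 1, .curr_sob_offset = 0 };

	reserve_signals(&s, HL_MAX_SOB_VAL - 8);
	reserve_signals(&s, 16); /* forces a switch to the second SOB */
	printf("active sob %u, next val %u\n",
	       s.curr_sob_offset, s.next_sob_val);
	return 0;
}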
+
+static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
+{
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ struct hl_sync_stream_properties *prop;
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_encaps_signals_mgr *mgr;
+ struct hl_hw_sob *hw_sob;
+ u32 q_idx, sob_addr;
+ int rc = 0;
+
+ mgr = &hpriv->ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
+ if (encaps_sig_hdl) {
+ dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
+ handle_id, encaps_sig_hdl->hw_sob->sob_addr,
+ encaps_sig_hdl->count);
+
+ hdev->asic_funcs->hw_queues_lock(hdev);
+
+ q_idx = encaps_sig_hdl->q_idx;
+ prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+ hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+ sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
+
+ /* Check if sob_val got out of sync due to other
+ * signal submission requests which were handled
+ * between the reserve-unreserve calls or SOB switch
+ * upon reaching SOB max value.
+ */
+ if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
+ != prop->next_sob_val ||
+ sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
+ dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
+ encaps_sig_hdl->pre_sob_val,
+ (prop->next_sob_val - encaps_sig_hdl->count));
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Decrement the SOB value by the user-requested count to
+ * unreserve those signals
+ */
+ prop->next_sob_val -= encaps_sig_hdl->count;
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ hw_sob_put(hw_sob);
+
+ /* Release the id and free allocated memory of the handle */
+ idr_remove(&mgr->handles, handle_id);
+ kfree(encaps_sig_hdl);
+ } else {
+ rc = -EINVAL;
+ dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
+ }
+out:
+ spin_unlock(&mgr->lock);
+
+ return rc;
+}
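The out-of-sync test above is pure counter arithmetic: a reservation can be rolled back only while it still sits at the top of the SOB counter and no wraparound has replaced the SOB underneath it. A standalone sketch of that test, with illustrative field names:

#include <stdbool.h>

struct encaps_model {
	unsigned int pre_sob_val; /* SOB value saved at reserve time */
	unsigned int count;       /* signals reserved */
	unsigned int sob_addr;    /* SOB address saved at reserve time */
};

bool can_unreserve(const struct encaps_model *h,
		   unsigned int next_sob_val, unsigned int curr_sob_addr)
{
	/* a later submission advanced the counter past our reservation */
	if (h->pre_sob_val + h->count != next_sob_val)
		return false;

	/* a wraparound switched SOBs since the reservation was taken */
	if (curr_sob_addr != h->sob_addr)
		return false;

	return true;
}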
+
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
u64 *cs_seq, u32 flags, u32 timeout)
{
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
+ bool handle_found = false, is_wait_cs = false,
+ wait_cs_submitted = false,
+ cs_encaps_signals = false;
struct hl_cs_chunk *cs_chunk_array, *chunk;
+ bool staged_cs_with_encaps_signals = false;
struct hw_queue_properties *hw_queue_prop;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
@@ -1730,11 +1991,58 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
collective_engine_id = chunk->collective_engine_id;
}
- if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
+ is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT);
+
+ cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
+
+ if (is_wait_cs) {
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
+ ctx, cs_encaps_signals);
if (rc)
goto free_cs_chunk_array;
+ if (cs_encaps_signals) {
+ /* check whether the cs sequence has an encapsulated
+ * signals handle
+ */
+ struct idr *idp;
+ u32 id;
+
+ spin_lock(&ctx->sig_mgr.lock);
+ idp = &ctx->sig_mgr.handles;
+ idr_for_each_entry(idp, encaps_sig_hdl, id) {
+ if (encaps_sig_hdl->cs_seq == signal_seq) {
+ handle_found = true;
+ /* get refcount to protect removing
+ * this handle from idr, needed when
+ * multiple wait cs are used with offset
+ * to wait on reserved encaps signals.
+ */
+ kref_get(&encaps_sig_hdl->refcount);
+ break;
+ }
+ }
+ spin_unlock(&ctx->sig_mgr.lock);
+
+ if (!handle_found) {
+ dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+ signal_seq);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ /* validate also the signal offset value */
+ if (chunk->encaps_signal_offset >
+ encaps_sig_hdl->count) {
+ dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
+ chunk->encaps_signal_offset,
+ encaps_sig_hdl->count);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+ }
+
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
@@ -1755,11 +2063,16 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
sig_waitcs_cmpl =
container_of(sig_fence, struct hl_cs_compl, base_fence);
- if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ staged_cs_with_encaps_signals = !!
+ (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
+ (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
+
+ if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
+ !staged_cs_with_encaps_signals) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
- "CS seq 0x%llx is not of a signal CS\n",
+ "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
signal_seq);
hl_fence_put(sig_fence);
rc = -EINVAL;
@@ -1776,18 +2089,27 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
if (rc) {
- if (cs_type == CS_TYPE_WAIT ||
- cs_type == CS_TYPE_COLLECTIVE_WAIT)
+ if (is_wait_cs)
hl_fence_put(sig_fence);
+
goto free_cs_chunk_array;
}
/*
* Save the signal CS fence for later initialization right before
* hanging the wait CS on the queue.
+ * In the encaps signals case, we save the cs sequence and handle
+ * pointer for later initialization.
*/
- if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
+ if (is_wait_cs) {
cs->signal_fence = sig_fence;
+ /* store the handle pointer so we don't have to look it up again
+ * later in the flow, when we need to set the SOB info in the
+ * hw_queue.
+ */
+ if (cs->encaps_signals)
+ cs->encaps_sig_hdl = encaps_sig_hdl;
+ }
hl_debugfs_add_cs(cs);
@@ -1795,10 +2117,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
- q_idx);
+ q_idx, chunk->encaps_signal_offset);
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
- cs, q_idx, collective_engine_id);
+ cs, q_idx, collective_engine_id,
+ chunk->encaps_signal_offset);
else {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
@@ -1810,7 +2133,13 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- if (rc != -EAGAIN)
+ /* In case the wait CS failed here, it means the signal CS has
+ * already completed. We want to free all its related objects,
+ * but we don't want to fail the ioctl.
+ */
+ if (is_wait_cs)
+ rc = 0;
+ else if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to submit CS %d.%llu to H/W queues, error %d\n",
ctx->asid, cs->sequence, rc);
@@ -1818,6 +2147,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
rc = HL_CS_STATUS_SUCCESS;
+ if (is_wait_cs)
+ wait_cs_submitted = true;
goto put_cs;
free_cs_object:
@@ -1828,6 +2159,10 @@ put_cs:
/* We finished with the CS in this function, so put the ref */
cs_put(cs);
free_cs_chunk_array:
+ if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
+ is_wait_cs)
+ kref_put(&encaps_sig_hdl->refcount,
+ hl_encaps_handle_do_release);
kfree(cs_chunk_array);
out:
return rc;
@@ -1836,10 +2171,11 @@ out:
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
- enum hl_cs_type cs_type;
+ enum hl_cs_type cs_type = 0;
u64 cs_seq = ULONG_MAX;
void __user *chunks;
- u32 num_chunks, flags, timeout;
+ u32 num_chunks, flags, timeout,
+ signals_count = 0, sob_addr = 0, handle_id = 0;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -1850,10 +2186,6 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
if (rc)
goto out;
- rc = hl_submit_pending_cb(hpriv);
- if (rc)
- goto out;
-
cs_type = hl_cs_get_cs_type(args->in.cs_flags &
~HL_CS_FLAGS_FORCE_RESTORE);
chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
@@ -1876,80 +2208,448 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
&cs_seq, args->in.cs_flags, timeout);
break;
+ case CS_RESERVE_SIGNALS:
+ rc = cs_ioctl_reserve_signals(hpriv,
+ args->in.encaps_signals_q_idx,
+ args->in.encaps_signals_count,
+ &handle_id, &sob_addr, &signals_count);
+ break;
+ case CS_UNRESERVE_SIGNALS:
+ rc = cs_ioctl_unreserve_signals(hpriv,
+ args->in.encaps_sig_handle_id);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
- args->in.cs_flags, timeout);
+ args->in.cs_flags,
+ args->in.encaps_sig_handle_id,
+ timeout);
break;
}
-
out:
if (rc != -EAGAIN) {
memset(args, 0, sizeof(*args));
+
+ if (cs_type == CS_RESERVE_SIGNALS) {
+ args->out.handle_id = handle_id;
+ args->out.sob_base_addr_offset = sob_addr;
+ args->out.count = signals_count;
+ } else {
+ args->out.seq = cs_seq;
+ }
args->out.status = rc;
- args->out.seq = cs_seq;
}
return rc;
}
+static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
+ enum hl_cs_wait_status *status, u64 timeout_us,
+ s64 *timestamp)
+{
+ struct hl_device *hdev = ctx->hdev;
+ long completion_rc;
+ int rc = 0;
+
+ if (IS_ERR(fence)) {
+ rc = PTR_ERR(fence);
+ if (rc == -EINVAL)
+ dev_notice_ratelimited(hdev->dev,
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
+ return rc;
+ }
+
+ if (!fence) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
+
+ *status = CS_WAIT_STATUS_GONE;
+ return 0;
+ }
+
+ if (!timeout_us) {
+ completion_rc = completion_done(&fence->completion);
+ } else {
+ unsigned long timeout;
+
+ timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
+ timeout_us : usecs_to_jiffies(timeout_us);
+ completion_rc =
+ wait_for_completion_interruptible_timeout(
+ &fence->completion, timeout);
+ }
+
+ if (completion_rc > 0) {
+ *status = CS_WAIT_STATUS_COMPLETED;
+ if (timestamp)
+ *timestamp = ktime_to_ns(fence->timestamp);
+ } else {
+ *status = CS_WAIT_STATUS_BUSY;
+ }
+
+ if (fence->error == -ETIMEDOUT)
+ rc = -ETIMEDOUT;
+ else if (fence->error == -EIO)
+ rc = -EIO;
+
+ return rc;
+}
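Note the asymmetry in the timeout handling above: a zero timeout degenerates to completion_done() (a pure poll), while MAX_SCHEDULE_TIMEOUT is passed through untranslated because it is already the kernel's "wait forever" jiffies sentinel and must not be fed to usecs_to_jiffies(). A kernel-context sketch of just that mapping, not a new helper in this patch:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

unsigned long wait_timeout_to_jiffies(u64 timeout_us)
{
	/* the sentinel is already a valid jiffies count; converting it
	 * through usecs_to_jiffies() would mangle it
	 */
	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT;

	return usecs_to_jiffies(timeout_us);
}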
+
+/*
+ * hl_cs_poll_fences - iterate CS fences to check for CS completion
+ *
+ * @mcs_data: multi-CS internal data
+ *
+ * @return 0 on success, otherwise non 0 error code
+ *
+ * The function iterates over all CS sequences in the list and sets a
+ * bit in completion_bitmap for each completed CS.
+ * While iterating, the function extracts the stream map to be used
+ * later by the waiting function.
+ * This function shall be called after taking a context ref.
+ */
+static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
+{
+ struct hl_fence **fence_ptr = mcs_data->fence_arr;
+ struct hl_device *hdev = mcs_data->ctx->hdev;
+ int i, rc, arr_len = mcs_data->arr_len;
+ u64 *seq_arr = mcs_data->seq_arr;
+ ktime_t max_ktime, first_cs_time;
+ enum hl_cs_wait_status status;
+
+ memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
+
+ /* get all fences under the same lock */
+ rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
+ if (rc)
+ return rc;
+
+ /*
+ * set to maximum time to verify the timestamp is valid: if this
+ * value is maintained at the end, no timestamp was updated
+ */
+ max_ktime = ktime_set(KTIME_SEC_MAX, 0);
+ first_cs_time = max_ktime;
+
+ for (i = 0; i < arr_len; i++, fence_ptr++) {
+ struct hl_fence *fence = *fence_ptr;
+
+ /*
+ * function won't sleep as it is called with timeout 0 (i.e.
+ * poll the fence)
+ */
+ rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
+ &status, 0, NULL);
+ if (rc) {
+ dev_err(hdev->dev,
+ "wait_for_fence error :%d for CS seq %llu\n",
+ rc, seq_arr[i]);
+ break;
+ }
+
+ mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
+
+ if (status == CS_WAIT_STATUS_BUSY)
+ continue;
+
+ mcs_data->completion_bitmap |= BIT(i);
+
+ /*
+ * Best effort to extract the timestamp. A few notes:
+ * - if even a single fence is gone we cannot extract a timestamp
+ * (as the fence no longer exists)
+ * - for all completed CSs we take the earliest timestamp.
+ * for this we have to validate that:
+ * 1. the given timestamp was indeed set
+ * 2. the timestamp is the earliest of all timestamps so far
+ */
+
+ if (status == CS_WAIT_STATUS_GONE) {
+ mcs_data->update_ts = false;
+ mcs_data->gone_cs = true;
+ } else if (mcs_data->update_ts &&
+ (ktime_compare(fence->timestamp,
+ ktime_set(0, 0)) > 0) &&
+ (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
+ first_cs_time = fence->timestamp;
+ }
+ }
+
+ hl_fences_put(mcs_data->fence_arr, arr_len);
+
+ if (mcs_data->update_ts &&
+ (ktime_compare(first_cs_time, max_ktime) != 0))
+ mcs_data->timestamp = ktime_to_ns(first_cs_time);
+
+ return rc;
+}
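The timestamp logic above reduces to "earliest valid timestamp wins": first_cs_time starts at the KTIME_SEC_MAX sentinel, each completed fence may lower it, and the sentinel surviving to the end means no timestamp is reported. A sketch of the per-fence selection step in isolation:

#include <linux/ktime.h>

ktime_t pick_earliest(ktime_t first_cs_time, ktime_t fence_ts)
{
	/* take the fence timestamp only if it was actually set (> 0)
	 * and is earlier than any timestamp seen so far
	 */
	if (ktime_compare(fence_ts, ktime_set(0, 0)) > 0 &&
	    ktime_compare(fence_ts, first_cs_time) < 0)
		return fence_ts;

	return first_cs_time;
}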
+
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp)
{
struct hl_fence *fence;
- unsigned long timeout;
int rc = 0;
- long completion_rc;
if (timestamp)
*timestamp = 0;
- if (timeout_us == MAX_SCHEDULE_TIMEOUT)
- timeout = timeout_us;
- else
- timeout = usecs_to_jiffies(timeout_us);
-
hl_ctx_get(hdev, ctx);
fence = hl_ctx_get_fence(ctx, seq);
- if (IS_ERR(fence)) {
- rc = PTR_ERR(fence);
- if (rc == -EINVAL)
- dev_notice_ratelimited(hdev->dev,
- "Can't wait on CS %llu because current CS is at seq %llu\n",
- seq, ctx->cs_sequence);
- } else if (fence) {
- if (!timeout_us)
- completion_rc = completion_done(&fence->completion);
- else
- completion_rc =
- wait_for_completion_interruptible_timeout(
- &fence->completion, timeout);
- if (completion_rc > 0) {
- *status = CS_WAIT_STATUS_COMPLETED;
- if (timestamp)
- *timestamp = ktime_to_ns(fence->timestamp);
- } else {
- *status = CS_WAIT_STATUS_BUSY;
+ rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
+ hl_fence_put(fence);
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
+/*
+ * hl_wait_multi_cs_completion_init - init completion structure
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
+ * master QID to wait on
+ *
+ * @return valid completion struct pointer on success, otherwise error pointer
+ *
+ * up to MULTI_CS_MAX_USER_CTX calls can be made to the driver concurrently.
+ * the function takes the first available completion structure (by marking
+ * it "used") and initializes its values.
+ */
+static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
+ struct hl_device *hdev,
+ u8 stream_master_bitmap)
+{
+ struct multi_cs_completion *mcs_compl;
+ int i;
+
+ /* find free multi_cs completion structure */
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ mcs_compl = &hdev->multi_cs_completion[i];
+ spin_lock(&mcs_compl->lock);
+ if (!mcs_compl->used) {
+ mcs_compl->used = 1;
+ mcs_compl->timestamp = 0;
+ mcs_compl->stream_master_qid_map = stream_master_bitmap;
+ reinit_completion(&mcs_compl->completion);
+ spin_unlock(&mcs_compl->lock);
+ break;
}
+ spin_unlock(&mcs_compl->lock);
+ }
- if (fence->error == -ETIMEDOUT)
- rc = -ETIMEDOUT;
- else if (fence->error == -EIO)
- rc = -EIO;
+ if (i == MULTI_CS_MAX_USER_CTX) {
+ dev_err(hdev->dev,
+ "no available multi-CS completion structure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return mcs_compl;
+}
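The pool scan above takes each slot's own lock, so claiming a completion structure is effectively a per-slot test-and-set with contention confined to the probed slot. A userspace analogue of the same pattern using C11 atomics in place of per-slot spinlocks; illustrative only, not driver code:

#include <stdatomic.h>
#include <stddef.h>

#define POOL_SIZE 8 /* stands in for MULTI_CS_MAX_USER_CTX */

static atomic_bool pool_used[POOL_SIZE];

/* returns a claimed slot index, or -1 if the pool is exhausted */
int pool_claim(void)
{
	for (size_t i = 0; i < POOL_SIZE; i++)
		if (!atomic_exchange(&pool_used[i], true))
			return (int)i;
	return -1;
}

void pool_release(int i)
{
	atomic_store(&pool_used[i], false);
}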
- hl_fence_put(fence);
- } else {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
- seq, ctx->cs_sequence);
- *status = CS_WAIT_STATUS_GONE;
+/*
+ * hl_wait_multi_cs_completion_fini - return completion structure and set as
+ * unused
+ *
+ * @mcs_compl: pointer to the completion structure
+ */
+static void hl_wait_multi_cs_completion_fini(
+ struct multi_cs_completion *mcs_compl)
+{
+ /*
+ * free the completion structure; do it under the lock to stay in sync
+ * with the thread that signals completion
+ */
+ spin_lock(&mcs_compl->lock);
+ mcs_compl->used = 0;
+ spin_unlock(&mcs_compl->lock);
+}
+
+/*
+ * hl_wait_multi_cs_completion - wait for first CS to complete
+ *
+ * @mcs_data: multi-CS internal data
+ *
+ * @return 0 on success, otherwise non 0 error code
+ */
+static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
+{
+ struct hl_device *hdev = mcs_data->ctx->hdev;
+ struct multi_cs_completion *mcs_compl;
+ long completion_rc;
+
+ mcs_compl = hl_wait_multi_cs_completion_init(hdev,
+ mcs_data->stream_master_qid_map);
+ if (IS_ERR(mcs_compl))
+ return PTR_ERR(mcs_compl);
+
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &mcs_compl->completion,
+ usecs_to_jiffies(mcs_data->timeout_us));
+
+ /* update timestamp */
+ if (completion_rc > 0)
+ mcs_data->timestamp = mcs_compl->timestamp;
+
+ hl_wait_multi_cs_completion_fini(mcs_compl);
+
+ mcs_data->wait_status = completion_rc;
+
+ return 0;
+}
+
+/*
+ * hl_multi_cs_completion_init - init array of multi-CS completion structures
+ *
+ * @hdev: pointer to habanalabs device structure
+ */
+void hl_multi_cs_completion_init(struct hl_device *hdev)
+{
+ struct multi_cs_completion *mcs_cmpl;
+ int i;
+
+ for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
+ mcs_cmpl = &hdev->multi_cs_completion[i];
+ mcs_cmpl->used = 0;
+ spin_lock_init(&mcs_cmpl->lock);
+ init_completion(&mcs_cmpl->completion);
+ }
+}
+
+/*
+ * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
+ *
+ * @hpriv: pointer to the private data of the fd
+ * @data: pointer to multi-CS wait ioctl in/out args
+ *
+ */
+static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct multi_cs_data mcs_data = {0};
+ union hl_wait_cs_args *args = data;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_fence **fence_arr;
+ void __user *seq_arr;
+ u32 size_to_copy;
+ u64 *cs_seq_arr;
+ u8 seq_arr_len;
+ int rc;
+
+ if (!hdev->supports_wait_for_multi_cs) {
+ dev_err(hdev->dev, "Wait for multi CS is not supported\n");
+ return -EPERM;
+ }
+
+ seq_arr_len = args->in.seq_arr_len;
+
+ if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
+ dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
+ HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
+ return -EINVAL;
+ }
+
+ /* allocate memory for sequence array */
+ cs_seq_arr =
+ kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
+ if (!cs_seq_arr)
+ return -ENOMEM;
+
+ /* copy CS sequence array from user */
+ seq_arr = (void __user *) (uintptr_t) args->in.seq;
+ size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
+ if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
+ rc = -EFAULT;
+ goto free_seq_arr;
+ }
+
+ /* allocate array for the fences */
+ fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
+ if (!fence_arr) {
+ rc = -ENOMEM;
+ goto free_seq_arr;
+ }
+
+ /* initialize the multi-CS internal data */
+ mcs_data.ctx = ctx;
+ mcs_data.seq_arr = cs_seq_arr;
+ mcs_data.fence_arr = fence_arr;
+ mcs_data.arr_len = seq_arr_len;
+
+ hl_ctx_get(hdev, ctx);
+
+ /* poll all CS fences, extract timestamp */
+ mcs_data.update_ts = true;
+ rc = hl_cs_poll_fences(&mcs_data);
+ /*
+ * skip waiting for CS completion when one of the below is true:
+ * - the poll function returned an error
+ * - one or more CSs in the list completed
+ * - the user called the ioctl with timeout 0
+ */
+ if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
+ goto put_ctx;
+
+ /* wait (with timeout) for the first CS to be completed */
+ mcs_data.timeout_us = args->in.timeout_us;
+ rc = hl_wait_multi_cs_completion(&mcs_data);
+ if (rc)
+ goto put_ctx;
+
+ if (mcs_data.wait_status > 0) {
+ /*
+ * poll fences once again to update the CS map.
+ * no timestamp should be updated this time.
+ */
+ mcs_data.update_ts = false;
+ rc = hl_cs_poll_fences(&mcs_data);
+
+ /*
+ * if hl_wait_multi_cs_completion returned before timeout (i.e.
+ * it got a completion) we expect to see at least one CS
+ * completed after the poll function.
+ */
+ if (!mcs_data.completion_bitmap) {
+ dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+ rc = -EFAULT;
+ }
}
+put_ctx:
hl_ctx_put(ctx);
+ kfree(fence_arr);
- return rc;
+free_seq_arr:
+ kfree(cs_seq_arr);
+
+ /* update output args */
+ memset(args, 0, sizeof(*args));
+ if (rc)
+ return rc;
+
+ if (mcs_data.completion_bitmap) {
+ args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+ args->out.cs_completion_map = mcs_data.completion_bitmap;
+
+ /* a non-zero timestamp is valid */
+ if (mcs_data.timestamp) {
+ args->out.timestamp_nsec = mcs_data.timestamp;
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+ }
+
+ /* update if some CS was gone */
+ if (mcs_data.gone_cs)
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
+ } else if (mcs_data.wait_status == -ERESTARTSYS) {
+ args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ } else {
+ args->out.status = HL_WAIT_CS_STATUS_BUSY;
+ }
+
+ return 0;
}
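Putting the flow together from the user's side: the ioctl takes an array of CS sequence numbers, a length, a timeout and the multi-CS flag, and returns a completion bitmap. A hedged userspace sketch, assuming the uapi field names used in this patch and the HL_IOCTL_WAIT_CS request from <misc/habanalabs.h>; error handling is trimmed for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

int wait_multi_cs(int fd, const uint64_t *seqs, uint8_t n)
{
	union hl_wait_cs_args args;

	memset(&args, 0, sizeof(args));
	args.in.seq = (uint64_t)(uintptr_t)seqs; /* user array of CS seqs */
	args.in.seq_arr_len = n;                 /* up to the driver's max */
	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
	args.in.timeout_us = 1000000;            /* wait up to one second */

	if (ioctl(fd, HL_IOCTL_WAIT_CS, &args))
		return -1;

	if (args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
		printf("completed CS bitmap: 0x%llx\n",
		       (unsigned long long)args.out.cs_completion_map);
	return 0;
}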
static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
@@ -2015,9 +2715,9 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
{
struct hl_user_pending_interrupt *pend;
struct hl_user_interrupt *interrupt;
- unsigned long timeout;
- long completion_rc;
+ unsigned long timeout, flags;
u32 completion_value;
+ long completion_rc;
int rc = 0;
if (timeout_us == U32_MAX)
@@ -2040,17 +2740,10 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else
interrupt = &hdev->user_interrupt[interrupt_offset];
- spin_lock(&interrupt->wait_list_lock);
- if (!hl_device_operational(hdev, NULL)) {
- rc = -EPERM;
- goto unlock_and_free_fence;
- }
-
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
- dev_err(hdev->dev,
- "Failed to copy completion value from user\n");
+ dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
- goto unlock_and_free_fence;
+ goto free_fence;
}
if (completion_value >= target_value)
@@ -2059,48 +2752,57 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
- goto unlock_and_free_fence;
+ goto free_fence;
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
*/
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock(&interrupt->wait_list_lock);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
wait_again:
/* Wait for interrupt handler to signal completion */
- completion_rc =
- wait_for_completion_interruptible_timeout(
- &pend->fence.completion, timeout);
+ completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
+ timeout);
/* If timeout did not expire we need to perform the comparison.
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- if (copy_from_user(&completion_value,
- u64_to_user_ptr(user_address), 4)) {
- dev_err(hdev->dev,
- "Failed to copy completion value from user\n");
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
+
goto remove_pending_user_interrupt;
}
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ reinit_completion(&pend->fence.completion);
timeout = completion_rc;
+
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again;
}
+ } else if (completion_rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for interrupt ID %d\n",
+ interrupt->interrupt_id);
+ *status = HL_WAIT_CS_STATUS_INTERRUPTED;
+ rc = -EINTR;
} else {
*status = CS_WAIT_STATUS_BUSY;
}
remove_pending_user_interrupt:
- spin_lock(&interrupt->wait_list_lock);
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
list_del(&pend->wait_list_node);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
-unlock_and_free_fence:
- spin_unlock(&interrupt->wait_list_lock);
+free_fence:
kfree(pend);
hl_ctx_put(ctx);
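The wait loop above has one subtlety worth isolating: when the completion fires but the user's value has not yet reached the target, the completion is re-armed under the wait-list lock and the wait resumes with the remaining budget, since a positive return from wait_for_completion_interruptible_timeout() is the jiffies left. A kernel-context sketch, with read_user_value() standing in (hypothetically) for the copy_from_user() check:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

extern u32 read_user_value(void); /* hypothetical stand-in */

long wait_for_target(struct completion *done, spinlock_t *lock,
		     u32 target, long timeout)
{
	unsigned long flags;
	long rem;

	for (;;) {
		rem = wait_for_completion_interruptible_timeout(done, timeout);
		if (rem <= 0)
			return rem; /* timed out (0) or -ERESTARTSYS */

		if (read_user_value() >= target)
			return rem; /* condition met */

		spin_lock_irqsave(lock, flags);
		reinit_completion(done); /* re-arm for the next wake-up */
		spin_unlock_irqrestore(lock, flags);
		timeout = rem; /* continue with the time remaining */
	}
}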
@@ -2148,8 +2850,9 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc) {
- dev_err_ratelimited(hdev->dev,
- "interrupt_wait_ioctl failed (%d)\n", rc);
+ if (rc != -EINTR)
+ dev_err_ratelimited(hdev->dev,
+ "interrupt_wait_ioctl failed (%d)\n", rc);
return rc;
}
@@ -2173,8 +2876,16 @@ int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
u32 flags = args->in.flags;
int rc;
+ /* If the device is not operational, no point in waiting for any command submission or
+ * user interrupt
+ */
+ if (!hl_device_operational(hpriv->hdev, NULL))
+ return -EPERM;
+
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
rc = hl_interrupt_wait_ioctl(hpriv, data);
+ else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
+ rc = hl_multi_cs_wait_ioctl(hpriv, data);
else
rc = hl_cs_wait_ioctl(hpriv, data);
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 19b6b045219e..22978303ad63 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -9,16 +9,70 @@
#include <linux/slab.h>
+void hl_encaps_handle_do_release(struct kref *ref)
+{
+ struct hl_cs_encaps_sig_handle *handle =
+ container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
+ struct hl_ctx *ctx = handle->hdev->compute_ctx;
+ struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, handle->id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+}
+
+static void hl_encaps_handle_do_release_sob(struct kref *ref)
+{
+ struct hl_cs_encaps_sig_handle *handle =
+ container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
+ struct hl_ctx *ctx = handle->hdev->compute_ctx;
+ struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+
+ /* if we're here, then there was a signals reservation but a cs with
+ * encaps signals was never submitted, so we need to put the refcount
+ * taken on the hw_sob at reservation time.
+ */
+ hw_sob_put(handle->hw_sob);
+
+ spin_lock(&mgr->lock);
+ idr_remove(&mgr->handles, handle->id);
+ spin_unlock(&mgr->lock);
+
+ kfree(handle);
+}
+
+static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
+{
+ spin_lock_init(&mgr->lock);
+ idr_init(&mgr->handles);
+}
+
+static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
+ struct hl_encaps_signals_mgr *mgr)
+{
+ struct hl_cs_encaps_sig_handle *handle;
+ struct idr *idp;
+ u32 id;
+
+ idp = &mgr->handles;
+
+ if (!idr_is_empty(idp)) {
+ dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
+ idr_for_each_entry(idp, handle, id)
+ kref_put(&handle->refcount,
+ hl_encaps_handle_do_release_sob);
+ }
+
+ idr_destroy(&mgr->handles);
+}
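The two release callbacks above differ in exactly one step: hl_encaps_handle_do_release_sob() also drops the hw_sob reference taken at reservation time, for handles whose encapsulated-signals CS was never submitted. A sketch of how a caller would pick between them; encaps_handle_put() is illustrative only, and since the _sob variant is static to this file, such a helper would have to live here:

static void encaps_handle_put(struct hl_cs_encaps_sig_handle *handle,
			      bool cs_was_submitted)
{
	if (cs_was_submitted)
		/* SOB refcount ownership moved to the submitted CS */
		kref_put(&handle->refcount, hl_encaps_handle_do_release);
	else
		/* also drops the hw_sob reference taken at reserve time */
		kref_put(&handle->refcount, hl_encaps_handle_do_release_sob);
}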
+
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
int i;
- /* Release all allocated pending cb's, those cb's were never
- * scheduled so it is safe to release them here
- */
- hl_pending_cb_list_flush(ctx);
-
/* Release all allocated HW block mapped list entries and destroy
* the mutex.
*/
@@ -53,6 +107,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
+ hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
/* Scrub both SRAM and DRAM */
hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
@@ -130,9 +185,6 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
return;
-
- dev_warn(hdev->dev,
- "user process released device but its command submissions are still executing\n");
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
@@ -144,11 +196,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
- INIT_LIST_HEAD(&ctx->pending_cb_list);
- spin_lock_init(&ctx->pending_cb_lock);
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
- atomic_set(&ctx->thread_pending_cb_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
sizeof(struct hl_fence *),
@@ -200,6 +249,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
goto err_cb_va_pool_fini;
}
+ hl_encaps_sig_mgr_init(&ctx->sig_mgr);
+
dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
}
@@ -229,25 +280,40 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
-struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+/*
+ * hl_ctx_get_fence_locked - get CS fence under CS lock
+ *
+ * @ctx: pointer to the context structure.
+ * @seq: CS sequences number
+ *
+ * @return valid fence pointer on success, NULL if fence is gone, otherwise
+ * error pointer.
+ *
+ * NOTE: this function shall be called with cs_lock locked
+ */
+static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct hl_fence *fence;
- spin_lock(&ctx->cs_lock);
-
- if (seq >= ctx->cs_sequence) {
- spin_unlock(&ctx->cs_lock);
+ if (seq >= ctx->cs_sequence)
return ERR_PTR(-EINVAL);
- }
- if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
- spin_unlock(&ctx->cs_lock);
+ if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
return NULL;
- }
fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
hl_fence_get(fence);
+ return fence;
+}
+
+struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+{
+ struct hl_fence *fence;
+
+ spin_lock(&ctx->cs_lock);
+
+ fence = hl_ctx_get_fence_locked(ctx, seq);
spin_unlock(&ctx->cs_lock);
@@ -255,6 +321,46 @@ struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
}
/*
+ * hl_ctx_get_fences - get multiple CS fences under the same CS lock
+ *
+ * @ctx: pointer to the context structure.
+ * @seq_arr: array of CS sequences to wait for
+ * @fence: fence array to store the CS fences
+ * @arr_len: length of seq_arr and fence_arr
+ *
+ * @return 0 on success, otherwise non 0 error code
+ */
+int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
+ struct hl_fence **fence, u32 arr_len)
+{
+ struct hl_fence **fence_arr_base = fence;
+ int i, rc = 0;
+
+ spin_lock(&ctx->cs_lock);
+
+ for (i = 0; i < arr_len; i++, fence++) {
+ u64 seq = seq_arr[i];
+
+ *fence = hl_ctx_get_fence_locked(ctx, seq);
+
+ if (IS_ERR(*fence)) {
+ dev_err(ctx->hdev->dev,
+ "Failed to get fence for CS with seq 0x%llx\n",
+ seq);
+ rc = PTR_ERR(*fence);
+ break;
+ }
+ }
+
+ spin_unlock(&ctx->cs_lock);
+
+ if (rc)
+ hl_fences_put(fence_arr_base, i);
+
+ return rc;
+}
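hl_ctx_get_fence_locked() above implements a fixed window over a ring of pending fences: sequences at or beyond cs_sequence do not exist yet, sequences more than max_pending_cs behind it have been recycled, and anything in between indexes the ring with a mask, which requires max_pending_cs to be a power of two. A standalone sketch of the window test; MAX_PENDING_CS is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define MAX_PENDING_CS 64u /* must be a power of two */

bool seq_to_slot(uint64_t seq, uint64_t cs_sequence, uint64_t *slot)
{
	if (seq >= cs_sequence)                 /* CS not submitted yet */
		return false;
	if (seq + MAX_PENDING_CS < cs_sequence) /* fence already recycled */
		return false;

	*slot = seq & (MAX_PENDING_CS - 1);     /* ring index by mask */
	return true;
}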
+
+/*
* hl_ctx_mgr_init - initialize the context manager
*
* @mgr: pointer to context manager structure
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 703d79fb6f3f..985f1f3dbd20 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -209,12 +209,12 @@ static int userptr_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " user virtual address size dma dir\n");
+ seq_puts(s, " pid user virtual address size dma dir\n");
seq_puts(s, "----------------------------------------------------------\n");
}
- seq_printf(s,
- " 0x%-14llx %-10u %-30s\n",
- userptr->addr, userptr->size, dma_dir[userptr->dir]);
+ seq_printf(s, " %-7d 0x%-14llx %-10llu %-30s\n",
+ userptr->pid, userptr->addr, userptr->size,
+ dma_dir[userptr->dir]);
}
spin_unlock(&dev_entry->userptr_spinlock);
@@ -235,7 +235,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool once = true;
u64 j;
int i;
@@ -261,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
if (*vm_type == VM_TYPE_USERPTR) {
userptr = hnode->ptr;
seq_printf(s,
- " 0x%-14llx %-10u\n",
+ " 0x%-14llx %-10llu\n",
hnode->vaddr, userptr->size);
} else {
phys_pg_pack = hnode->ptr;
@@ -320,6 +320,77 @@ static int vm_show(struct seq_file *s, void *data)
return 0;
}
+static int userptr_lookup_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct scatterlist *sg;
+ struct hl_userptr *userptr;
+ bool first = true;
+ u64 total_npages, npages, sg_start, sg_end;
+ dma_addr_t dma_addr;
+ int i;
+
+ spin_lock(&dev_entry->userptr_spinlock);
+
+ list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
+ if (dev_entry->userptr_lookup >= userptr->addr &&
+ dev_entry->userptr_lookup < userptr->addr + userptr->size) {
+ total_npages = 0;
+ for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents,
+ i) {
+ npages = hl_get_sg_info(sg, &dma_addr);
+ sg_start = userptr->addr +
+ total_npages * PAGE_SIZE;
+ sg_end = userptr->addr +
+ (total_npages + npages) * PAGE_SIZE;
+
+ if (dev_entry->userptr_lookup >= sg_start &&
+ dev_entry->userptr_lookup < sg_end) {
+ dma_addr += (dev_entry->userptr_lookup -
+ sg_start);
+ if (first) {
+ first = false;
+ seq_puts(s, "\n");
+ seq_puts(s, " user virtual address dma address pid region start region size\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------\n");
+ }
+ seq_printf(s, " 0x%-18llx 0x%-16llx %-8u 0x%-16llx %-12llu\n",
+ dev_entry->userptr_lookup,
+ (u64)dma_addr, userptr->pid,
+ userptr->addr, userptr->size);
+ }
+ total_npages += npages;
+ }
+ }
+ }
+
+ spin_unlock(&dev_entry->userptr_spinlock);
+
+ if (!first)
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ ssize_t rc;
+ u64 value;
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ dev_entry->userptr_lookup = value;
+
+ return count;
+}
+
static int mmu_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
@@ -349,7 +420,7 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
- phys_addr = hops_info.hop_info[hops_info.used_hops - 1].hop_pte_val;
+ hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
if (hops_info.scrambled_vaddr &&
(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
@@ -491,11 +562,10 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_ctx *ctx = hdev->compute_ctx;
struct hl_vm_hash_node *hnode;
+ u64 end_address, range_size;
struct hl_userptr *userptr;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool valid = false;
- u64 end_address;
- u32 range_size;
int i, rc = 0;
if (!ctx) {
@@ -1043,6 +1113,60 @@ static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
return 0;
}
+static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ ssize_t rc;
+
+ down_read(&entry->state_dump_sem);
+ if (!entry->state_dump[entry->state_dump_head])
+ rc = 0;
+ else
+ rc = simple_read_from_buffer(
+ buf, count, ppos,
+ entry->state_dump[entry->state_dump_head],
+ strlen(entry->state_dump[entry->state_dump_head]));
+ up_read(&entry->state_dump_sem);
+
+ return rc;
+}
+
+static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ ssize_t rc;
+ u32 size;
+ int i;
+
+ rc = kstrtouint_from_user(buf, count, 10, &size);
+ if (rc)
+ return rc;
+
+ if (size <= 0 || size >= ARRAY_SIZE(entry->state_dump)) {
+ dev_err(hdev->dev, "Invalid number of dumps to skip\n");
+ return -EINVAL;
+ }
+
+ if (entry->state_dump[entry->state_dump_head]) {
+ down_write(&entry->state_dump_sem);
+ for (i = 0; i < size; ++i) {
+ vfree(entry->state_dump[entry->state_dump_head]);
+ entry->state_dump[entry->state_dump_head] = NULL;
+ if (entry->state_dump_head > 0)
+ entry->state_dump_head--;
+ else
+ entry->state_dump_head =
+ ARRAY_SIZE(entry->state_dump) - 1;
+ }
+ up_write(&entry->state_dump_sem);
+ }
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1110,12 +1234,19 @@ static const struct file_operations hl_security_violations_fops = {
.read = hl_security_violations_read
};
+static const struct file_operations hl_state_dump_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_state_dump_read,
+ .write = hl_state_dump_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
+ {"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
{"mmu", mmu_show, mmu_asid_va_write},
{"engines", engines_show, NULL}
};
@@ -1172,6 +1303,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
INIT_LIST_HEAD(&dev_entry->userptr_list);
INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
mutex_init(&dev_entry->file_mutex);
+ init_rwsem(&dev_entry->state_dump_sem);
spin_lock_init(&dev_entry->cb_spinlock);
spin_lock_init(&dev_entry->cs_spinlock);
spin_lock_init(&dev_entry->cs_job_spinlock);
@@ -1283,6 +1415,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root,
&hdev->skip_reset_on_timeout);
+ debugfs_create_file("state_dump",
+ 0600,
+ dev_entry->root,
+ dev_entry,
+ &hl_state_dump_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
@@ -1297,6 +1435,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
void hl_debugfs_remove_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
+ int i;
debugfs_remove_recursive(entry->root);
@@ -1304,6 +1443,9 @@ void hl_debugfs_remove_device(struct hl_device *hdev)
vfree(entry->blob_desc.data);
+ for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
+ vfree(entry->state_dump[i]);
+
kfree(entry->entry_arr);
}
@@ -1416,6 +1558,28 @@ void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}
+/**
+ * hl_debugfs_set_state_dump - register state dump making it accessible via
+ * debugfs
+ * @hdev: pointer to the device structure
+ * @data: the actual dump data
+ * @length: the length of the data
+ */
+void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
+ unsigned long length)
+{
+ struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
+
+ down_write(&dev_entry->state_dump_sem);
+
+ dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
+ ARRAY_SIZE(dev_entry->state_dump);
+ vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
+ dev_entry->state_dump[dev_entry->state_dump_head] = data;
+
+ up_write(&dev_entry->state_dump_sem);
+}
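The state dump history behaves as a small ring: each registration advances the head modulo HL_STATE_DUMP_HIST_LEN and evicts whatever that slot held, so only the most recent dumps survive (and hl_state_dump_write() above lets userspace pop entries back off the head). A userspace sketch of the push side; HIST_LEN mirrors HL_STATE_DUMP_HIST_LEN and dump_push() takes ownership of 'data', as the function above does:

#include <stdlib.h>

#define HIST_LEN 5

struct dump_hist {
	char *dump[HIST_LEN];
	unsigned int head;
};

void dump_push(struct dump_hist *h, char *data)
{
	h->head = (h->head + 1) % HIST_LEN;
	free(h->dump[h->head]); /* evict the oldest entry in this slot */
	h->dump[h->head] = data;
}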
+
void __init hl_debugfs_init(void)
{
hl_debug_root = debugfs_create_dir("habanalabs", NULL);
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index ff4cbde289c0..97c7c86580e6 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -7,11 +7,11 @@
#define pr_fmt(fmt) "habanalabs: " fmt
+#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <uapi/misc/habanalabs.h>
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
@@ -23,6 +23,8 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
status = HL_DEVICE_STATUS_NEEDS_RESET;
else if (hdev->disabled)
status = HL_DEVICE_STATUS_MALFUNCTION;
+ else if (!hdev->init_done)
+ status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -44,6 +46,7 @@ bool hl_device_operational(struct hl_device *hdev,
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
case HL_DEVICE_STATUS_OPERATIONAL:
+ case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
default:
return true;
}
@@ -129,8 +132,8 @@ static int hl_device_release(struct inode *inode, struct file *filp)
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
if (!hl_hpriv_put(hpriv))
- dev_warn(hdev->dev,
- "Device is still in use because there are live CS and/or memory mappings\n");
+ dev_notice(hdev->dev,
+ "User process closed FD but device still in use\n");
hdev->last_open_session_duration_jif =
jiffies - hdev->last_successful_open_jif;
@@ -308,9 +311,15 @@ static void device_hard_reset_pending(struct work_struct *work)
container_of(work, struct hl_device_reset_work,
reset_work.work);
struct hl_device *hdev = device_reset_work->hdev;
+ u32 flags;
int rc;
- rc = hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD);
+ flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD;
+
+ if (device_reset_work->fw_reset)
+ flags |= HL_RESET_FW;
+
+ rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
dev_info(hdev->dev,
"Could not reset device. will try again in %u seconds",
@@ -682,6 +691,44 @@ out:
return rc;
}
+static void take_release_locks(struct hl_device *hdev)
+{
+ /* Flush anyone that is inside the critical section of enqueuing
+ * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ /* Flush processes that are sending message to CPU */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
+}
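take_release_locks() relies on the lock/unlock "flush" idiom: acquiring and immediately releasing a lock protects nothing by itself, but it guarantees that every thread that was inside the matching critical section has left it before the reset or teardown proceeds. The idiom in isolation:

#include <linux/mutex.h>

void flush_critical_section(struct mutex *lock)
{
	mutex_lock(lock);   /* blocks until current holders finish */
	mutex_unlock(lock); /* we keep nothing; the barrier is the point */
}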
+
+static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
+{
+ if (hard_reset)
+ device_late_fini(hdev);
+
+ /*
+ * Halt the engines and disable interrupts so we won't get any more
+ * completions from H/W and we won't have any accesses from the
+ * H/W to the host machine
+ */
+ hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
+
+ /* Go over all the queues, release all CS and their jobs */
+ hl_cs_rollback_all(hdev);
+
+ /* Release all pending user interrupts, each pending user interrupt
+ * holds a reference to user context
+ */
+ hl_release_pending_user_interrupts(hdev);
+}
+
/*
* hl_device_suspend - initiate device suspend
*
@@ -707,16 +754,7 @@ int hl_device_suspend(struct hl_device *hdev)
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush processes that are sending message to CPU */
- mutex_lock(&hdev->send_cpu_message_lock);
- mutex_unlock(&hdev->send_cpu_message_lock);
+ take_release_locks(hdev);
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
@@ -819,6 +857,11 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
usleep_range(1000, 10000);
put_task_struct(task);
+ } else {
+ dev_warn(hdev->dev,
+ "Can't get task struct for PID so giving up on killing process\n");
+ mutex_unlock(&hdev->fpriv_list_lock);
+ return -ETIME;
}
}
@@ -885,7 +928,7 @@ static void device_disable_open_processes(struct hl_device *hdev)
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
- bool hard_reset, from_hard_reset_thread, hard_instead_soft = false;
+ bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false;
int i, rc;
if (!hdev->init_done) {
@@ -894,8 +937,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
return 0;
}
- hard_reset = (flags & HL_RESET_HARD) != 0;
- from_hard_reset_thread = (flags & HL_RESET_FROM_RESET_THREAD) != 0;
+ hard_reset = !!(flags & HL_RESET_HARD);
+ from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD);
+ fw_reset = !!(flags & HL_RESET_FW);
if (!hard_reset && !hdev->supports_soft_reset) {
hard_instead_soft = true;
@@ -947,11 +991,13 @@ do_reset:
else
hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- /*
- * if reset is due to heartbeat, device CPU is no responsive in
- * which case no point sending PCI disable message to it
+ /* If reset is due to heartbeat, device CPU is not responsive, in
+ * which case there is no point sending it a PCI disable message.
+ *
+ * If F/W is performing the reset, no need to send it a message to disable
+ * PCI access
*/
- if (hard_reset && !(flags & HL_RESET_HEARTBEAT)) {
+ if (hard_reset && !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
/* Disable PCI access from device F/W so he won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
@@ -970,15 +1016,7 @@ do_reset:
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
- /* Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush anyone that is inside device open */
- mutex_lock(&hdev->fpriv_list_lock);
- mutex_unlock(&hdev->fpriv_list_lock);
+ take_release_locks(hdev);
dev_err(hdev->dev, "Going to RESET device!\n");
}
@@ -989,6 +1027,8 @@ again:
hdev->process_kill_trial_cnt = 0;
+ hdev->device_reset_work.fw_reset = fw_reset;
+
/*
* Because the reset function can't run from heartbeat work,
* we need to call the reset function from a dedicated work.
@@ -999,31 +1039,7 @@ again:
return 0;
}
- if (hard_reset) {
- device_late_fini(hdev);
-
- /*
- * Now that the heartbeat thread is closed, flush processes
- * which are sending messages to CPU
- */
- mutex_lock(&hdev->send_cpu_message_lock);
- mutex_unlock(&hdev->send_cpu_message_lock);
- }
-
- /*
- * Halt the engines and disable interrupts so we won't get any more
- * completions from H/W and we won't have any accesses from the
- * H/W to the host machine
- */
- hdev->asic_funcs->halt_engines(hdev, hard_reset);
-
- /* Go over all the queues, release all CS and their jobs */
- hl_cs_rollback_all(hdev);
-
- /* Release all pending user interrupts, each pending user interrupt
- * holds a reference to user context
- */
- hl_release_pending_user_interrupts(hdev);
+ cleanup_resources(hdev, hard_reset, fw_reset);
kill_processes:
if (hard_reset) {
@@ -1057,12 +1073,15 @@ kill_processes:
}
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, hard_reset);
+ hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
+ hdev->fw_loader.linux_loaded = false;
+
/* Release kernel context */
if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
hdev->kernel_ctx = NULL;
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_reset(hdev, &hdev->event_queue);
@@ -1292,6 +1311,10 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto user_interrupts_fini;
+
+ /* initialize completion structure for multi CS wait */
+ hl_multi_cs_completion_init(hdev);
+
/*
* Initialize the H/W queues. Must be done before hw_init, because
* there the addresses of the kernel queue are being written to the
@@ -1361,6 +1384,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->compute_ctx = NULL;
+ hdev->asic_funcs->state_dump_init(hdev);
+
hl_debugfs_add_device(hdev);
/* debugfs nodes are created in hl_ctx_init so it must be called after
@@ -1567,31 +1592,13 @@ void hl_device_fini(struct hl_device *hdev)
/* Mark device as disabled */
hdev->disabled = true;
- /* Flush anyone that is inside the critical section of enqueue
- * jobs to the H/W
- */
- hdev->asic_funcs->hw_queues_lock(hdev);
- hdev->asic_funcs->hw_queues_unlock(hdev);
-
- /* Flush anyone that is inside device open */
- mutex_lock(&hdev->fpriv_list_lock);
- mutex_unlock(&hdev->fpriv_list_lock);
+ take_release_locks(hdev);
hdev->hard_reset_pending = true;
hl_hwmon_fini(hdev);
- device_late_fini(hdev);
-
- /*
- * Halt the engines and disable interrupts so we won't get any more
- * completions from H/W and we won't have any accesses from the
- * H/W to the host machine
- */
- hdev->asic_funcs->halt_engines(hdev, true);
-
- /* Go over all the queues, release all CS and their jobs */
- hl_cs_rollback_all(hdev);
+ cleanup_resources(hdev, true, false);
/* Kill processes here after CS rollback. This is because the process
* can't really exit until all its CSs are done, which is what we
@@ -1610,7 +1617,9 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
/* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+
+ hdev->fw_loader.linux_loaded = false;
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 2e4d04ec6b53..8d2568c63f19 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -240,11 +240,15 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* set fence to a non valid value */
pkt->fence = cpu_to_le32(UINT_MAX);
- rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
- if (rc) {
- dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
- goto out;
- }
+ /*
+ * The CPU queue is a synchronous queue with an effective depth of
+ * a single entry (although it is allocated with room for multiple
+ * entries). We lock on it using 'send_cpu_message_lock' which
+ * serializes accesses to the CPU queue.
+ * Which means that we don't need to lock the access to the entire H/W
+ * queues module when submitting a JOB to the CPU queue.
+ */
+ hl_hw_queue_submit_bd(hdev, queue, 0, len, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
@@ -663,17 +667,15 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
hdev->event_queue.check_eqe_index = false;
/* Read FW application security bits again */
- if (hdev->asic_prop.fw_cpu_boot_dev_sts0_valid) {
- hdev->asic_prop.fw_app_cpu_boot_dev_sts0 =
- RREG32(sts_boot_dev_sts0_reg);
- if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ if (prop->fw_cpu_boot_dev_sts0_valid) {
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
+ if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
hdev->event_queue.check_eqe_index = true;
}
- if (hdev->asic_prop.fw_cpu_boot_dev_sts1_valid)
- hdev->asic_prop.fw_app_cpu_boot_dev_sts1 =
- RREG32(sts_boot_dev_sts1_reg);
+ if (prop->fw_cpu_boot_dev_sts1_valid)
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
out:
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
@@ -1008,6 +1010,11 @@ void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
} else {
WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
msleep(static_loader->cpu_reset_wait_msec);
+
+ /* Must clear this register in order to prevent preboot
+ * from reading WFE after reboot
+ */
+ WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
}
hdev->device_cpu_is_halted = true;
@@ -1055,6 +1062,10 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
dev_err(hdev->dev,
"Device boot progress - Thermal Sensor initialization failed\n");
break;
+ case CPU_BOOT_STATUS_SECURITY_READY:
+ dev_err(hdev->dev,
+ "Device boot progress - Stuck in preboot after security initialization\n");
+ break;
default:
dev_err(hdev->dev,
"Device boot progress - Invalid status code %d\n",
@@ -1238,11 +1249,6 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
* b. Check whether hard reset is done by boot cpu
* 3. FW application - a. Fetch fw application security status
* b. Check whether hard reset is done by fw app
- *
- * Preboot:
- * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED). If set, then-
- * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
- * If set, then mark GIC controller to be disabled.
*/
prop->hard_reset_done_by_fw =
!!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
@@ -1953,8 +1959,8 @@ static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
if (!hdev->asic_prop.gic_interrupts_enable &&
!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
- dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_irq_ctrl;
- dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_irq_ctrl;
+ dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
+ dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
dev_warn(hdev->dev,
"Using a single interrupt interface towards cpucp");
@@ -2122,8 +2128,7 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
/* Read FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid) {
- prop->fw_app_cpu_boot_dev_sts0 =
- RREG32(cpu_boot_dev_sts0_reg);
+ prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
@@ -2143,8 +2148,7 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
}
if (prop->fw_cpu_boot_dev_sts1_valid) {
- prop->fw_app_cpu_boot_dev_sts1 =
- RREG32(cpu_boot_dev_sts1_reg);
+ prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
dev_dbg(hdev->dev,
"Firmware application CPU status1 %#x\n",
@@ -2235,6 +2239,10 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
+ /*
+ * At this stage, "cpu_dyn_regs" contains only LKD's hard-coded values!
+ * It will be updated from FW after hl_fw_dynamic_request_descriptor().
+ */
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6b3cdd7e068a..bebebcb163ee 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
#include <linux/debugfs.h>
+#include <linux/rwsem.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
@@ -65,6 +66,11 @@
#define HL_COMMON_USER_INTERRUPT_ID 0xFFF
+#define HL_STATE_DUMP_HIST_LEN 5
+
+#define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+#define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+
/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -122,12 +128,17 @@ enum hl_mmu_page_table_location {
*
* - HL_RESET_DEVICE_RELEASE
* Set if reset is due to device release
+ *
+ * - HL_RESET_FW
+ * F/W will perform the reset. No need to ask it to reset the device. This is relevant
+ * only when running with secured f/w
*/
#define HL_RESET_HARD (1 << 0)
#define HL_RESET_FROM_RESET_THREAD (1 << 1)
#define HL_RESET_HEARTBEAT (1 << 2)
#define HL_RESET_TDR (1 << 3)
#define HL_RESET_DEVICE_RELEASE (1 << 4)
+#define HL_RESET_FW (1 << 5)
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -236,7 +247,9 @@ enum hl_cs_type {
CS_TYPE_DEFAULT,
CS_TYPE_SIGNAL,
CS_TYPE_WAIT,
- CS_TYPE_COLLECTIVE_WAIT
+ CS_TYPE_COLLECTIVE_WAIT,
+ CS_RESERVE_SIGNALS,
+ CS_UNRESERVE_SIGNALS
};
/*
@@ -281,13 +294,17 @@ enum queue_cb_alloc_flags {
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
* @sob_id: id of this SOB.
+ * @sob_addr: the sob offset from the base address.
* @q_idx: the H/W queue that uses this SOB.
+ * @need_reset: reset indication set when switching to the other sob.
*/
struct hl_hw_sob {
struct hl_device *hdev;
struct kref kref;
u32 sob_id;
+ u32 sob_addr;
u32 q_idx;
+ bool need_reset;
};
enum hl_collective_mode {
@@ -317,11 +334,11 @@ struct hw_queue_properties {
};
/**
- * enum vm_type_t - virtual memory mapping request information.
+ * enum vm_type - virtual memory mapping request information.
* @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
-enum vm_type_t {
+enum vm_type {
VM_TYPE_USERPTR = 0x1,
VM_TYPE_PHYS_PACK = 0x2
};
@@ -382,6 +399,16 @@ struct hl_mmu_properties {
};
/**
+ * struct hl_hints_range - hint addresses reserved va range.
+ * @start_addr: start address of the va range.
+ * @end_addr: end address of the va range.
+ */
+struct hl_hints_range {
+ u64 start_addr;
+ u64 end_addr;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
@@ -392,6 +419,10 @@ struct hl_mmu_properties {
* @pmmu: PCI (host) MMU address translation properties.
* @pmmu_huge: PCI (host) MMU address translation properties for memory
* allocated with huge pages.
+ * @hints_dram_reserved_va_range: dram hint addresses reserved range.
+ * @hints_host_reserved_va_range: host hint addresses reserved range.
+ * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
+ * range.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address: SRAM physical start address for user access.
@@ -412,6 +443,10 @@ struct hl_mmu_properties {
* to the device's MMU.
* @cb_va_end_addr: virtual end address of command buffers which are mapped to
* the device's MMU.
+ * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
+ * for hints validity check.
+ * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
+ * to enable the device to access them.
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
@@ -459,6 +494,8 @@ struct hl_mmu_properties {
* reserved for the user
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
+ * @server_type: type of the server in which the ASIC is currently installed.
+ * The value is according to enum hl_server_type in the uapi file.
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
* @fw_security_enabled: true if security measures are enabled in firmware,
@@ -470,6 +507,7 @@ struct hl_mmu_properties {
* @dram_supports_virtual_memory: is there an MMU towards the DRAM
* @hard_reset_done_by_fw: true if firmware is handling hard reset flow
* @num_functional_hbms: number of functional HBMs in each DCORE.
+ * @hints_range_reservation: true if the device supports hint address range reservation.
* @iatu_done_by_fw: true if iATU configuration is being done by FW.
* @dynamic_fw_load: is dynamic FW load supported.
* @gic_interrupts_enable: true if FW is not blocking GIC controller,
@@ -483,6 +521,9 @@ struct asic_fixed_properties {
struct hl_mmu_properties dmmu;
struct hl_mmu_properties pmmu;
struct hl_mmu_properties pmmu_huge;
+ struct hl_hints_range hints_dram_reserved_va_range;
+ struct hl_hints_range hints_host_reserved_va_range;
+ struct hl_hints_range hints_host_hpage_reserved_va_range;
u64 sram_base_address;
u64 sram_end_address;
u64 sram_user_base_address;
@@ -500,6 +541,8 @@ struct asic_fixed_properties {
u64 mmu_dram_default_page_addr;
u64 cb_va_start_addr;
u64 cb_va_end_addr;
+ u64 dram_hints_align_mask;
+ u64 device_dma_offset_for_host_access;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
@@ -534,6 +577,7 @@ struct asic_fixed_properties {
u16 first_available_user_msix_interrupt;
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
+ u16 server_type;
u8 tpc_enabled_mask;
u8 completion_queues_count;
u8 fw_security_enabled;
@@ -542,6 +586,7 @@ struct asic_fixed_properties {
u8 dram_supports_virtual_memory;
u8 hard_reset_done_by_fw;
u8 num_functional_hbms;
+ u8 hints_range_reservation;
u8 iatu_done_by_fw;
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
@@ -552,40 +597,45 @@ struct asic_fixed_properties {
* @completion: fence is implemented using completion
* @refcount: refcount for this fence
* @cs_sequence: sequence of the corresponding command submission
+ * @stream_master_qid_map: bitmap of all stream-master QIDs that the
+ * multi-CS is waiting on
* @error: mark this fence with error
* @timestamp: timestamp upon completion
- *
*/
struct hl_fence {
struct completion completion;
struct kref refcount;
u64 cs_sequence;
+ u32 stream_master_qid_map;
int error;
ktime_t timestamp;
};
/**
* struct hl_cs_compl - command submission completion object.
- * @sob_reset_work: workqueue object to run SOB reset flow.
* @base_fence: hl fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
* @hw_sob: the H/W SOB used in this signal/wait CS.
+ * @encaps_sig_hdl: encaps signals handler.
* @cs_seq: command submission sequence number.
* @type: type of the CS - signal/wait.
* @sob_val: the SOB value that is used in this signal/wait CS.
* @sob_group: the SOB group that is used in this collective wait CS.
+ * @encaps_signals: indication whether this is a completion object of a CS
+ * with encaps signals or not.
*/
struct hl_cs_compl {
- struct work_struct sob_reset_work;
struct hl_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
struct hl_hw_sob *hw_sob;
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
u64 cs_seq;
enum hl_cs_type type;
u16 sob_val;
u16 sob_group;
+ bool encaps_signals;
};
/*
@@ -698,6 +748,17 @@ struct hl_sync_stream_properties {
};
/**
+ * struct hl_encaps_signals_mgr - describes sync stream encapsulated signals
+ * handlers manager
+ * @lock: protects handles.
+ * @handles: an idr to hold all encapsulated signals handles.
+ */
+struct hl_encaps_signals_mgr {
+ spinlock_t lock;
+ struct idr handles;
+};
+
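The manager pairs a spinlock with an idr, the usual kernel pattern for handing
out small integer handles to user space. A minimal sketch (assumed, not taken
from this patch) of the matching init/teardown idiom:

        static void encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
        {
                spin_lock_init(&mgr->lock);
                idr_init(&mgr->handles);
        }

        static void encaps_sig_mgr_fini(struct hl_encaps_signals_mgr *mgr)
        {
                /* all handles are expected to have been removed by now */
                idr_destroy(&mgr->handles);
        }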
+/**
* struct hl_hw_queue - describes a H/W transport queue.
* @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
* @sync_stream_prop: sync stream queue properties
@@ -875,7 +936,7 @@ struct pci_mem_region {
u64 region_base;
u64 region_size;
u64 bar_size;
- u32 offset_in_bar;
+ u64 offset_in_bar;
u8 bar_id;
u8 used;
};
@@ -996,7 +1057,7 @@ struct fw_load_mgr {
* hw_fini and before CS rollback.
* @suspend: handles IP specific H/W or SW changes for suspend.
* @resume: handles IP specific H/W or SW changes for resume.
- * @cb_mmap: maps a CB.
+ * @mmap: maps memory.
* @ring_doorbell: increment PI on a given QMAN.
* @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
* function because the PQs are located in different memory areas
@@ -1101,6 +1162,10 @@ struct fw_load_mgr {
* generic f/w compatible PLL Indexes
* @init_firmware_loader: initialize data for FW loader.
* @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
+ * @state_dump_init: initialize constants required for state dump
+ * @get_sob_addr: get SOB base address offset.
+ * @set_pci_memory_regions: set properties of PCI memory regions
+ * @get_stream_master_qid_arr: get pointer to stream masters QID array
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1110,11 +1175,11 @@ struct hl_asic_funcs {
int (*sw_init)(struct hl_device *hdev);
int (*sw_fini)(struct hl_device *hdev);
int (*hw_init)(struct hl_device *hdev);
- void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
- void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
+ void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
+ void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
- int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ int (*mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
@@ -1210,10 +1275,11 @@ struct hl_asic_funcs {
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
void (*set_dma_mask_from_fw)(struct hl_device *hdev);
u64 (*get_device_time)(struct hl_device *hdev);
- void (*collective_wait_init_cs)(struct hl_cs *cs);
+ int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id);
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset);
u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
void (*ack_protection_bits_errors)(struct hl_device *hdev);
@@ -1226,6 +1292,10 @@ struct hl_asic_funcs {
int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
void (*init_firmware_loader)(struct hl_device *hdev);
void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
+ void (*state_dump_init)(struct hl_device *hdev);
+ u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
+ void (*set_pci_memory_regions)(struct hl_device *hdev);
+ u32* (*get_stream_master_qid_arr)(void);
};
@@ -1283,20 +1353,6 @@ struct hl_cs_counters_atomic {
};
/**
- * struct hl_pending_cb - pending command buffer structure
- * @cb_node: cb node in pending cb list
- * @cb: command buffer to send in next submission
- * @cb_size: command buffer size
- * @hw_queue_id: destination queue id
- */
-struct hl_pending_cb {
- struct list_head cb_node;
- struct hl_cb *cb;
- u32 cb_size;
- u32 hw_queue_id;
-};
-
-/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
@@ -1312,28 +1368,21 @@ struct hl_pending_cb {
* MMU hash or walking the PGT requires talking this lock.
* @hw_block_list_lock: protects the HW block memory list.
* @debugfs_list: node in debugfs list of contexts.
- * pending_cb_list: list of pending command buffers waiting to be sent upon
- * next user command submission context.
* @hw_block_mem_list: list of HW block virtual mapped addresses.
* @cs_counters: context command submission counters.
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
+ * @sig_mgr: encaps signals handle manager.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
* index to cs_pending array.
* @dram_default_hops: array that holds all hops addresses needed for default
* DRAM mapping.
- * @pending_cb_lock: spinlock to protect pending cb list
* @cs_lock: spinlock to protect cs_sequence.
* @dram_phys_mem: amount of used physical DRAM memory by this context.
* @thread_ctx_switch_token: token to prevent multiple threads of the same
* context from running the context switch phase.
* Only a single thread should run it.
- * @thread_pending_cb_token: token to prevent multiple threads from processing
- * the pending CB list. Only a single thread should
- * process the list since it is protected by a
- * spinlock and we don't want to halt the entire
- * command submission sequence.
* @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
* the context switch phase from moving to their
* execution phase before the context switch phase
@@ -1353,17 +1402,15 @@ struct hl_ctx {
struct mutex mmu_lock;
struct mutex hw_block_list_lock;
struct list_head debugfs_list;
- struct list_head pending_cb_list;
struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
+ struct hl_encaps_signals_mgr sig_mgr;
u64 cs_sequence;
u64 *dram_default_hops;
- spinlock_t pending_cb_lock;
spinlock_t cs_lock;
atomic64_t dram_phys_mem;
atomic_t thread_ctx_switch_token;
- atomic_t thread_pending_cb_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
u32 handle;
@@ -1394,20 +1441,22 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
+ * @pid: the pid of the user process owning the memory
* @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
struct hl_userptr {
- enum vm_type_t vm_type; /* must be first */
+ enum vm_type vm_type; /* must be first */
struct list_head job_node;
struct page **pages;
unsigned int npages;
struct sg_table *sgt;
enum dma_data_direction dir;
struct list_head debugfs_list;
+ pid_t pid;
u64 addr;
- u32 size;
+ u64 size;
u8 dma_mapped;
};
@@ -1426,12 +1475,14 @@ struct hl_userptr {
* @mirror_node : node in device mirror list of command submissions.
* @staged_cs_node: node in the staged cs list.
* @debugfs_list: node in debugfs list of command submissions.
+ * @encaps_sig_hdl: holds the encaps signals handle.
* @sequence: the sequence number of this CS.
* @staged_sequence: the sequence of the staged submission this CS is part of,
* relevant only if staged_cs is set.
* @timeout_jiffies: cs timeout in jiffies.
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
+ * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout : true if CS was timedout.
@@ -1445,6 +1496,7 @@ struct hl_userptr {
* @staged_cs: true if this CS is part of a staged submission.
* @skip_reset_on_timeout: true if we shall not reset the device in case
* timeout occurs (debug scenario).
+ * @encaps_signals: true if this CS has encaps reserved signals.
*/
struct hl_cs {
u16 *jobs_in_queue_cnt;
@@ -1459,11 +1511,13 @@ struct hl_cs {
struct list_head mirror_node;
struct list_head staged_cs_node;
struct list_head debugfs_list;
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
u64 sequence;
u64 staged_sequence;
u64 timeout_jiffies;
u64 submission_time_jiffies;
enum hl_cs_type type;
+ u32 encaps_sig_hdl_id;
u8 submitted;
u8 completed;
u8 timedout;
@@ -1474,6 +1528,7 @@ struct hl_cs {
u8 staged_first;
u8 staged_cs;
u8 skip_reset_on_timeout;
+ u8 encaps_signals;
};
/**
@@ -1493,6 +1548,8 @@ struct hl_cs {
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
+ * @encaps_sig_wait_offset: encapsulated signals offset, which allow user
+ * to wait on part of the reserved signals.
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
@@ -1517,6 +1574,7 @@ struct hl_cs_job {
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
+ u32 encaps_sig_wait_offset;
u8 is_kernel_allocated_cb;
u8 contains_dma_pkt;
};
@@ -1613,7 +1671,7 @@ struct hl_vm_hw_block_list_node {
* @created_from_userptr: is product of host virtual address.
*/
struct hl_vm_phys_pg_pack {
- enum vm_type_t vm_type; /* must be first */
+ enum vm_type vm_type; /* must be first */
u64 *pages;
u64 npages;
u64 total_size;
@@ -1759,9 +1817,13 @@ struct hl_debugfs_entry {
* @ctx_mem_hash_list: list of available contexts with MMU mappings.
* @ctx_mem_hash_spinlock: protects cb_list.
* @blob_desc: descriptor of blob
+ * @state_dump: data of the system state, captured in case of a bad CS.
+ * @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @userptr_lookup: the target user ptr to look up on demand.
* @mmu_asid: ASID to use while translating in mmu_show.
+ * @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
@@ -1783,14 +1845,149 @@ struct hl_dbg_device_entry {
struct list_head ctx_mem_hash_list;
spinlock_t ctx_mem_hash_spinlock;
struct debugfs_blob_wrapper blob_desc;
+ char *state_dump[HL_STATE_DUMP_HIST_LEN];
+ struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
+ u64 userptr_lookup;
u32 mmu_asid;
+ u32 state_dump_head;
u8 i2c_bus;
u8 i2c_addr;
u8 i2c_reg;
};
+/**
+ * struct hl_hw_obj_name_entry - single hw object name, member of
+ * hl_state_dump_specs
+ * @node: link to the containing hash table
+ * @name: hw object name
+ * @id: object identifier
+ */
+struct hl_hw_obj_name_entry {
+ struct hlist_node node;
+ const char *name;
+ u32 id;
+};
+
+enum hl_state_dump_specs_props {
+ SP_SYNC_OBJ_BASE_ADDR,
+ SP_NEXT_SYNC_OBJ_ADDR,
+ SP_SYNC_OBJ_AMOUNT,
+ SP_MON_OBJ_WR_ADDR_LOW,
+ SP_MON_OBJ_WR_ADDR_HIGH,
+ SP_MON_OBJ_WR_DATA,
+ SP_MON_OBJ_ARM_DATA,
+ SP_MON_OBJ_STATUS,
+ SP_MONITORS_AMOUNT,
+ SP_TPC0_CMDQ,
+ SP_TPC0_CFG_SO,
+ SP_NEXT_TPC,
+ SP_MME_CMDQ,
+ SP_MME_CFG_SO,
+ SP_NEXT_MME,
+ SP_DMA_CMDQ,
+ SP_DMA_CFG_SO,
+ SP_DMA_QUEUES_OFFSET,
+ SP_NUM_OF_MME_ENGINES,
+ SP_SUB_MME_ENG_NUM,
+ SP_NUM_OF_DMA_ENGINES,
+ SP_NUM_OF_TPC_ENGINES,
+ SP_ENGINE_NUM_OF_QUEUES,
+ SP_ENGINE_NUM_OF_STREAMS,
+ SP_ENGINE_NUM_OF_FENCES,
+ SP_FENCE0_CNT_OFFSET,
+ SP_FENCE0_RDATA_OFFSET,
+ SP_CP_STS_OFFSET,
+ SP_NUM_CORES,
+
+ SP_MAX
+};
+
+enum hl_sync_engine_type {
+ ENGINE_TPC,
+ ENGINE_DMA,
+ ENGINE_MME,
+};
+
+/**
+ * struct hl_mon_state_dump - represents a state dump of a single monitor
+ * @id: monitor id
+ * @wr_addr_low: address monitor will write to, low bits
+ * @wr_addr_high: address monitor will write to, high bits
+ * @wr_data: data monitor will write
+ * @arm_data: register value containing monitor configuration
+ * @status: monitor status
+ */
+struct hl_mon_state_dump {
+ u32 id;
+ u32 wr_addr_low;
+ u32 wr_addr_high;
+ u32 wr_data;
+ u32 arm_data;
+ u32 status;
+};
+
+/**
+ * struct hl_sync_to_engine_map_entry - sync object id to engine mapping entry
+ * @engine_type: type of the engine
+ * @engine_id: id of the engine
+ * @sync_id: id of the sync object
+ */
+struct hl_sync_to_engine_map_entry {
+ struct hlist_node node;
+ enum hl_sync_engine_type engine_type;
+ u32 engine_id;
+ u32 sync_id;
+};
+
+/**
+ * struct hl_sync_to_engine_map - maps sync object id to associated engine id
+ * @tb: hash table containing the mapping, each element is of type
+ * struct hl_sync_to_engine_map_entry
+ */
+struct hl_sync_to_engine_map {
+ DECLARE_HASHTABLE(tb, SYNC_TO_ENGINE_HASH_TABLE_BITS);
+};
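Entries are keyed by sync object id through the kernel's generic hashtable
macros. A hedged sketch of a lookup over such a map (the helper name is
illustrative, not from this patch):

        static struct hl_sync_to_engine_map_entry *
        map_lookup(struct hl_sync_to_engine_map *map, u32 sync_id)
        {
                struct hl_sync_to_engine_map_entry *entry;

                /* scan only the bucket that sync_id hashes into */
                hash_for_each_possible(map->tb, entry, node, sync_id)
                        if (entry->sync_id == sync_id)
                                return entry;
                return NULL;
        }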
+
+/**
+ * struct hl_state_dump_specs_funcs - virtual functions used by the state dump
+ * @gen_sync_to_engine_map: generate a hash map from sync obj id to its engine
+ * @print_single_monitor: format monitor data as string
+ * @monitor_valid: return true if given monitor dump is valid
+ * @print_fences_single_engine: format fences data as string
+ */
+struct hl_state_dump_specs_funcs {
+ int (*gen_sync_to_engine_map)(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map);
+ int (*print_single_monitor)(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon);
+ int (*monitor_valid)(struct hl_mon_state_dump *mon);
+ int (*print_fences_single_engine)(struct hl_device *hdev,
+ u64 base_offset,
+ u64 status_base_offset,
+ enum hl_sync_engine_type engine_type,
+ u32 engine_id, char **buf,
+ size_t *size, size_t *offset);
+};
+
+/**
+ * struct hl_state_dump_specs - defines the ASIC's known HW object names
+ * @so_id_to_str_tb: sync objects names index table
+ * @monitor_id_to_str_tb: monitors names index table
+ * @funcs: virtual functions used for state dump
+ * @sync_namager_names: readable names for sync manager if available (ex: N_E)
+ * @props: pointer to a per asic const props array required for state dump
+ */
+struct hl_state_dump_specs {
+ DECLARE_HASHTABLE(so_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(monitor_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
+ struct hl_state_dump_specs_funcs funcs;
+ const char * const *sync_namager_names;
+ s64 *props;
+};
+
/*
* DEVICES
@@ -1798,7 +1995,7 @@ struct hl_dbg_device_entry {
#define HL_STR_MAX 32
-#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1)
+#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
* x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
@@ -1946,11 +2143,13 @@ struct hwmon_chip_info;
* @wq: work queue for device reset procedure.
* @reset_work: reset work to be done.
* @hdev: habanalabs device structure.
+ * @fw_reset: whether F/W will perform the reset without the driver sending it a message to do it.
*/
struct hl_device_reset_work {
struct workqueue_struct *wq;
struct delayed_work reset_work;
struct hl_device *hdev;
+ bool fw_reset;
};
/**
@@ -2065,6 +2264,58 @@ struct hl_mmu_funcs {
};
/**
+ * number of user contexts allowed to call wait_for_multi_cs ioctl in
+ * parallel
+ */
+#define MULTI_CS_MAX_USER_CTX 2
+
+/**
+ * struct multi_cs_completion - multi CS wait completion.
+ * @completion: completion of any of the CS in the list
+ * @lock: spinlock for the completion structure
+ * @timestamp: timestamp for the multi-CS completion
+ * @stream_master_qid_map: bitmap of all stream masters on which the multi-CS
+ * is waiting
+ * @used: 1 if in use, otherwise 0
+ */
+struct multi_cs_completion {
+ struct completion completion;
+ spinlock_t lock;
+ s64 timestamp;
+ u32 stream_master_qid_map;
+ u8 used;
+};
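With only MULTI_CS_MAX_USER_CTX slots, a completing CS has to be fanned out to
every armed slot whose stream-master bitmap intersects its own. A rough,
assumed sketch of that wake-up test (not the patch's literal code):

        struct multi_cs_completion *mcs = &hdev->multi_cs_completion[i];

        spin_lock(&mcs->lock);
        if (mcs->used &&
            (mcs->stream_master_qid_map & fence->stream_master_qid_map))
                complete_all(&mcs->completion);
        spin_unlock(&mcs->lock);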
+
+/**
+ * struct multi_cs_data - internal data for multi CS call
+ * @ctx: pointer to the context structure
+ * @fence_arr: array of fences of all CSs
+ * @seq_arr: array of CS sequence numbers
+ * @timeout_us: timeout in usec for waiting for CS to complete
+ * @timestamp: timestamp of first completed CS
+ * @wait_status: wait for CS status
+ * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
+ * @stream_master_qid_map: bitmap of all stream master QIDs on which the
+ * multi-CS is waiting
+ * @arr_len: fence_arr and seq_arr array length
+ * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
+ * @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
+ */
+struct multi_cs_data {
+ struct hl_ctx *ctx;
+ struct hl_fence **fence_arr;
+ u64 *seq_arr;
+ s64 timeout_us;
+ s64 timestamp;
+ long wait_status;
+ u32 completion_bitmap;
+ u32 stream_master_qid_map;
+ u8 arr_len;
+ u8 gone_cs;
+ u8 update_ts;
+};
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar_phys: array of available PCIe bars physical addresses.
@@ -2129,6 +2380,8 @@ struct hl_mmu_funcs {
* @mmu_func: device-related MMU functions.
* @fw_loader: FW loader manager.
* @pci_mem_region: array of memory regions in the PCI
+ * @state_dump_specs: constants and dictionaries needed to dump system state.
+ * @multi_cs_completion: array of multi-CS completion.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2205,6 +2458,7 @@ struct hl_mmu_funcs {
* halted. We can't halt it again because the COMMS
* protocol will throw an error. Relevant only for
* cases where Linux was not loaded to device CPU
+ * @supports_wait_for_multi_cs: true if wait for multi CS is supported
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2273,6 +2527,11 @@ struct hl_device {
struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
+ struct hl_state_dump_specs state_dump_specs;
+
+ struct multi_cs_completion multi_cs_completion[
+ MULTI_CS_MAX_USER_CTX];
+ u32 *stream_master_qid_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -2322,6 +2581,8 @@ struct hl_device {
u8 curr_reset_cause;
u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
+ u8 supports_wait_for_multi_cs;
+ u8 stream_master_qid_arr_size;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2343,6 +2604,29 @@ struct hl_device {
};
+/**
+ * struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
+ * @refcount: refcount used to protect removing this id when several
+ * wait cs are used to wait on the reserved encaps signals.
+ * @hdev: pointer to habanalabs device structure.
+ * @hw_sob: pointer to H/W SOB used in the reservation.
+ * @cs_seq: staged cs sequence which contains encapsulated signals
+ * @id: idr handler id to be used to fetch the handler info
+ * @q_idx: stream queue index
+ * @pre_sob_val: current SOB value before reservation
+ * @count: signals number
+ */
+struct hl_cs_encaps_sig_handle {
+ struct kref refcount;
+ struct hl_device *hdev;
+ struct hl_hw_sob *hw_sob;
+ u64 cs_seq;
+ u32 id;
+ u32 q_idx;
+ u32 pre_sob_val;
+ u32 count;
+};
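A worked example of the bookkeeping (illustrative numbers): reserving
count = 4 signals on a queue whose SOB currently reads pre_sob_val = 8 means
the staged CS carrying the signals is expected to bring the SOB to
pre_sob_val + count = 12, which is the value stored in the completion object
by encaps_sig_first_staged_cs_handler() in hw_queue.c:

        /* illustrative numbers, not from the patch */
        pre_sob_val = 8;
        count = 4;
        cs_cmpl->sob_val = pre_sob_val + count;  /* == 12 */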
+
/*
* IOCTLs
*/
@@ -2373,6 +2657,23 @@ struct hl_ioctl_desc {
*/
/**
+ * hl_get_sg_info() - get number of pages and the DMA address from SG list.
+ * @sg: the SG list.
+ * @dma_addr: pointer to DMA address to return.
+ *
+ * Calculate the number of consecutive pages described by the SG list. Take the
+ * offset of the address in the first page, add to it the length and round it up
+ * to the number of needed pages.
+ */
+static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
+{
+ *dma_addr = sg_dma_address(sg);
+
+ return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+}
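Worked example of the page-count arithmetic (assuming PAGE_SIZE = 4096 and
PAGE_SHIFT = 12): a segment whose DMA address has intra-page offset 0x100 and
whose sg_dma_len() is 0x2000 needs

        ((0x100 + 0x2000) + 0xFFF) >> 12 = 3 pages

because the 0x100 offset pushes the last 0x100 bytes of the segment into a
third page.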
+
+/**
* hl_mem_area_inside_range() - Checks whether address+size are inside a range.
* @address: The start address of the area we want to validate.
* @size: The size in bytes of the area we want to validate.
@@ -2436,7 +2737,9 @@ void destroy_hdev(struct hl_device *hdev);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
- u32 cb_size, u64 cb_ptr);
+ u32 cb_size, u64 cb_ptr);
+void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr);
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
@@ -2470,6 +2773,8 @@ void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
+int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
+ struct hl_fence **fence, u32 arr_len);
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
@@ -2511,18 +2816,19 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
-void hl_pending_cb_list_flush(struct hl_ctx *ctx);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
void hl_fence_put(struct hl_fence *fence);
+void hl_fences_put(struct hl_fence **fence, int len);
void hl_fence_get(struct hl_fence *fence);
void cs_get(struct hl_cs *cs);
bool cs_needs_completion(struct hl_cs *cs);
bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
+void hl_multi_cs_completion_init(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -2650,9 +2956,25 @@ int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
+void hw_sob_get(struct hl_hw_sob *hw_sob);
+void hw_sob_put(struct hl_hw_sob *hw_sob);
+void hl_encaps_handle_do_release(struct kref *ref);
+void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
+ struct hl_cs *cs, struct hl_cs_job *job,
+ struct hl_cs_compl *cs_cmpl);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
- struct hl_hw_sob **hw_sob, u32 count);
+ struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
+
+int hl_state_dump(struct hl_device *hdev);
+const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id);
+const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
+ struct hl_mon_state_dump *mon);
+void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
+__printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
+ const char *format, ...);
+char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
+const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
#ifdef CONFIG_DEBUG_FS
@@ -2673,6 +2995,8 @@ void hl_debugfs_remove_userptr(struct hl_device *hdev,
struct hl_userptr *userptr);
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
+void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
+ unsigned long length);
#else
@@ -2746,6 +3070,11 @@ static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
+ char *data, unsigned long length)
+{
+}
+
#endif
/* IOCTLs */
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 4194cda2d04c..a75e4fceb9d8 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -141,7 +141,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_cb_mgr_init(&hpriv->cb_mgr);
hl_ctx_mgr_init(&hpriv->ctx_mgr);
- hpriv->taskpid = find_get_pid(current->pid);
+ hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
mutex_lock(&hdev->fpriv_list_lock);
@@ -194,7 +194,6 @@ int hl_device_open(struct inode *inode, struct file *filp)
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
-
hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
@@ -318,12 +317,16 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->asic_prop.fw_security_enabled = false;
/* Assign status description string */
- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
- "disabled", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL],
+ "operational", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
"in reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
+ "disabled", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
"needs reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
+ "in device creation", HL_STR_MAX);
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
@@ -532,7 +535,7 @@ hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)
result = PCI_ERS_RESULT_NONE;
}
- hdev->asic_funcs->halt_engines(hdev, true);
+ hdev->asic_funcs->halt_engines(hdev, true, false);
return result;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index f4dda7b4acdd..86c3257d9ae1 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -94,6 +94,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.first_available_interrupt_id =
prop->first_available_user_msix_interrupt;
+ hw_ip.server_type = prop->server_type;
+
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index bcabfdbf1e01..76b7de8f1406 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -65,7 +65,7 @@ void hl_hw_queue_update_ci(struct hl_cs *cs)
}
/*
- * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * hl_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
* H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
@@ -80,8 +80,8 @@ void hl_hw_queue_update_ci(struct hl_cs *cs)
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
- struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
+void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
+ u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -222,8 +222,8 @@ static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
* @cb_size: size of CB
* @cb_ptr: pointer to CB location
*
- * This function sends a single CB, that must NOT generate a completion entry
- *
+ * This function sends a single CB, which must NOT generate a completion entry.
+ * Sending CPU messages can be done instead via 'hl_hw_queue_submit_bd()'
*/
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
@@ -231,16 +231,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
int rc = 0;
- /*
- * The CPU queue is a synchronous queue with an effective depth of
- * a single entry (although it is allocated with room for multiple
- * entries). Therefore, there is a different lock, called
- * send_cpu_message_lock, that serializes accesses to the CPU queue.
- * As a result, we don't need to lock the access to the entire H/W
- * queues module when submitting a JOB to the CPU queue
- */
- if (q->queue_type != QUEUE_TYPE_CPU)
- hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_lock(hdev);
if (hdev->disabled) {
rc = -EPERM;
@@ -258,11 +249,10 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
- if (q->queue_type != QUEUE_TYPE_CPU)
- hdev->asic_funcs->hw_queues_unlock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
return rc;
}
@@ -328,7 +318,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
submit_bd:
- ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+ hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
@@ -407,7 +397,7 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
else
ptr = (u64) (uintptr_t) job->user_cb;
- ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+ hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
static int init_signal_cs(struct hl_device *hdev,
@@ -426,8 +416,9 @@ static int init_signal_cs(struct hl_device *hdev,
cs_cmpl->sob_val = prop->next_sob_val;
dev_dbg(hdev->dev,
- "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
- cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+ "generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
+ cs_cmpl->cs_seq);
/* we set an EB since we must make sure all operations are done
* when sending the signal
@@ -435,17 +426,37 @@ static int init_signal_cs(struct hl_device *hdev,
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
cs_cmpl->hw_sob->sob_id, 0, true);
- rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1);
+ rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
+ false);
return rc;
}
-static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
+void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
+ struct hl_cs *cs, struct hl_cs_job *job,
+ struct hl_cs_compl *cs_cmpl)
+{
+ struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+
+ cs_cmpl->hw_sob = handle->hw_sob;
+
+ /* Note that encaps_sig_wait_offset was validated earlier in the flow
+ * against the max reserved signal count.
+ * Always decrement the offset by 1: an offset of 1 means the user
+ * wants to wait only for the first signal, whose value is
+ * pre_sob_val, an offset of 2 maps to (pre_sob_val + 1), and so on.
+ */
+ cs_cmpl->sob_val = handle->pre_sob_val +
+ (job->encaps_sig_wait_offset - 1);
+}
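Worked example of the offset mapping (illustrative numbers): with
pre_sob_val = 10,

        sob_val = 10 + (1 - 1) = 10  /* offset 1: wait for the 1st signal  */
        sob_val = 10 + (3 - 1) = 12  /* offset 3: wait for the first three */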
+
+static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
- struct hl_cs_compl *signal_cs_cmpl;
- struct hl_sync_stream_properties *prop;
struct hl_gen_wait_properties wait_prop;
+ struct hl_sync_stream_properties *prop;
+ struct hl_cs_compl *signal_cs_cmpl;
u32 q_idx;
q_idx = job->hw_queue_id;
@@ -455,14 +466,51 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
struct hl_cs_compl,
base_fence);
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ if (cs->encaps_signals) {
+ /* use the encaps signal handle stored earlier in the flow
+ * and set the SOB information from the encaps
+ * signals handle
+ */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl);
+
+ dev_dbg(hdev->dev, "Wait for encaps signals handle, qidx(%u), CS sequence(%llu), sob val: 0x%x, offset: %u\n",
+ cs->encaps_sig_hdl->q_idx,
+ cs->encaps_sig_hdl->cs_seq,
+ cs_cmpl->sob_val,
+ job->encaps_sig_wait_offset);
+ } else {
+ /* Copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+ /* Check again whether the signal CS has already completed.
+ * If it has, don't send any wait CS since the hw_sob
+ * could already be in reset. If it has not, take a
+ * refcount on the hw_sob to prevent resetting the SOB
+ * while the wait CS is not yet submitted.
+ * Note that this check is protected by two locks,
+ * the hw queue lock and the completion object lock,
+ * and the same completion object lock also protects
+ * the hw_sob reset handler function.
+ * The hw queue lock prevents the hw_sob refcount,
+ * changed by the signal/wait flows, from going out of sync.
+ */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
dev_dbg(hdev->dev,
- "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+ "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d, seq: %llu\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
- prop->base_mon_id, q_idx);
+ prop->base_mon_id, q_idx, cs->sequence);
wait_prop.data = (void *) job->patched_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
@@ -471,17 +519,14 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
wait_prop.mon_id = prop->base_mon_id;
wait_prop.q_idx = q_idx;
wait_prop.size = 0;
+
hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
/*
@@ -506,7 +551,60 @@ static int init_signal_wait_cs(struct hl_cs *cs)
if (cs->type & CS_TYPE_SIGNAL)
rc = init_signal_cs(hdev, job, cs_cmpl);
else if (cs->type & CS_TYPE_WAIT)
- init_wait_cs(hdev, cs, job, cs_cmpl);
+ rc = init_wait_cs(hdev, cs, job, cs_cmpl);
+
+ return rc;
+}
+
+static int encaps_sig_first_staged_cs_handler
+ (struct hl_device *hdev, struct hl_cs *cs)
+{
+ struct hl_cs_compl *cs_cmpl =
+ container_of(cs->fence,
+ struct hl_cs_compl, base_fence);
+ struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ struct hl_encaps_signals_mgr *mgr;
+ int rc = 0;
+
+ mgr = &hdev->compute_ctx->sig_mgr;
+
+ spin_lock(&mgr->lock);
+ encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
+ if (encaps_sig_hdl) {
+ /*
+ * Set handler CS sequence,
+ * the CS which contains the encapsulated signals.
+ */
+ encaps_sig_hdl->cs_seq = cs->sequence;
+ /* store the handle and set encaps signal indication,
+ * to be used later in cs_do_release to put the last
+ * reference to encaps signals handlers.
+ */
+ cs_cmpl->encaps_signals = true;
+ cs_cmpl->encaps_sig_hdl = encaps_sig_hdl;
+
+ /* set hw_sob pointer in completion object
+ * since it's used in cs_do_release flow to put
+ * refcount to sob
+ */
+ cs_cmpl->hw_sob = encaps_sig_hdl->hw_sob;
+ cs_cmpl->sob_val = encaps_sig_hdl->pre_sob_val +
+ encaps_sig_hdl->count;
+
+ dev_dbg(hdev->dev, "CS seq (%llu) added to encaps signal handler id (%u), count(%u), qidx(%u), sob(%u), val(%u)\n",
+ cs->sequence, encaps_sig_hdl->id,
+ encaps_sig_hdl->count,
+ encaps_sig_hdl->q_idx,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+
+ } else {
+ dev_err(hdev->dev, "encaps handle id(%u) wasn't found!\n",
+ cs->encaps_sig_hdl_id);
+ rc = -EINVAL;
+ }
+
+ spin_unlock(&mgr->lock);
return rc;
}
@@ -581,14 +679,21 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) {
rc = init_signal_wait_cs(cs);
- if (rc) {
- dev_err(hdev->dev, "Failed to submit signal cs\n");
+ if (rc)
goto unroll_cq_resv;
- }
- } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
- hdev->asic_funcs->collective_wait_init_cs(cs);
+ } else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) {
+ rc = hdev->asic_funcs->collective_wait_init_cs(cs);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+ if (cs->encaps_signals && cs->staged_first) {
+ rc = encaps_sig_first_staged_cs_handler(hdev, cs);
+ if (rc)
+ goto unroll_cq_resv;
+ }
+
spin_lock(&hdev->cs_mirror_lock);
/* Verify staged CS exists and add to the staged list */
@@ -613,6 +718,11 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_add_tail(&cs->staged_cs_node, &staged_cs->staged_cs_node);
+
+ /* update stream map of the first CS */
+ if (hdev->supports_wait_for_multi_cs)
+ staged_cs->fence->stream_master_qid_map |=
+ cs->fence->stream_master_qid_map;
}
list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
@@ -834,6 +944,8 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
hw_sob = &sync_stream_prop->hw_sob[sob];
hw_sob->hdev = hdev;
hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
+ hw_sob->sob_addr =
+ hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
hw_sob->q_idx = q_idx;
kref_init(&hw_sob->kref);
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index af339ce1ab4f..33986933aa9e 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -124,7 +124,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
spin_lock(&vm->idr_lock);
handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
- GFP_KERNEL);
+ GFP_ATOMIC);
spin_unlock(&vm->idr_lock);
if (handle < 0) {
@@ -529,6 +529,33 @@ static inline int add_va_block(struct hl_device *hdev,
}
/**
+ * is_hint_crossing_range() - check if the hint address crosses the specified
+ * reserved range.
+ */
+static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
+ u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
+ bool range_cross;
+
+ if (range_type == HL_VA_RANGE_TYPE_DRAM)
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_dram_reserved_va_range.start_addr,
+ prop->hints_dram_reserved_va_range.end_addr);
+ else if (range_type == HL_VA_RANGE_TYPE_HOST)
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_host_reserved_va_range.start_addr,
+ prop->hints_host_reserved_va_range.end_addr);
+ else
+ range_cross =
+ hl_mem_area_crosses_range(start_addr, size,
+ prop->hints_host_hpage_reserved_va_range.start_addr,
+ prop->hints_host_hpage_reserved_va_range.end_addr);
+
+ return range_cross;
+}
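hl_mem_area_crosses_range() is an existing helper elsewhere in the driver; it
is assumed here to be the standard closed-interval overlap test, roughly:

        /* sketch of the assumed semantics, not the driver's literal code */
        static inline bool crosses(u64 addr, u64 size, u64 range_start,
                                        u64 range_end)
        {
                u64 end = addr + size - 1;

                return addr <= range_end && end >= range_start;
        }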
+
+/**
* get_va_block() - get a virtual block for the given size and alignment.
*
* @hdev: pointer to the habanalabs device structure.
@@ -536,6 +563,8 @@ static inline int add_va_block(struct hl_device *hdev,
* @size: requested block size.
* @hint_addr: hint for requested address by the user.
* @va_block_align: required alignment of the virtual block start address.
+ * @range_type: va range type (host, dram)
+ * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
@@ -545,13 +574,19 @@ static inline int add_va_block(struct hl_device *hdev,
*/
static u64 get_va_block(struct hl_device *hdev,
struct hl_va_range *va_range,
- u64 size, u64 hint_addr, u32 va_block_align)
+ u64 size, u64 hint_addr, u32 va_block_align,
+ enum hl_va_range_type range_type,
+ u32 flags)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
- align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
+ align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
+ dram_hint_mask = prop->dram_hints_align_mask;
bool add_prev = false;
bool is_align_pow_2 = is_power_of_2(va_range->page_size);
+ bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
+ bool force_hint = flags & HL_MEM_FORCE_HINT;
if (is_align_pow_2)
align_mask = ~((u64)va_block_align - 1);
@@ -564,12 +599,20 @@ static u64 get_va_block(struct hl_device *hdev,
size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
va_range->page_size;
- tmp_hint_addr = hint_addr;
+ tmp_hint_addr = hint_addr & ~dram_hint_mask;
/* Check if we need to ignore hint address */
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
- (!is_align_pow_2 &&
- do_div(tmp_hint_addr, va_range->page_size))) {
+ (!is_align_pow_2 && is_hint_dram_addr &&
+ do_div(tmp_hint_addr, va_range->page_size))) {
+
+ if (force_hint) {
+ /* Hint must be respected, so here we just fail */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx is not page aligned - cannot be respected\n",
+ hint_addr);
+ return 0;
+ }
dev_dbg(hdev->dev,
"Hint address 0x%llx will be ignored because it is not aligned\n",
@@ -596,6 +639,16 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size < size)
continue;
+ /*
+ * In case the hint address is 0 and the hints_range_reservation
+ * property is enabled, avoid allocating va blocks from the
+ * range reserved for hint addresses
+ */
+ if (prop->hints_range_reservation && !hint_addr)
+ if (is_hint_crossing_range(range_type, valid_start,
+ size, prop))
+ continue;
+
/* Pick the minimal length block which has the required size */
if (!new_va_block || (valid_size < reserved_valid_size)) {
new_va_block = va_block;
@@ -618,6 +671,17 @@ static u64 get_va_block(struct hl_device *hdev,
goto out;
}
+ if (force_hint && reserved_valid_start != hint_addr) {
+ /* Hint address must be respected. If we are here - this means
+ * we could not respect it.
+ */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx could not be respected\n",
+ hint_addr);
+ reserved_valid_start = 0;
+ goto out;
+ }
+
/*
* Check if there is some leftover range due to reserving the new
* va block, then return it to the main virtual addresses list.
@@ -670,7 +734,8 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_va_range_type type, u32 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
- max(alignment, ctx->va_range[type]->page_size));
+ max(alignment, ctx->va_range[type]->page_size),
+ type, 0);
}
/**
@@ -732,28 +797,15 @@ int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
}
/**
- * get_sg_info() - get number of pages and the DMA address from SG list.
- * @sg: the SG list.
- * @dma_addr: pointer to DMA address to return.
- *
- * Calculate the number of consecutive pages described by the SG list. Take the
- * offset of the address in the first page, add to it the length and round it up
- * to the number of needed pages.
- */
-static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
-{
- *dma_addr = sg_dma_address(sg);
-
- return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-}
-
-/**
* init_phys_pg_pack_from_userptr() - initialize physical page pack from host
* memory
* @ctx: pointer to the context structure.
* @userptr: userptr to initialize from.
* @pphys_pg_pack: result pointer.
+ * @force_regular_page: tell the function to ignore huge page optimization,
+ * even if possible. Needed for cases where the device VA
+ * is allocated before we know the composition of the
+ * physical pages
*
* This function does the following:
* - Pin the physical pages related to the given virtual block.
@@ -762,17 +814,18 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_vm_phys_pg_pack **pphys_pg_pack,
+ bool force_regular_page)
{
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ bool first = true, is_huge_page_opt;
+ u64 page_mask, total_npages;
struct scatterlist *sg;
dma_addr_t dma_addr;
- u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE,
- huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
- bool first = true, is_huge_page_opt = true;
int rc, i, j;
- u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -783,6 +836,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->asid = ctx->asid;
atomic_set(&phys_pg_pack->mapping_cnt, 1);
+ is_huge_page_opt = (force_regular_page ? false : true);
+
/* Only if all dma_addrs are aligned to 2MB and their
* sizes is at least 2MB, we can use huge page mapping.
* We limit the 2MB optimization to this condition,
@@ -791,7 +846,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
*/
total_npages = 0;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
- npages = get_sg_info(sg, &dma_addr);
+ npages = hl_get_sg_info(sg, &dma_addr);
total_npages += npages;
@@ -820,7 +875,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
j = 0;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
- npages = get_sg_info(sg, &dma_addr);
+ npages = hl_get_sg_info(sg, &dma_addr);
/* align down to physical page size and save the offset */
if (first) {
@@ -1001,11 +1056,12 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
struct hl_va_range *va_range;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
u64 ret_vaddr, hint_addr;
u32 handle = 0, va_block_align;
int rc;
bool is_userptr = args->flags & HL_MEM_USERPTR;
+ enum hl_va_range_type va_range_type = 0;
/* Assume failure */
*device_addr = 0;
@@ -1023,7 +1079,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
}
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack, false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1031,14 +1087,14 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto init_page_pack_err;
}
- vm_type = (enum vm_type_t *) userptr;
+ vm_type = (enum vm_type *) userptr;
hint_addr = args->map_host.hint_addr;
handle = phys_pg_pack->handle;
/* get required alignment */
if (phys_pg_pack->page_size == page_size) {
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
-
+ va_range_type = HL_VA_RANGE_TYPE_HOST;
/*
* huge page alignment may be needed in case of regular
* page mapping, depending on the host VA alignment
@@ -1053,6 +1109,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
* mapping
*/
va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
+ va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
va_block_align = huge_page_size;
}
} else {
@@ -1072,12 +1129,13 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
spin_unlock(&vm->idr_lock);
- vm_type = (enum vm_type_t *) phys_pg_pack;
+ vm_type = (enum vm_type *) phys_pg_pack;
hint_addr = args->map_device.hint_addr;
/* DRAM VA alignment is the same as the MMU page size */
va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
+ va_range_type = HL_VA_RANGE_TYPE_DRAM;
va_block_align = hdev->asic_prop.dmmu.page_size;
}
@@ -1100,8 +1158,23 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto hnode_err;
}
+ if (hint_addr && phys_pg_pack->offset) {
+ if (args->flags & HL_MEM_FORCE_HINT) {
+ /* Fail if hint must be respected but it can't be */
+ dev_err(hdev->dev,
+ "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
+ hint_addr, phys_pg_pack->offset);
+ rc = -EINVAL;
+ goto va_block_err;
+ }
+ dev_dbg(hdev->dev,
+ "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
+ hint_addr, phys_pg_pack->offset);
+ }
+
ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
- hint_addr, va_block_align);
+ hint_addr, va_block_align,
+ va_range_type, args->flags);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
@@ -1181,17 +1254,19 @@ init_page_pack_err:
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
bool ctx_free)
{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ u64 vaddr = args->unmap.device_virt_addr;
struct hl_vm_hash_node *hnode = NULL;
+ struct asic_fixed_properties *prop;
+ struct hl_device *hdev = ctx->hdev;
struct hl_userptr *userptr = NULL;
struct hl_va_range *va_range;
- u64 vaddr = args->unmap.device_virt_addr;
- enum vm_type_t *vm_type;
+ enum vm_type *vm_type;
bool is_userptr;
int rc = 0;
+ prop = &hdev->asic_prop;
+
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
@@ -1214,8 +1289,9 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
userptr = hnode->ptr;
- rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+
+ rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
+ false);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1299,7 +1375,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
kfree(hnode);
if (is_userptr) {
- rc = free_phys_pg_pack(hdev, phys_pg_pack);
+ free_phys_pg_pack(hdev, phys_pg_pack);
dma_unmap_host_va(hdev, userptr);
}
@@ -1669,6 +1745,7 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ userptr->pid = current->pid;
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
if (!userptr->sgt)
return -ENOMEM;
@@ -2033,7 +2110,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* another side effect error
*/
if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev,
+ dev_dbg(hdev->dev,
"user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index c5e93ff32586..0f536f79dd9c 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -470,13 +470,13 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
- }
- /* Make sure that if we arrive here again without init was called we
- * won't cause kernel panic. This can happen for example if we fail
- * during hard reset code at certain points
- */
- hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+ /* Make sure that if we arrive here again without init having been
+ * called, we won't cause a kernel panic. This can happen, for
+ * example, if we fail at certain points during the hard reset code
+ */
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+ }
}
/**
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index d5bedf5ba011..0b5366cc84fd 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -436,6 +436,8 @@ int hl_pci_init(struct hl_device *hdev)
goto unmap_pci_bars;
}
+ dma_set_max_seg_size(&pdev->dev, U32_MAX);
+
return 0;
unmap_pci_bars:
diff --git a/drivers/misc/habanalabs/common/state_dump.c b/drivers/misc/habanalabs/common/state_dump.c
new file mode 100644
index 000000000000..74726907c95e
--- /dev/null
+++ b/drivers/misc/habanalabs/common/state_dump.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+/**
+ * hl_format_as_binary - helper function, format an integer as binary
+ * using supplied scratch buffer
+ * @buf: the buffer to use
+ * @buf_len: buffer capacity
+ * @n: number to format
+ *
+ * Returns pointer to buffer
+ */
+char *hl_format_as_binary(char *buf, size_t buf_len, u32 n)
+{
+ int i;
+ u32 bit;
+ bool leading0 = true;
+ char *wrptr = buf;
+
+ if (buf_len > 0 && buf_len < 3) {
+ *wrptr = '\0';
+ return buf;
+ }
+
+ wrptr[0] = '0';
+ wrptr[1] = 'b';
+ wrptr += 2;
+ /* Remove 3 characters from length for '0b' and '\0' termination */
+ buf_len -= 3;
+
+ for (i = 0; i < sizeof(n) * BITS_PER_BYTE && buf_len; ++i, n <<= 1) {
+ /* Writing bit calculation in one line would cause a false
+ * positive static code analysis error, so splitting.
+ */
+ bit = n & (1 << (sizeof(n) * BITS_PER_BYTE - 1));
+ bit = !!bit;
+ leading0 &= !bit;
+ if (!leading0) {
+ *wrptr = '0' + bit;
+ ++wrptr;
+ }
+ }
+
+ *wrptr = '\0';
+
+ return buf;
+}
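[Review annotation, not part of the patch] A minimal usage sketch of the
helper above; the buffer size and the pr_info() call are illustrative only:

	char scratch[36]; /* "0b" + up to 32 digits + NUL, with headroom */

	/* 0xB has bits 3, 1 and 0 set, so this prints "mask: 0b1011" */
	pr_info("mask: %s\n", hl_format_as_binary(scratch, sizeof(scratch), 0xB));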
+
+/**
+ * resize_to_fit - helper function, resize buffer to fit given amount of data
+ * @buf: destination buffer double pointer
+ * @size: pointer to the size container
+ * @desired_size: size the buffer must contain
+ *
+ * Returns 0 if the buffer already fits, 1 if it had to be resized, or a
+ * negative error code on failure.
+ * On success, the size of the buffer is at least desired_size. The buffer is
+ * allocated via vmalloc and must be freed with vfree.
+ */
+static int resize_to_fit(char **buf, size_t *size, size_t desired_size)
+{
+ char *resized_buf;
+ size_t new_size;
+
+ if (*size >= desired_size)
+ return 0;
+
+ /* Not enough space to print all, have to resize */
+ new_size = max_t(size_t, PAGE_SIZE, round_up(desired_size, PAGE_SIZE));
+ resized_buf = vmalloc(new_size);
+ if (!resized_buf)
+ return -ENOMEM;
+ memcpy(resized_buf, *buf, *size);
+ vfree(*buf);
+ *buf = resized_buf;
+ *size = new_size;
+
+ return 1;
+}
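[Review annotation, not part of the patch] Growth is page-granular: on a 4K
page system a desired_size of 5000 becomes two full pages, so repeated small
appends do not reallocate byte by byte, and max_t() guarantees at least one
page even for tiny requests:

	new_size = max_t(size_t, PAGE_SIZE, round_up(5000, PAGE_SIZE)); /* 8192 */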
+
+/**
+ * hl_snprintf_resize() - print formatted data to buffer, resize as needed
+ * @buf: buffer double pointer, to be written to and resized, must be either
+ * NULL or allocated with vmalloc.
+ * @size: current size of the buffer
+ * @offset: current offset to write to
+ * @format: format of the data
+ *
+ * This function will write formatted data into the buffer. If the buffer is
+ * not large enough, it will be resized using vmalloc. Size may be modified
+ * if the buffer was resized; offset will be advanced by the number of bytes
+ * written, not including the terminating NUL character.
+ *
+ * Returns 0 on success or error code on failure
+ *
+ * Note that the buffer has to be manually released using vfree.
+ */
+int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
+ const char *format, ...)
+{
+ va_list args;
+ size_t length;
+ int rc;
+
+ if (*buf == NULL && (*size != 0 || *offset != 0))
+ return -EINVAL;
+
+ va_start(args, format);
+ length = vsnprintf(*buf + *offset, *size - *offset, format, args);
+ va_end(args);
+
+ rc = resize_to_fit(buf, size, *offset + length + 1);
+ if (rc < 0)
+ return rc;
+ else if (rc > 0) {
+ /* Resize was needed, write again */
+ va_start(args, format);
+ length = vsnprintf(*buf + *offset, *size - *offset, format,
+ args);
+ va_end(args);
+ }
+
+ *offset += length;
+
+ return 0;
+}
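[Review annotation, not part of the patch] A sketch of the intended calling
convention, assuming the documented NULL/0/0 starting state; the format
strings are invented:

	char *buf = NULL;
	size_t size = 0, offset = 0;
	int rc;

	rc = hl_snprintf_resize(&buf, &size, &offset, "engines: %u\n", 8);
	if (!rc)
		rc = hl_snprintf_resize(&buf, &size, &offset, "done\n");
	/* ... consume buf ... */
	vfree(buf); /* per the contract above, always released with vfree */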
+
+/**
+ * hl_sync_engine_to_string - convert engine type enum to string literal
+ * @engine_type: engine type (TPC/MME/DMA)
+ *
+ * Return the resolved string literal
+ */
+const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type)
+{
+ switch (engine_type) {
+ case ENGINE_DMA:
+ return "DMA";
+ case ENGINE_MME:
+ return "MME";
+ case ENGINE_TPC:
+ return "TPC";
+ }
+ return "Invalid Engine Type";
+}
+
+/**
+ * hl_print_resize_sync_engine - helper function, format engine name and ID
+ * using hl_snprintf_resize
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ * @engine_type: engine type (TPC/MME/DMA)
+ * @engine_id: engine numerical id
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_print_resize_sync_engine(char **buf, size_t *size, size_t *offset,
+ enum hl_sync_engine_type engine_type,
+ u32 engine_id)
+{
+ return hl_snprintf_resize(buf, size, offset, "%s%u",
+ hl_sync_engine_to_string(engine_type), engine_id);
+}
+
+/**
+ * hl_state_dump_get_sync_name - transform sync object id to name if available
+ * @hdev: pointer to the device
+ * @sync_id: sync object id
+ *
+ * Returns a name literal or NULL if not resolved.
+ * Note: returning NULL shall not be considered a failure, as not all
+ * sync objects are named.
+ */
+const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_hw_obj_name_entry *entry;
+
+ hash_for_each_possible(sds->so_id_to_str_tb, entry,
+ node, sync_id)
+ if (sync_id == entry->id)
+ return entry->name;
+
+ return NULL;
+}
+
+/**
+ * hl_state_dump_get_monitor_name - transform monitor object dump to monitor
+ * name if available
+ * @hdev: pointer to the device
+ * @mon: monitor state dump
+ *
+ * Returns a name literal or NULL if not resolved.
+ * Note: returning NULL shall not be considered a failure, as not all
+ * monitors are named.
+ */
+const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_hw_obj_name_entry *entry;
+
+ hash_for_each_possible(sds->monitor_id_to_str_tb,
+ entry, node, mon->id)
+ if (mon->id == entry->id)
+ return entry->name;
+
+ return NULL;
+}
+
+/**
+ * hl_state_dump_free_sync_to_engine_map - free sync object to engine map
+ * @map: sync object to engine map
+ *
+ * Note: generic free implementation, the allocation is implemented per ASIC.
+ */
+void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+ struct hlist_node *tmp_node;
+ int i;
+
+ hash_for_each_safe(map->tb, i, tmp_node, entry, node) {
+ hash_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * hl_state_dump_get_sync_to_engine - transform sync_id to
+ * hl_sync_to_engine_map_entry if available for current id
+ * @map: sync object to engine map
+ * @sync_id: sync object id
+ *
+ * Returns the translation entry if found or NULL if not.
+ * Note: a returned NULL shall not be considered a failure, as the map
+ * does not cover all possible sync ids; it is best effort.
+ */
+static struct hl_sync_to_engine_map_entry *
+hl_state_dump_get_sync_to_engine(struct hl_sync_to_engine_map *map, u32 sync_id)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+
+ hash_for_each_possible(map->tb, entry, node, sync_id)
+ if (entry->sync_id == sync_id)
+ return entry;
+ return NULL;
+}
+
+/**
+ * hl_state_dump_read_sync_objects - read sync objects array
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ *
+ * Returns array of size SP_SYNC_OBJ_AMOUNT on success or NULL on failure
+ */
+static u32 *hl_state_dump_read_sync_objects(struct hl_device *hdev, u32 index)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ u32 *sync_objects;
+ s64 base_addr; /* Base addr can be negative */
+ int i;
+
+ base_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
+ sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
+
+ sync_objects = vmalloc(sds->props[SP_SYNC_OBJ_AMOUNT] * sizeof(u32));
+ if (!sync_objects)
+ return NULL;
+
+ for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i)
+ sync_objects[i] = RREG32(base_addr + i * sizeof(u32));
+
+ return sync_objects;
+}
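[Review annotation, not part of the patch] The register address of sync
object <k> in sync manager block <n> follows from the property table; the
print loop below recomputes the same arithmetic inline. As a sketch (the
helper name is hypothetical):

	static u64 sob_reg_addr(struct hl_state_dump_specs *sds, u32 n, u32 k)
	{
		return sds->props[SP_SYNC_OBJ_BASE_ADDR] +
		       sds->props[SP_NEXT_SYNC_OBJ_ADDR] * n +
		       k * sizeof(u32);
	}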
+
+/**
+ * hl_state_dump_free_sync_objects - free sync objects array allocated by
+ * hl_state_dump_read_sync_objects
+ * @sync_objects: sync objects array
+ */
+static void hl_state_dump_free_sync_objects(u32 *sync_objects)
+{
+ vfree(sync_objects);
+}
+
+/**
+ * hl_state_dump_print_syncs_single_block - print active sync objects on a
+ * single block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ * @map: sync engines names map
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int
+hl_state_dump_print_syncs_single_block(struct hl_device *hdev, u32 index,
+ char **buf, size_t *size, size_t *offset,
+ struct hl_sync_to_engine_map *map)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ const char *sync_name;
+ u32 *sync_objects = NULL;
+ int rc = 0, i;
+
+ if (sds->sync_namager_names) {
+ rc = hl_snprintf_resize(
+ buf, size, offset, "%s\n",
+ sds->sync_namager_names[index]);
+ if (rc)
+ goto out;
+ }
+
+ sync_objects = hl_state_dump_read_sync_objects(hdev, index);
+ if (!sync_objects) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < sds->props[SP_SYNC_OBJ_AMOUNT]; ++i) {
+ struct hl_sync_to_engine_map_entry *entry;
+ u64 sync_object_addr;
+
+ if (!sync_objects[i])
+ continue;
+
+ sync_object_addr = sds->props[SP_SYNC_OBJ_BASE_ADDR] +
+ sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index +
+ i * sizeof(u32);
+
+ rc = hl_snprintf_resize(buf, size, offset, "sync id: %u", i);
+ if (rc)
+ goto free_sync_objects;
+ sync_name = hl_state_dump_get_sync_name(hdev, i);
+ if (sync_name) {
+ rc = hl_snprintf_resize(buf, size, offset, " %s",
+ sync_name);
+ if (rc)
+ goto free_sync_objects;
+ }
+ rc = hl_snprintf_resize(buf, size, offset, ", value: %u",
+ sync_objects[i]);
+ if (rc)
+ goto free_sync_objects;
+
+ /* Append engine string */
+ entry = hl_state_dump_get_sync_to_engine(map,
+ (u32)sync_object_addr);
+ if (entry) {
+ rc = hl_snprintf_resize(buf, size, offset,
+ ", Engine: ");
+ if (rc)
+ goto free_sync_objects;
+ rc = hl_print_resize_sync_engine(buf, size, offset,
+ entry->engine_type,
+ entry->engine_id);
+ if (rc)
+ goto free_sync_objects;
+ }
+
+ rc = hl_snprintf_resize(buf, size, offset, "\n");
+ if (rc)
+ goto free_sync_objects;
+ }
+
+free_sync_objects:
+ hl_state_dump_free_sync_objects(sync_objects);
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_syncs - print active sync objects
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_syncs(struct hl_device *hdev,
+ char **buf, size_t *size,
+ size_t *offset)
+
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_sync_to_engine_map *map;
+ u32 index;
+ int rc = 0;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ rc = sds->funcs.gen_sync_to_engine_map(hdev, map);
+ if (rc)
+ goto free_map_mem;
+
+ rc = hl_snprintf_resize(buf, size, offset, "Non zero sync objects:\n");
+ if (rc)
+ goto out;
+
+ if (sds->sync_namager_names) {
+ for (index = 0; sds->sync_namager_names[index]; ++index) {
+ rc = hl_state_dump_print_syncs_single_block(
+ hdev, index, buf, size, offset, map);
+ if (rc)
+ goto out;
+ }
+ } else {
+ for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
+ rc = hl_state_dump_print_syncs_single_block(
+ hdev, index, buf, size, offset, map);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ hl_state_dump_free_sync_to_engine_map(map);
+free_map_mem:
+ kfree(map);
+
+ return rc;
+}
+
+/**
+ * hl_state_dump_alloc_read_sm_block_monitors - read monitors for a specific
+ * block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ *
+ * Returns an array of monitor data of size SP_MONITORS_AMOUNT or NULL
+ * on error
+ */
+static struct hl_mon_state_dump *
+hl_state_dump_alloc_read_sm_block_monitors(struct hl_device *hdev, u32 index)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_mon_state_dump *monitors;
+ s64 base_addr; /* Base addr can be negative */
+ int i;
+
+ monitors = vmalloc(sds->props[SP_MONITORS_AMOUNT] *
+ sizeof(struct hl_mon_state_dump));
+ if (!monitors)
+ return NULL;
+
+ base_addr = sds->props[SP_NEXT_SYNC_OBJ_ADDR] * index;
+
+ for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
+ monitors[i].id = i;
+ monitors[i].wr_addr_low =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_LOW] +
+ i * sizeof(u32));
+
+ monitors[i].wr_addr_high =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_ADDR_HIGH] +
+ i * sizeof(u32));
+
+ monitors[i].wr_data =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_WR_DATA] +
+ i * sizeof(u32));
+
+ monitors[i].arm_data =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_ARM_DATA] +
+ i * sizeof(u32));
+
+ monitors[i].status =
+ RREG32(base_addr + sds->props[SP_MON_OBJ_STATUS] +
+ i * sizeof(u32));
+ }
+
+ return monitors;
+}
+
+/**
+ * hl_state_dump_free_monitors - free the monitors structure
+ * @monitors: monitors array created with
+ * hl_state_dump_alloc_read_sm_block_monitors
+ */
+static void hl_state_dump_free_monitors(struct hl_mon_state_dump *monitors)
+{
+ vfree(monitors);
+}
+
+/**
+ * hl_state_dump_print_monitors_single_block - print active monitors on a
+ * single block
+ * @hdev: pointer to the device
+ * @index: sync manager block index starting with E_N
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_monitors_single_block(struct hl_device *hdev,
+ u32 index,
+ char **buf, size_t *size,
+ size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct hl_mon_state_dump *monitors = NULL;
+ int rc = 0, i;
+
+ if (sds->sync_namager_names) {
+ rc = hl_snprintf_resize(
+ buf, size, offset, "%s\n",
+ sds->sync_namager_names[index]);
+ if (rc)
+ goto out;
+ }
+
+ monitors = hl_state_dump_alloc_read_sm_block_monitors(hdev, index);
+ if (!monitors) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < sds->props[SP_MONITORS_AMOUNT]; ++i) {
+ if (!(sds->funcs.monitor_valid(&monitors[i])))
+ continue;
+
+ /* Monitor is valid, dump it */
+ rc = sds->funcs.print_single_monitor(buf, size, offset, hdev,
+ &monitors[i]);
+ if (rc)
+ goto free_monitors;
+
+ hl_snprintf_resize(buf, size, offset, "\n");
+ }
+
+free_monitors:
+ hl_state_dump_free_monitors(monitors);
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_monitors - print active monitors
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ *
+ * Returns 0 on success or error code on failure
+ */
+static int hl_state_dump_print_monitors(struct hl_device *hdev,
+ char **buf, size_t *size,
+ size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ u32 index;
+ int rc = 0;
+
+ rc = hl_snprintf_resize(buf, size, offset,
+ "Valid (armed) monitor objects:\n");
+ if (rc)
+ goto out;
+
+ if (sds->sync_namager_names) {
+ for (index = 0; sds->sync_namager_names[index]; ++index) {
+ rc = hl_state_dump_print_monitors_single_block(
+ hdev, index, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+ } else {
+ for (index = 0; index < sds->props[SP_NUM_CORES]; ++index) {
+ rc = hl_state_dump_print_monitors_single_block(
+ hdev, index, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_engine_fences - print active fences for a specific
+ * engine
+ * @hdev: pointer to the device
+ * @engine_type: engine type to use
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ */
+static int
+hl_state_dump_print_engine_fences(struct hl_device *hdev,
+ enum hl_sync_engine_type engine_type,
+ char **buf, size_t *size, size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int rc = 0, i, n_fences;
+ u64 base_addr, next_fence;
+
+ switch (engine_type) {
+ case ENGINE_TPC:
+ n_fences = sds->props[SP_NUM_OF_TPC_ENGINES];
+ base_addr = sds->props[SP_TPC0_CMDQ];
+ next_fence = sds->props[SP_NEXT_TPC];
+ break;
+ case ENGINE_MME:
+ n_fences = sds->props[SP_NUM_OF_MME_ENGINES];
+ base_addr = sds->props[SP_MME_CMDQ];
+ next_fence = sds->props[SP_NEXT_MME];
+ break;
+ case ENGINE_DMA:
+ n_fences = sds->props[SP_NUM_OF_DMA_ENGINES];
+ base_addr = sds->props[SP_DMA_CMDQ];
+ next_fence = sds->props[SP_DMA_QUEUES_OFFSET];
+ break;
+ default:
+ return -EINVAL;
+ }
+ for (i = 0; i < n_fences; ++i) {
+ rc = sds->funcs.print_fences_single_engine(
+ hdev,
+ base_addr + next_fence * i +
+ sds->props[SP_FENCE0_CNT_OFFSET],
+ base_addr + next_fence * i +
+ sds->props[SP_CP_STS_OFFSET],
+ engine_type, i, buf, size, offset);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump_print_fences - print active fences
+ * @hdev: pointer to the device
+ * @buf: destination buffer double pointer to be used with hl_snprintf_resize
+ * @size: pointer to the size container
+ * @offset: pointer to the offset container
+ */
+static int hl_state_dump_print_fences(struct hl_device *hdev, char **buf,
+ size_t *size, size_t *offset)
+{
+ int rc = 0;
+
+ rc = hl_snprintf_resize(buf, size, offset, "Valid (armed) fences:\n");
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_TPC, buf, size, offset);
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_MME, buf, size, offset);
+ if (rc)
+ goto out;
+
+ rc = hl_state_dump_print_engine_fences(hdev, ENGINE_DMA, buf, size, offset);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
+
+/**
+ * hl_state_dump() - dump system state
+ * @hdev: pointer to device structure
+ */
+int hl_state_dump(struct hl_device *hdev)
+{
+ char *buf = NULL;
+ size_t offset = 0, size = 0;
+ int rc;
+
+ rc = hl_snprintf_resize(&buf, &size, &offset,
+ "Timestamp taken on: %llu\n\n",
+ ktime_to_ns(ktime_get()));
+ if (rc)
+ goto err;
+
+ rc = hl_state_dump_print_syncs(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ rc = hl_state_dump_print_monitors(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ rc = hl_state_dump_print_fences(hdev, &buf, &size, &offset);
+ if (rc)
+ goto err;
+
+ hl_snprintf_resize(&buf, &size, &offset, "\n");
+
+ hl_debugfs_set_state_dump(hdev, buf, size);
+
+ return 0;
+err:
+ vfree(buf);
+ return rc;
+}
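[Review annotation, not part of the patch] Illustrative shape of the dump
this function assembles (all values invented):

	Timestamp taken on: 131213899103

	Non zero sync objects:
	SYNC_MGR_E_N
	sync id: 4 SYNC_OBJ_FIRST_COMPUTE_FINISH, value: 1, Engine: TPC0

	Valid (armed) monitor objects:
	...

	Valid (armed) fences:
	...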
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index db72df282ef8..34f9f2779962 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,8 +9,7 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
- bool curr)
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
@@ -44,8 +43,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
- u64 freq)
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
{
struct cpucp_packet pkt;
u32 used_pll_idx;
@@ -285,16 +283,12 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- char *str;
+ char str[HL_STR_MAX];
- if (atomic_read(&hdev->in_reset))
- str = "In reset";
- else if (hdev->disabled)
- str = "Malfunction";
- else if (hdev->needs_reset)
- str = "Needs Reset";
- else
- str = "Operational";
+ strscpy(str, hdev->status[hl_device_status(hdev)], HL_STR_MAX);
+
+ /* use an uppercase first letter for backward compatibility */
+ str[0] = 'A' + (str[0] - 'a');
return sprintf(buf, "%s\n", str);
}
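[Review annotation, not part of the patch] This assumes the hdev->status[]
strings start with a lowercase ASCII letter ("operational", "in reset", ...),
in which case the expression above is equivalent to:

	str[0] = toupper(str[0]); /* <linux/ctype.h>; identical for 'a'..'z' */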
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index aa8a0ca5aca2..383865be3c2c 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -76,7 +76,7 @@
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
-#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
@@ -106,6 +106,21 @@
#define GAUDI_PLL_MAX 10
+#define BIN_REG_STRING_SIZE sizeof("0b10101010101010101010101010101010")
+
+#define MONITOR_SOB_STRING_SIZE 256
+
+static u32 gaudi_stream_master[GAUDI_STREAM_MASTER_ARR_SIZE] = {
+ GAUDI_QUEUE_ID_DMA_0_0,
+ GAUDI_QUEUE_ID_DMA_0_1,
+ GAUDI_QUEUE_ID_DMA_0_2,
+ GAUDI_QUEUE_ID_DMA_0_3,
+ GAUDI_QUEUE_ID_DMA_1_0,
+ GAUDI_QUEUE_ID_DMA_1_1,
+ GAUDI_QUEUE_ID_DMA_1_2,
+ GAUDI_QUEUE_ID_DMA_1_3
+};
+
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -348,6 +363,97 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
+static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
+ { .id = 0, .name = "SYNC_OBJ_DMA_DOWN_FEEDBACK" },
+ { .id = 1, .name = "SYNC_OBJ_DMA_UP_FEEDBACK" },
+ { .id = 2, .name = "SYNC_OBJ_DMA_STATIC_DRAM_SRAM_FEEDBACK" },
+ { .id = 3, .name = "SYNC_OBJ_DMA_SRAM_DRAM_FEEDBACK" },
+ { .id = 4, .name = "SYNC_OBJ_FIRST_COMPUTE_FINISH" },
+ { .id = 5, .name = "SYNC_OBJ_HOST_DRAM_DONE" },
+ { .id = 6, .name = "SYNC_OBJ_DBG_CTR_DEPRECATED" },
+ { .id = 7, .name = "SYNC_OBJ_DMA_ACTIVATIONS_DRAM_SRAM_FEEDBACK" },
+ { .id = 8, .name = "SYNC_OBJ_ENGINE_SEM_MME_0" },
+ { .id = 9, .name = "SYNC_OBJ_ENGINE_SEM_MME_1" },
+ { .id = 10, .name = "SYNC_OBJ_ENGINE_SEM_TPC_0" },
+ { .id = 11, .name = "SYNC_OBJ_ENGINE_SEM_TPC_1" },
+ { .id = 12, .name = "SYNC_OBJ_ENGINE_SEM_TPC_2" },
+ { .id = 13, .name = "SYNC_OBJ_ENGINE_SEM_TPC_3" },
+ { .id = 14, .name = "SYNC_OBJ_ENGINE_SEM_TPC_4" },
+ { .id = 15, .name = "SYNC_OBJ_ENGINE_SEM_TPC_5" },
+ { .id = 16, .name = "SYNC_OBJ_ENGINE_SEM_TPC_6" },
+ { .id = 17, .name = "SYNC_OBJ_ENGINE_SEM_TPC_7" },
+ { .id = 18, .name = "SYNC_OBJ_ENGINE_SEM_DMA_1" },
+ { .id = 19, .name = "SYNC_OBJ_ENGINE_SEM_DMA_2" },
+ { .id = 20, .name = "SYNC_OBJ_ENGINE_SEM_DMA_3" },
+ { .id = 21, .name = "SYNC_OBJ_ENGINE_SEM_DMA_4" },
+ { .id = 22, .name = "SYNC_OBJ_ENGINE_SEM_DMA_5" },
+ { .id = 23, .name = "SYNC_OBJ_ENGINE_SEM_DMA_6" },
+ { .id = 24, .name = "SYNC_OBJ_ENGINE_SEM_DMA_7" },
+ { .id = 25, .name = "SYNC_OBJ_DBG_CTR_0" },
+ { .id = 26, .name = "SYNC_OBJ_DBG_CTR_1" },
+};
+
+static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
+ { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
+ { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+ { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
+ { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
+ { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
+ { .id = 206, .name = "MON_OBJ_TPC_2_CLK_GATE" },
+ { .id = 207, .name = "MON_OBJ_TPC_3_CLK_GATE" },
+ { .id = 208, .name = "MON_OBJ_TPC_4_CLK_GATE" },
+ { .id = 209, .name = "MON_OBJ_TPC_5_CLK_GATE" },
+ { .id = 210, .name = "MON_OBJ_TPC_6_CLK_GATE" },
+ { .id = 211, .name = "MON_OBJ_TPC_7_CLK_GATE" },
+};
+
+static s64 gaudi_state_dump_specs_props[] = {
+ [SP_SYNC_OBJ_BASE_ADDR] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0,
+ [SP_NEXT_SYNC_OBJ_ADDR] = NEXT_SYNC_OBJ_ADDR_INTERVAL,
+ [SP_SYNC_OBJ_AMOUNT] = NUM_OF_SOB_IN_BLOCK,
+ [SP_MON_OBJ_WR_ADDR_LOW] =
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0,
+ [SP_MON_OBJ_WR_ADDR_HIGH] =
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0,
+ [SP_MON_OBJ_WR_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0,
+ [SP_MON_OBJ_ARM_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0,
+ [SP_MON_OBJ_STATUS] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0,
+ [SP_MONITORS_AMOUNT] = NUM_OF_MONITORS_IN_BLOCK,
+ [SP_TPC0_CMDQ] = mmTPC0_QM_GLBL_CFG0,
+ [SP_TPC0_CFG_SO] = mmTPC0_CFG_QM_SYNC_OBJECT_ADDR,
+ [SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
+ [SP_MME_CMDQ] = mmMME0_QM_GLBL_CFG0,
+ [SP_MME_CFG_SO] = mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL,
+ [SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
+ [SP_DMA_CMDQ] = mmDMA0_QM_GLBL_CFG0,
+ [SP_DMA_CFG_SO] = mmDMA0_CORE_WR_COMP_ADDR_LO,
+ [SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_NUM_OF_MME_ENGINES] = NUM_OF_MME_ENGINES,
+ [SP_SUB_MME_ENG_NUM] = NUM_OF_MME_SUB_ENGINES,
+ [SP_NUM_OF_DMA_ENGINES] = NUM_OF_DMA_ENGINES,
+ [SP_NUM_OF_TPC_ENGINES] = NUM_OF_TPC_ENGINES,
+ [SP_ENGINE_NUM_OF_QUEUES] = NUM_OF_QUEUES,
+ [SP_ENGINE_NUM_OF_STREAMS] = NUM_OF_STREAMS,
+ [SP_ENGINE_NUM_OF_FENCES] = NUM_OF_FENCES,
+ [SP_FENCE0_CNT_OFFSET] =
+ mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_FENCE0_RDATA_OFFSET] =
+ mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
+ [SP_NUM_CORES] = 1,
+};
+
+/* The order here is opposite to the order of the indexing in the h/w.
+ * i.e. SYNC_MGR_W_S is actually 0, SYNC_MGR_E_S is 1, etc.
+ */
+static const char * const gaudi_sync_manager_names[] = {
+ "SYNC_MGR_E_N",
+ "SYNC_MGR_W_N",
+ "SYNC_MGR_E_S",
+ "SYNC_MGR_W_S",
+ NULL
+};
+
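[Review annotation, not part of the patch] Given the opposite h/w ordering
noted above, the SM SEI print path further down recovers the name slot by
flipping the two low bits of the h/w index, mapping 0<->3 and 1<->2:

	index = (index ^ 0x3) & 0x3; /* h/w 0 (W_S) -> slot 3, h/w 1 (E_S) -> slot 2 */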
struct ecc_info_extract_params {
u64 block_address;
u32 num_memories;
@@ -363,8 +469,6 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
u32 size, u64 val);
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
u32 num_regs, u32 val);
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -375,7 +479,6 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
-
static inline enum hl_collective_mode
get_collective_mode(struct hl_device *hdev, u32 queue_id)
{
@@ -403,7 +506,11 @@ static inline void set_default_power_values(struct hl_device *hdev)
if (hdev->card_type == cpucp_card_type_pmc) {
prop->max_power_default = MAX_POWER_DEFAULT_PMC;
- prop->dc_power_default = DC_POWER_DEFAULT_PMC;
+
+ if (prop->fw_security_enabled)
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC;
+ else
+ prop->dc_power_default = DC_POWER_DEFAULT_PMC;
} else {
prop->max_power_default = MAX_POWER_DEFAULT_PCI;
prop->dc_power_default = DC_POWER_DEFAULT_PCI;
@@ -450,6 +557,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
get_collective_mode(hdev, i);
}
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->collective_first_sob = 0;
prop->collective_first_mon = 0;
@@ -551,6 +659,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
@@ -723,14 +833,14 @@ pci_init:
GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
}
return 0;
@@ -974,17 +1084,11 @@ static void gaudi_sob_group_hw_reset(struct kref *ref)
struct gaudi_hw_sob_group *hw_sob_group =
container_of(ref, struct gaudi_hw_sob_group, kref);
struct hl_device *hdev = hw_sob_group->hdev;
- u64 base_addr;
- int rc;
+ int i;
- base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob_group->base_sob_id * 4;
- rc = gaudi_schedule_register_memset(hdev, hw_sob_group->queue_id,
- base_addr, NUMBER_OF_SOBS_IN_GRP, 0);
- if (rc)
- dev_err(hdev->dev,
- "failed resetting sob group - sob base %u, count %u",
- hw_sob_group->base_sob_id, NUMBER_OF_SOBS_IN_GRP);
+ for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
+ WREG32((mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (hw_sob_group->base_sob_id * 4) + (i * 4)), 0);
kref_init(&hw_sob_group->kref);
}
@@ -1121,6 +1225,20 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
queue_id = job->hw_queue_id;
prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
+ if (job->cs->encaps_signals) {
+ /* use the encaps signal handle stored earlier in the flow
+ * and set the SOB information from the encaps
+ * signals handle
+ */
+ hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,
+ cs_cmpl);
+
+ dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
+ job->cs->sequence,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val);
+ }
+
/* Add to wait CBs using slave monitor */
wait_prop.data = (void *) job->user_cb;
wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
@@ -1131,7 +1249,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
wait_prop.size = cb_size;
dev_dbg(hdev->dev,
- "Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n",
+ "Generate slave wait CB, sob %d, val:%x, mon %d, q %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
prop->collective_slave_mon_id, queue_id);
@@ -1145,7 +1263,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, cb_size, false);
}
-static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
+static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
{
struct hl_cs_compl *signal_cs_cmpl =
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
@@ -1163,9 +1281,37 @@ static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
- /* copy the SOB id and value of the signal CS */
- cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
- cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ /* In encaps signals case the SOB info will be retrieved from
+ * the handle in gaudi_collective_slave_init_job.
+ */
+ if (!cs->encaps_signals) {
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+ }
+
+ /* Check again if the signal cs already completed.
+ * If yes, don't send any wait cs since the hw_sob
+ * could already be in reset. If the signal is not completed,
+ * take a refcount on the hw_sob to prevent resetting the sob
+ * while the wait cs is not yet submitted.
+ * Note that this check is protected by two locks,
+ * the hw queue lock and the completion object lock,
+ * and the same completion object lock also protects
+ * the hw_sob reset handler function.
+ * The hw_queue lock prevents the hw_sob refcount value,
+ * changed by the signal/wait flows, from going out of sync.
+ */
+ spin_lock(&signal_cs_cmpl->lock);
+
+ if (completion_done(&cs->signal_fence->completion)) {
+ spin_unlock(&signal_cs_cmpl->lock);
+ return -EINVAL;
+ }
+ /* Increment kref since all slave queues are now waiting on it */
+ kref_get(&cs_cmpl->hw_sob->kref);
+
+ spin_unlock(&signal_cs_cmpl->lock);
/* Calculate the stream from collective master queue (1st job) */
job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
@@ -1210,21 +1356,17 @@ static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
cprop->curr_sob_group_idx[stream], stream);
}
- /* Increment kref since all slave queues are now waiting on it */
- kref_get(&cs_cmpl->hw_sob->kref);
- /*
- * Must put the signal fence after the SOB refcnt increment so
- * the SOB refcnt won't turn 0 and reset the SOB before the
- * wait CS was submitted.
- */
mb();
hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
+
+ return 0;
}
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
- enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id)
+ enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
+ u32 encaps_signal_offset)
{
struct hw_queue_properties *hw_queue_prop;
struct hl_cs_counters_atomic *cntr;
@@ -1284,6 +1426,13 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
job->user_cb_size = cb_size;
job->hw_queue_id = queue_id;
+ /* since it's guaranteed to have only one chunk in the collective wait
+ * cs, we can use this chunk to set the encapsulated signal offset
+ * in the jobs.
+ */
+ if (cs->encaps_signals)
+ job->encaps_sig_wait_offset = encaps_signal_offset;
+
/*
* No need in parsing, user CB is the patched CB.
* We call hl_cb_destroy() out of two reasons - we don't need
@@ -1312,8 +1461,9 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev,
}
static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
- struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ struct hl_ctx *ctx, struct hl_cs *cs,
+ u32 wait_queue_id, u32 collective_engine_id,
+ u32 encaps_signal_offset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct hw_queue_properties *hw_queue_prop;
@@ -1363,7 +1513,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
if (i == 0) {
queue_id = wait_queue_id;
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_MASTER, queue_id, wait_queue_id);
+ HL_COLLECTIVE_MASTER, queue_id,
+ wait_queue_id, encaps_signal_offset);
} else {
if (nic_idx < NIC_NUMBER_OF_ENGINES) {
if (gaudi->hw_cap_initialized &
@@ -1383,7 +1534,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
}
rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
- HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id);
+ HL_COLLECTIVE_SLAVE, queue_id,
+ wait_queue_id, encaps_signal_offset);
}
if (rc)
@@ -1431,6 +1583,11 @@ static int gaudi_late_init(struct hl_device *hdev)
return rc;
}
+ /* Scrub both SRAM and DRAM */
+ rc = hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
+ if (rc)
+ goto disable_pci_access;
+
rc = gaudi_fetch_psoc_frequency(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
@@ -1455,6 +1612,11 @@ static int gaudi_late_init(struct hl_device *hdev)
goto disable_pci_access;
}
+ /* We only support a single ASID for the user, so for the sake of optimization, just
+ * initialize the ASID one time during device initialization with the fixed value of 1
+ */
+ gaudi_mmu_prepare(hdev, 1);
+
return 0;
disable_pci_access:
@@ -1720,8 +1882,12 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->supports_sync_stream = true;
hdev->supports_coresight = true;
hdev->supports_staged_submission = true;
+ hdev->supports_wait_for_multi_cs = true;
- gaudi_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
+ hdev->stream_master_qid_arr =
+ hdev->asic_funcs->get_stream_master_qid_arr();
+ hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE;
return 0;
@@ -2523,7 +2689,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
/* Mask all arithmetic interrupts from TPC */
- WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF);
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFE);
/* Set 16 cache lines */
WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
ICACHE_FETCH_LINE_NUM, 2);
@@ -3670,7 +3836,7 @@ static void gaudi_disable_timestamp(struct hl_device *hdev)
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
-static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
+static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -3682,6 +3848,9 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
+ if (fw_reset)
+ goto skip_engines;
+
gaudi_stop_nic_qmans(hdev);
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
@@ -3707,6 +3876,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_disable_timestamp(hdev);
+skip_engines:
gaudi_disable_msi(hdev);
}
@@ -3739,6 +3909,9 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+ /* mem cache invalidation */
+ WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);
+
hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);
WREG32(mmMMU_UP_MMU_ENABLE, 1);
@@ -4071,7 +4244,7 @@ disable_queues:
return rc;
}
-static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
+static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct cpu_dyn_regs *dyn_regs =
&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
@@ -4092,6 +4265,14 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
+ if (fw_reset) {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+
+ goto skip_reset;
+ }
+
driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled &&
!hdev->asic_prop.hard_reset_done_by_fw);
@@ -4168,6 +4349,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
reset_timeout_ms);
}
+skip_reset:
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
* itself is in reset. Need to wait until the reset is deasserted
@@ -4212,7 +4394,7 @@ static int gaudi_resume(struct hl_device *hdev)
return gaudi_init_iatu(hdev);
}
-static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
@@ -4621,8 +4803,8 @@ static int gaudi_hbm_scrubbing(struct hl_device *hdev)
"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
cur_addr, cur_addr + chunk_size);
- WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0);
- WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0);
+ WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
+ WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
lower_32_bits(cur_addr));
WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
@@ -5796,78 +5978,6 @@ release_cb:
return rc;
}
-static int gaudi_schedule_register_memset(struct hl_device *hdev,
- u32 hw_queue_id, u64 reg_base, u32 num_regs, u32 val)
-{
- struct hl_ctx *ctx;
- struct hl_pending_cb *pending_cb;
- struct packet_msg_long *pkt;
- u32 cb_size, ctl;
- struct hl_cb *cb;
- int i, rc;
-
- mutex_lock(&hdev->fpriv_list_lock);
- ctx = hdev->compute_ctx;
-
- /* If no compute context available or context is going down
- * memset registers directly
- */
- if (!ctx || kref_read(&ctx->refcount) == 0) {
- rc = gaudi_memset_registers(hdev, reg_base, num_regs, val);
- mutex_unlock(&hdev->fpriv_list_lock);
- return rc;
- }
-
- mutex_unlock(&hdev->fpriv_list_lock);
-
- cb_size = (sizeof(*pkt) * num_regs) +
- sizeof(struct packet_msg_prot) * 2;
-
- if (cb_size > SZ_2M) {
- dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M);
- return -ENOMEM;
- }
-
- pending_cb = kzalloc(sizeof(*pending_cb), GFP_KERNEL);
- if (!pending_cb)
- return -ENOMEM;
-
- cb = hl_cb_kernel_create(hdev, cb_size, false);
- if (!cb) {
- kfree(pending_cb);
- return -EFAULT;
- }
-
- pkt = cb->kernel_address;
-
- ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
- ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
-
- for (i = 0; i < num_regs ; i++, pkt++) {
- pkt->ctl = cpu_to_le32(ctl);
- pkt->value = cpu_to_le32(val);
- pkt->addr = cpu_to_le64(reg_base + (i * 4));
- }
-
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
-
- pending_cb->cb = cb;
- pending_cb->cb_size = cb_size;
- /* The queue ID MUST be an external queue ID. Otherwise, we will
- * have undefined behavior
- */
- pending_cb->hw_queue_id = hw_queue_id;
-
- spin_lock(&ctx->pending_cb_lock);
- list_add_tail(&pending_cb->cb_node, &ctx->pending_cb_list);
- spin_unlock(&ctx->pending_cb_lock);
-
- return 0;
-}
-
static int gaudi_restore_sm_registers(struct hl_device *hdev)
{
u64 base_addr;
@@ -6013,7 +6123,7 @@ static int gaudi_restore_user_registers(struct hl_device *hdev)
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
- return gaudi_restore_user_registers(hdev);
+ return 0;
}
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
@@ -6723,6 +6833,9 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
asid);
}
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
+
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -6772,7 +6885,8 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
- WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset,
+ BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT) | BIT(DMA0_CORE_PROT_VAL_SHIFT));
rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
job->job_cb_size, cb->bus_address);
@@ -6793,8 +6907,7 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
}
free_fence_ptr:
- WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
- ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+ WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));
hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
fence_dma_addr);
@@ -7168,7 +7281,7 @@ static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream
cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
size = RREG32(cq_tsize);
- dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %x\n",
+ dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n",
stream, cq_ptr, size);
}
@@ -7224,7 +7337,7 @@ static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
addr = le64_to_cpu(bd->ptr);
- dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %x\n",
+ dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n",
stream, ci, addr, len);
/* get previous ci, wrap if needed */
@@ -7326,24 +7439,30 @@ static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
{
u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;
+ /* Flip the bits as the enum is ordered in the opposite way */
+ index = (index ^ 0x3) & 0x3;
+
switch (sei_data->sei_cause) {
case SM_SEI_SO_OVERFLOW:
- dev_err(hdev->dev,
- "SM %u SEI Error: SO %u overflow/underflow",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: SOB Group %u overflow/underflow",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_LBW_4B_UNALIGNED:
- dev_err(hdev->dev,
- "SM %u SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
case SM_SEI_AXI_RESPONSE_ERR:
- dev_err(hdev->dev,
- "SM %u SEI Error: AXI ID %u response error",
- index, le32_to_cpu(sei_data->sei_log));
+ dev_err_ratelimited(hdev->dev,
+ "%s SEI Error: AXI ID %u response error",
+ gaudi_sync_manager_names[index],
+ le32_to_cpu(sei_data->sei_log));
break;
default:
- dev_err(hdev->dev, "Unknown SM SEI cause %u",
+ dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u",
le32_to_cpu(sei_data->sei_log));
break;
}
@@ -7358,6 +7477,11 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
bool extract_info_from_fw;
int rc;
+ if (hdev->asic_prop.fw_security_enabled) {
+ extract_info_from_fw = true;
+ goto extract_ecc_info;
+ }
+
switch (event_type) {
case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
@@ -7430,6 +7554,7 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
return;
}
+extract_ecc_info:
if (extract_info_from_fw) {
ecc_address = le64_to_cpu(ecc_data->ecc_address);
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
@@ -7806,8 +7931,15 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
- u8 cause;
bool reset_required;
+ u8 cause;
+ int rc;
+
+ if (event_type >= GAUDI_EVENT_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
+ event_type, GAUDI_EVENT_SIZE - 1);
+ return;
+ }
gaudi->events_stat[event_type]++;
gaudi->events_stat_aggregate[event_type]++;
@@ -7880,10 +8012,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
if (reset_required) {
- dev_err(hdev->dev, "hard reset required due to %s\n",
+ dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- goto reset_device;
+ hl_device_reset(hdev, 0);
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7902,10 +8034,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
if (reset_required) {
- dev_err(hdev->dev, "hard reset required due to %s\n",
+ dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
- goto reset_device;
+ hl_device_reset(hdev, 0);
} else {
hl_fw_unmask_irq(hdev, event_type);
}
@@ -7993,6 +8125,10 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
+ rc = hl_state_dump(hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Error during system state dump %d\n", rc);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -8031,7 +8167,9 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
return;
reset_device:
- if (hdev->hard_reset_on_fw_events)
+ if (hdev->asic_prop.fw_security_enabled)
+ hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW);
+ else if (hdev->hard_reset_on_fw_events)
hl_device_reset(hdev, HL_RESET_HARD);
else
hl_fw_unmask_irq(hdev, event_type);
@@ -8563,11 +8701,20 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
+ int rc;
+
if (ctx->asid == HL_KERNEL_ASID_ID)
return 0;
- gaudi_mmu_prepare(ctx->hdev, ctx->asid);
- return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+ rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+ if (rc)
+ return rc;
+
+ rc = gaudi_restore_user_registers(ctx->hdev);
+ if (rc)
+ gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
+
+ return rc;
}
static void gaudi_ctx_fini(struct hl_ctx *ctx)
@@ -8596,6 +8743,11 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
sizeof(struct packet_msg_prot) * 2;
}
+static u32 gaudi_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + (sob_id * 4);
+}
+
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
u32 size, bool eb)
{
@@ -8902,16 +9054,12 @@ static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
- int rc;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
hw_sob->sob_id);
- rc = gaudi_schedule_register_memset(hdev, hw_sob->q_idx,
- CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
- hw_sob->sob_id * 4, 1, 0);
- if (rc)
- dev_err(hdev->dev, "failed resetting sob %u", hw_sob->sob_id);
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ hw_sob->sob_id * 4, 0);
kref_init(&hw_sob->kref);
}
@@ -8977,6 +9125,280 @@ static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
}
}
+static int gaudi_add_sync_to_engine_map_entry(
+ struct hl_sync_to_engine_map *map, u32 reg_value,
+ enum hl_sync_engine_type engine_type, u32 engine_id)
+{
+ struct hl_sync_to_engine_map_entry *entry;
+
+ /* The reg value represents a partial address of the sync object;
+ * it is used as a unique identifier. For this we need to
+ * clear the cutoff cfg base bits from the value.
+ */
+ if (reg_value == 0 || reg_value == 0xffffffff)
+ return 0;
+ reg_value -= (u32)CFG_BASE;
+
+ /* create a new hash entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->engine_type = engine_type;
+ entry->engine_id = engine_id;
+ entry->sync_id = reg_value;
+ hash_add(map->tb, &entry->node, reg_value);
+
+ return 0;
+}
+
+static int gaudi_gen_sync_to_engine_map(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, j, rc;
+ u32 reg_value;
+
+ /* Iterate over TPC engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) {
+ /* TPC registers must be accessed with clock gating disabled */
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] +
+ sds->props[SP_NEXT_TPC] * i);
+
+ /* We can reenable clock_gating */
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
+ ENGINE_TPC, i);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+
+ /* Iterate over MME engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) {
+ for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) {
+ /* MME registers must be accessed with clock gating
+ * disabled
+ */
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ reg_value = RREG32(sds->props[SP_MME_CFG_SO] +
+ sds->props[SP_NEXT_MME] * i +
+ j * sizeof(u32));
+
+ /* We can reenable clock_gating */
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ rc = gaudi_add_sync_to_engine_map_entry(
+ map, reg_value, ENGINE_MME,
+ i * sds->props[SP_SUB_MME_ENG_NUM] + j);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+ }
+
+ /* Iterate over DMA engines */
+ for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) {
+ reg_value = RREG32(sds->props[SP_DMA_CFG_SO] +
+ sds->props[SP_DMA_QUEUES_OFFSET] * i);
+ rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
+ ENGINE_DMA, i);
+ if (rc)
+ goto free_sync_to_engine_map;
+ }
+
+ return 0;
+
+free_sync_to_engine_map:
+ hl_state_dump_free_sync_to_engine_map(map);
+
+ return rc;
+}
+
+static int gaudi_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ return FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK,
+ mon->status);
+}
+
+static void gaudi_fill_sobs_from_mon(char *sobs, struct hl_mon_state_dump *mon)
+{
+ const size_t max_write = 10;
+ u32 gid, mask, sob;
+ int i, offset;
+
+ /* Sync object ID is calculated as follows:
+ * (8 * group_id + cleared bits in mask)
+ */
+ gid = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
+ mon->arm_data);
+ mask = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
+ mon->arm_data);
+
+ for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE -
+ max_write; mask >>= 1, i++) {
+ if (!(mask & 1)) {
+ sob = gid * MONITOR_MAX_SOBS + i;
+
+ if (offset > 0)
+ offset += snprintf(sobs + offset, max_write,
+ ", ");
+
+ offset += snprintf(sobs + offset, max_write, "%u", sob);
+ }
+ }
+}
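[Review annotation, not part of the patch] A worked example of the id
calculation above: with group id 2 and arm mask 0b11111100 (bits 0 and 1
cleared, i.e. monitored), the loop emits

	sob = 2 * MONITOR_MAX_SOBS + 0; /* 16 */
	sob = 2 * MONITOR_MAX_SOBS + 1; /* 17 */

and the scratch string ends up as "16, 17".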
+
+static int gaudi_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ const char *name;
+ char scratch_buf1[BIN_REG_STRING_SIZE],
+ scratch_buf2[BIN_REG_STRING_SIZE];
+ char monitored_sobs[MONITOR_SOB_STRING_SIZE] = {0};
+
+ name = hl_state_dump_get_monitor_name(hdev, mon);
+ if (!name)
+ name = "";
+
+ gaudi_fill_sobs_from_mon(monitored_sobs, mon);
+
+ return hl_snprintf_resize(
+ buf, size, offset,
+ "Mon id: %u%s, wait for group id: %u mask %s to reach val: %u and write %u to address 0x%llx. Pending: %s. Means sync objects [%s] are being monitored.",
+ mon->id, name,
+ FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
+ mon->arm_data),
+ hl_format_as_binary(
+ scratch_buf1, sizeof(scratch_buf1),
+ FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
+ mon->arm_data)),
+ FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK,
+ mon->arm_data),
+ mon->wr_data,
+ (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low,
+ hl_format_as_binary(
+ scratch_buf2, sizeof(scratch_buf2),
+ FIELD_GET(
+ SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK,
+ mon->status)),
+ monitored_sobs);
+}
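[Review annotation, not part of the patch] An illustrative (invented) output
line combining the pieces above:

	Mon id: 204 MON_OBJ_TPC_0_CLK_GATE, wait for group id: 2 mask 0b11111100
	to reach val: 1 and write 1 to address 0xfc5010008000. Pending: 0b1.
	Means sync objects [16, 17] are being monitored.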
+
+
+static int gaudi_print_fences_single_engine(
+ struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
+ enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
+ size_t *size, size_t *offset)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int rc = -ENOMEM, i;
+ u32 *statuses, *fences;
+
+ statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES],
+ sizeof(*statuses), GFP_KERNEL);
+ if (!statuses)
+ goto out;
+
+ fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] *
+ sds->props[SP_ENGINE_NUM_OF_QUEUES],
+ sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto free_status;
+
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i)
+ statuses[i] = RREG32(status_base_offset + i * sizeof(u32));
+
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] *
+ sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i)
+ fences[i] = RREG32(base_offset + i * sizeof(u32));
+
+ /* The actual print */
+ for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) {
+ u32 fence_id;
+ u64 fence_cnt, fence_rdata;
+ const char *engine_name;
+
+ if (!FIELD_GET(TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK,
+ statuses[i]))
+ continue;
+
+ fence_id =
+ FIELD_GET(TPC0_QM_CP_STS_0_FENCE_ID_MASK, statuses[i]);
+ fence_cnt = base_offset + CFG_BASE +
+ sizeof(u32) *
+ (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]);
+ fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] +
+ sds->props[SP_FENCE0_RDATA_OFFSET];
+ engine_name = hl_sync_engine_to_string(engine_type);
+
+ rc = hl_snprintf_resize(
+ buf, size, offset,
+ "%s%u, stream %u: fence id %u cnt = 0x%llx (%s%u_QM.CP_FENCE%u_CNT_%u) rdata = 0x%llx (%s%u_QM.CP_FENCE%u_RDATA_%u) value = %u, cp_status = %u\n",
+ engine_name, engine_id,
+ i, fence_id,
+ fence_cnt, engine_name, engine_id, fence_id, i,
+ fence_rdata, engine_name, engine_id, fence_id, i,
+ fences[fence_id],
+ statuses[i]);
+ if (rc)
+ goto free_fences;
+ }
+
+ rc = 0;
+
+free_fences:
+ kfree(fences);
+free_status:
+ kfree(statuses);
+out:
+ return rc;
+}
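[Review annotation, not part of the patch] An illustrative (invented) output
line for a TPC queue with a fence in progress:

	TPC3, stream 1: fence id 0 cnt = 0x7fc04d08 (TPC3_QM.CP_FENCE0_CNT_1)
	rdata = 0x7fc04d88 (TPC3_QM.CP_FENCE0_RDATA_1) value = 1, cp_status = 49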
+
+
+static struct hl_state_dump_specs_funcs gaudi_state_dump_funcs = {
+ .monitor_valid = gaudi_monitor_valid,
+ .print_single_monitor = gaudi_print_single_monitor,
+ .gen_sync_to_engine_map = gaudi_gen_sync_to_engine_map,
+ .print_fences_single_engine = gaudi_print_fences_single_engine,
+};
+
+static void gaudi_state_dump_init(struct hl_device *hdev)
+{
+ struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gaudi_so_id_to_str); ++i)
+ hash_add(sds->so_id_to_str_tb,
+ &gaudi_so_id_to_str[i].node,
+ gaudi_so_id_to_str[i].id);
+
+ for (i = 0; i < ARRAY_SIZE(gaudi_monitor_id_to_str); ++i)
+ hash_add(sds->monitor_id_to_str_tb,
+ &gaudi_monitor_id_to_str[i].node,
+ gaudi_monitor_id_to_str[i].id);
+
+ sds->props = gaudi_state_dump_specs_props;
+
+ sds->sync_namager_names = gaudi_sync_manager_names;
+
+ sds->funcs = gaudi_state_dump_funcs;
+}
+
+static u32 *gaudi_get_stream_master_qid_arr(void)
+{
+ return gaudi_stream_master;
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8989,7 +9411,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.halt_engines = gaudi_halt_engines,
.suspend = gaudi_suspend,
.resume = gaudi_resume,
- .cb_mmap = gaudi_cb_mmap,
+ .mmap = gaudi_mmap,
.ring_doorbell = gaudi_ring_doorbell,
.pqe_write = gaudi_pqe_write,
.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
@@ -9062,7 +9484,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
.enable_events_from_fw = gaudi_enable_events_from_fw,
.map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
.init_firmware_loader = gaudi_init_firmware_loader,
- .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm
+ .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
+ .state_dump_init = gaudi_state_dump_init,
+ .get_sob_addr = gaudi_get_sob_addr,
+ .set_pci_memory_regions = gaudi_set_pci_memory_regions,
+ .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 957bf3720f70..bbbf1c343e75 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -36,6 +36,8 @@
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
NUMBER_OF_CPU_HW_QUEUES)
+#define GAUDI_STREAM_MASTER_ARR_SIZE 8
+
#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
#endif
@@ -50,6 +52,8 @@
#define DC_POWER_DEFAULT_PCI 60000 /* 60W */
#define DC_POWER_DEFAULT_PMC 60000 /* 60W */
+#define DC_POWER_DEFAULT_PMC_SEC 97000 /* 97W */
+
#define GAUDI_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define TPC_ENABLED_MASK 0xFF
@@ -62,7 +66,7 @@
#define DMA_MAX_TRANSFER_SIZE U32_MAX
-#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+#define GAUDI_DEFAULT_CARD_NAME "HL205"
#define GAUDI_MAX_PENDING_CS SZ_16K
@@ -117,6 +121,7 @@
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
+#define MONITOR_MAX_SOBS 8
/* DRAM Memory Map */
@@ -200,6 +205,18 @@
#define HW_CAP_TPC_MASK GENMASK(31, 24)
#define HW_CAP_TPC_SHIFT 24
+#define NEXT_SYNC_OBJ_ADDR_INTERVAL \
+ (mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0)
+#define NUM_OF_MME_ENGINES 2
+#define NUM_OF_MME_SUB_ENGINES 2
+#define NUM_OF_TPC_ENGINES 8
+#define NUM_OF_DMA_ENGINES 8
+#define NUM_OF_QUEUES 5
+#define NUM_OF_STREAMS 4
+#define NUM_OF_FENCES 4
+
+
#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39)
#define GAUDI_PCI_TO_CPU_ADDR(addr) \
do { \
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index c2a27ed1c4d1..5349c1be13f9 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -622,11 +622,6 @@ static int gaudi_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER,
- hdev->compute_ctx->asid);
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER,
- hdev->compute_ctx->asid);
-
msb = upper_32_bits(input->buffer_address) >> 8;
msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 0d3240f1f7d7..cb265c00cf73 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -9559,6 +9559,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10013,6 +10014,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10466,6 +10468,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -10919,6 +10922,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -11372,6 +11376,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -11825,6 +11830,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -12280,6 +12286,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
@@ -12735,6 +12742,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
mask |= 1U << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 755e08cf2ecc..031c1849da14 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -350,6 +350,8 @@ static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};
+static s64 goya_state_dump_specs_props[SP_MAX] = {0};
+
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
@@ -387,6 +389,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
}
+ prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -466,6 +469,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->hard_reset_done_by_fw = false;
prop->gic_interrupts_enable = true;
+ prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+
return 0;
}
@@ -649,14 +654,14 @@ pci_init:
GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc) {
if (hdev->reset_on_preboot_fail)
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
+ hdev->asic_funcs->hw_fini(hdev, true, false);
}
if (!hdev->pldm) {
@@ -955,8 +960,9 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->supports_coresight = true;
hdev->supports_soft_reset = true;
hdev->allow_external_soft_reset = true;
+ hdev->supports_wait_for_multi_cs = false;
- goya_set_pci_memory_regions(hdev);
+ hdev->asic_funcs->set_pci_memory_regions(hdev);
return 0;
@@ -2374,7 +2380,7 @@ static void goya_disable_timestamp(struct hl_device *hdev)
WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
-static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
+static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
u32 wait_timeout_ms;
@@ -2493,6 +2499,7 @@ static void goya_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
+ fw_loader->linux_loaded = false;
fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
@@ -2696,14 +2703,7 @@ disable_queues:
return rc;
}
-/*
- * goya_hw_fini - Goya hardware tear-down code
- *
- * @hdev: pointer to hl_device structure
- * @hard_reset: should we do hard reset to all engines or just reset the
- * compute/dma engines
- */
-static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
+static void goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
struct goya_device *goya = hdev->asic_specific;
u32 reset_timeout_ms, cpu_timeout_ms, status;
@@ -2796,7 +2796,7 @@ int goya_resume(struct hl_device *hdev)
return goya_init_iatu(hdev);
}
-static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
@@ -4797,6 +4797,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
>> EQ_CTL_EVENT_TYPE_SHIFT);
struct goya_device *goya = hdev->asic_specific;
+ if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
+ dev_err(hdev->dev, "Event type %u exceeds maximum of %u\n",
+ event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
+ return;
+ }
+
goya->events_stat[event_type]++;
goya->events_stat_aggregate[event_type]++;
@@ -5475,14 +5481,14 @@ u64 goya_get_device_time(struct hl_device *hdev)
return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
-static void goya_collective_wait_init_cs(struct hl_cs *cs)
+static int goya_collective_wait_init_cs(struct hl_cs *cs)
{
-
+ return 0;
}
static int goya_collective_wait_create_jobs(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
- u32 collective_engine_id)
+ u32 collective_engine_id, u32 encaps_signal_offset)
{
return -EINVAL;
}
@@ -5524,6 +5530,62 @@ static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
}
}
+static int goya_gen_sync_to_engine_map(struct hl_device *hdev,
+ struct hl_sync_to_engine_map *map)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int goya_monitor_valid(struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+static int goya_print_single_monitor(char **buf, size_t *size, size_t *offset,
+ struct hl_device *hdev,
+ struct hl_mon_state_dump *mon)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static int goya_print_fences_single_engine(
+ struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
+ enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
+ size_t *size, size_t *offset)
+{
+ /* Not implemented */
+ return 0;
+}
+
+
+static struct hl_state_dump_specs_funcs goya_state_dump_funcs = {
+ .monitor_valid = goya_monitor_valid,
+ .print_single_monitor = goya_print_single_monitor,
+ .gen_sync_to_engine_map = goya_gen_sync_to_engine_map,
+ .print_fences_single_engine = goya_print_fences_single_engine,
+};
+
+static void goya_state_dump_init(struct hl_device *hdev)
+{
+ /* Not implemented */
+ hdev->state_dump_specs.props = goya_state_dump_specs_props;
+ hdev->state_dump_specs.funcs = goya_state_dump_funcs;
+}
+
+static u32 goya_get_sob_addr(struct hl_device *hdev, u32 sob_id)
+{
+ return 0;
+}
+
+static u32 *goya_get_stream_master_qid_arr(void)
+{
+ return NULL;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5536,7 +5598,7 @@ static const struct hl_asic_funcs goya_funcs = {
.halt_engines = goya_halt_engines,
.suspend = goya_suspend,
.resume = goya_resume,
- .cb_mmap = goya_cb_mmap,
+ .mmap = goya_mmap,
.ring_doorbell = goya_ring_doorbell,
.pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
@@ -5609,7 +5671,11 @@ static const struct hl_asic_funcs goya_funcs = {
.enable_events_from_fw = goya_enable_events_from_fw,
.map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
.init_firmware_loader = goya_init_firmware_loader,
- .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram
+ .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
+ .state_dump_init = goya_state_dump_init,
+ .get_sob_addr = goya_get_sob_addr,
+ .set_pci_memory_regions = goya_set_pci_memory_regions,
+ .get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 80b1d5a9d9f1..9ff6a448f0d4 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -98,6 +98,18 @@ struct hl_eq_fw_alive {
__u8 pad[7];
};
+enum hl_pcie_addr_dec_cause {
+ PCIE_ADDR_DEC_HBW_ERR_RESP,
+ PCIE_ADDR_DEC_LBW_ERR_RESP,
+ PCIE_ADDR_DEC_TLP_BLOCKED_BY_RR
+};
+
+struct hl_eq_pcie_addr_dec_data {
+ /* enum hl_pcie_addr_dec_cause */
+ __u8 addr_dec_cause;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
@@ -106,6 +118,7 @@ struct hl_eq_entry {
struct hl_eq_sm_sei_data sm_sei_data;
struct cpucp_pkt_sync_err pkt_sync_err;
struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_pcie_addr_dec_data pcie_addr_dec_data;
__le64 data[7];
};
};
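
A consumer picks the union member that matches the event type reported in the header. A hedged sketch of decoding the new PCIe address-decode payload (the helper below is illustrative only; it assumes the caller already matched the event type):

	/* Map the reported cause to a log string, per enum
	 * hl_pcie_addr_dec_cause above.
	 */
	static const char *pcie_addr_dec_cause_str(const struct hl_eq_entry *entry)
	{
		switch (entry->pcie_addr_dec_data.addr_dec_cause) {
		case PCIE_ADDR_DEC_HBW_ERR_RESP:
			return "HBW error response";
		case PCIE_ADDR_DEC_LBW_ERR_RESP:
			return "LBW error response";
		case PCIE_ADDR_DEC_TLP_BLOCKED_BY_RR:
			return "TLP blocked by range registers";
		default:
			return "unknown";
		}
	}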
@@ -116,7 +129,7 @@ struct hl_eq_entry {
#define EQ_CTL_READY_MASK 0x80000000
#define EQ_CTL_EVENT_TYPE_SHIFT 16
-#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
#define EQ_CTL_INDEX_SHIFT 0
#define EQ_CTL_INDEX_MASK 0x0000FFFF
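
The widened mask grows the event-type field from 10 to 12 bits while the shift stays at 16; extraction remains the usual mask-then-shift, as the goya_handle_eqe() hunk later in this diff shows. A minimal sketch:

	/* Decode the two EQ control-word fields. With the widened mask the
	 * event type is now a 12-bit value (0..4095).
	 */
	static void eq_ctl_decode(u32 ctl, u32 *event_type, u32 *index)
	{
		*event_type = (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;
		*index = (ctl & EQ_CTL_INDEX_MASK) >> EQ_CTL_INDEX_SHIFT;
	}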
@@ -300,7 +313,7 @@ enum pq_init_status {
* The packet's arguments specify the desired sensor and the field to
* set.
*
- * CPUCP_PACKET_PCIE_THROUGHPUT_GET
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
* Get throughput of PCIe.
* The packet's arguments specify the transaction direction (TX/RX).
* The window measurement is 10[msec], and the return value is in KB/sec.
@@ -309,19 +322,19 @@ enum pq_init_status {
* Replay count measures the number of "replay" events, which is
* basically the number of retries done by PCIe.
*
- * CPUCP_PACKET_TOTAL_ENERGY_GET
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
* Total Energy is a measurement of energy from the time FW Linux
* is loaded. It is calculated by multiplying the average power
* by time (passed from armcp start). The units are in MilliJoules.
*
- * CPUCP_PACKET_PLL_INFO_GET
+ * CPUCP_PACKET_PLL_INFO_GET -
* Fetch frequencies of PLL from the required PLL IP.
* The packet's arguments specify the device PLL type
* Pll type is the PLL from device pll_index enum.
* The result is composed of 4 outputs, each is 16-bit
* frequency in MHz.
*
- * CPUCP_PACKET_POWER_GET
+ * CPUCP_PACKET_POWER_GET -
* Fetch the present power consumption of the device (Current * Voltage).
*
* CPUCP_PACKET_NIC_PFC_SET -
@@ -345,6 +358,24 @@ enum pq_init_status {
* CPUCP_PACKET_MSI_INFO_SET -
* set the index number for each supported msi type going from
* host to device
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE in regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
*/
enum cpucp_packet_id {
@@ -385,6 +416,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_NIC_LPBK_SET, /* internal */
CPUCP_PACKET_NIC_MAC_CFG, /* internal */
CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -414,6 +450,11 @@ enum cpucp_packet_id {
#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
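A hedged sketch of how a request value could be packed with the new shift/mask pairs (the helper name is illustrative; the surrounding packet plumbing is not part of this hunk):

	/* Build the 64-bit packet value: bit 0 selects the in1 flag, bits
	 * 31:1 carry the in2 selector, per the CPUCP_PKT_VAL_MAC_CNT_*
	 * layout above.
	 */
	static u64 mac_cnt_pkt_value(u64 in1, u64 in2)
	{
		return ((in1 << CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT) &
			CPUCP_PKT_VAL_MAC_CNT_IN1_MASK) |
		       ((in2 << CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT) &
			CPUCP_PKT_VAL_MAC_CNT_IN2_MASK);
	}
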
/* heartbeat status bits */
#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
@@ -467,7 +508,8 @@ struct cpucp_packet {
__le32 status_mask;
};
- __le32 reserved;
+ /* For NIC requests */
+ __le32 port_index;
};
struct cpucp_unmask_irq_arr_packet {
@@ -476,6 +518,12 @@ struct cpucp_unmask_irq_arr_packet {
__le32 irqs[0];
};
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[0];
+};
+
struct cpucp_array_data_packet {
struct cpucp_packet cpucp_pkt;
__le32 length;
@@ -595,6 +643,18 @@ enum pll_index {
PLL_MAX
};
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
/* Event Queue Packets */
struct eq_generic_event {
@@ -700,6 +760,15 @@ struct cpucp_mac_addr {
__u8 mac_addr[ETH_ALEN];
};
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ UNKNOWN_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
+};
+
struct cpucp_nic_info {
struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
__le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
@@ -708,6 +777,40 @@ struct cpucp_nic_info {
__le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __u8 reserved[6];
+};
+
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g. NAK.
+ * @high_ber_reinit: link reinit due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high BER events.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
};
#endif /* CPUCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index fa8a5ad2d438..3099653234e4 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -78,6 +78,26 @@
* CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
* should be contacted.
*
+ * CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD HALT ACK from ARC0 is not received
+ * within specified retries after issuing
+ * HALT request. ARC0 appears to be in a
+ * bad reset state.
+ *
+ * CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD HALT ACK from ARC1 is not received
+ * within specified retries after issuing
+ * HALT request. ARC1 appears to be in a
+ * bad reset state.
+ *
+ * CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD RUN ACK from ARC0 is not received
+ * within specified timeout after issuing
+ * RUN request. ARC0 appears to be in a
+ * bad reset state.
+ *
+ * CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD RUN ACK from ARC1 is not received
+ * within specified timeout after issuing
+ * RUN request. ARC1 appears to be in a
+ * bad reset state.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -98,6 +118,10 @@
#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
+#define CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD (1 << 14)
+#define CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD (1 << 15)
+#define CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD (1 << 16)
+#define CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD (1 << 17)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
#define CPU_BOOT_ERR1_ENABLED (1 << 31)
@@ -186,6 +210,10 @@
* configured and is ready for use.
* Initialized in: ppboot
*
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
* FW sends to host a bitmap of supported
* PLLs.
@@ -209,6 +237,21 @@
* prevent IRQs overriding each other.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -236,10 +279,14 @@
#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << 18)
#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << 20)
#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << 21)
#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << 22)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << 23)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << 24)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << 25)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
#define CPU_BOOT_DEV_STS1_ENABLED (1 << 31)
@@ -313,10 +360,7 @@ struct cpu_dyn_regs {
__le32 hw_state;
__le32 kmd_msg_to_cpu;
__le32 cpu_cmd_status_to_host;
- union {
- __le32 gic_host_irq_ctrl;
- __le32 gic_host_pi_upd_irq;
- };
+ __le32 gic_host_pi_upd_irq;
__le32 gic_tpc_qm_irq_ctrl;
__le32 gic_mme_qm_irq_ctrl;
__le32 gic_dma_qm_irq_ctrl;
@@ -324,7 +368,9 @@ struct cpu_dyn_regs {
__le32 gic_dma_core_irq_ctrl;
__le32 gic_host_halt_irq;
__le32 gic_host_ints_irq;
- __le32 reserved1[24]; /* reserve for future use */
+ __le32 gic_host_soft_rst_irq;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 reserved1[22]; /* reserve for future use */
};
/* TODO: remove the desc magic after the code is updated to use message */
@@ -462,6 +508,11 @@ struct lkd_fw_comms_msg {
* Do not wait for BMC response.
*
* COMMS_LOW_PLL_OPP Initialize PLLs for low OPP.
+ *
+ * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC, except that the
+ * memory space is allocated in an
+ * ELBI-access-only address range.
+ *
*/
enum comms_cmd {
COMMS_NOOP = 0,
@@ -474,6 +525,7 @@ enum comms_cmd {
COMMS_GOTO_WFE = 7,
COMMS_SKIP_BMC = 8,
COMMS_LOW_PLL_OPP = 9,
+ COMMS_PREP_DESC_ELBI = 10,
COMMS_INVLD_LAST
};
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 5bb54b34a8ae..ffdfbd9b3220 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -126,6 +126,9 @@
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4F2004
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4F3FFC
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4F4000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x4F4800
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x4F5000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0 0x4F5800
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4F6000
#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 0x4F67FC
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 9aea7e996654..acc85d3ed98b 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -449,4 +449,21 @@ enum axi_id {
#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_SHIFT 0
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK 0x1
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_SHIFT 1
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK 0x1FE
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_SHIFT 0
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK 0xFF
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_SHIFT 8
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK 0xFF00
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_SHIFT 16
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_MASK 0x10000
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_SHIFT 17
+#define SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK 0xFFFE0000
+#define TPC0_QM_CP_STS_0_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_0_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK 0x400000
+
#endif /* GAUDI_MASKS_H_ */
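
These masks pair with FIELD_GET() from <linux/bitfield.h>, which is how the state-dump code earlier in this diff reads MON_STATUS. A minimal decode sketch for the ARM register (the function name is illustrative):

	#include <linux/bitfield.h>

	/* Pull the four MON_ARM fields out of a raw register value. */
	static void mon_arm_decode(u32 arm, u32 *sid, u32 *mask, u32 *sop, u32 *sod)
	{
		*sid  = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK, arm);
		*mask = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK, arm);
		*sop  = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOP_MASK, arm);
		*sod  = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK, arm);
	}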
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index d95d4162ae2c..b9bd5a7f71eb 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -12,8 +12,6 @@
* PSOC scratch-pad registers
*/
#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-/* TODO: remove mmGIC_HOST_IRQ_CTRL_POLL_REG */
-#define mmGIC_HOST_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmGIC_HOST_PI_UPD_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmGIC_TPC_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
#define mmGIC_MME_QM_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 95b1c6800a22..fe6fd34b8caf 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/utsname.h>
#define DEFAULT_COUNT 10
@@ -210,6 +211,8 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
+/* For test debug reporting. */
+char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
@@ -490,6 +493,11 @@ static int __init lkdtm_module_init(void)
crash_count = cpoint_count;
#endif
+ /* Common initialization. */
+ lkdtm_kernel_info = kasprintf(GFP_KERNEL, "kernel (%s %s)",
+ init_uts_ns.name.release,
+ init_uts_ns.name.machine);
+
/* Handle test-specific initialization. */
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
@@ -538,6 +546,8 @@ static void __exit lkdtm_module_exit(void)
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
+ kfree(lkdtm_kernel_info);
+
pr_info("Crash point unregistered\n");
}
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index d7d64d9765eb..c212a253edde 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -5,17 +5,17 @@
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
-#define LKDTM_KERNEL "kernel (" UTS_RELEASE " " UTS_MACHINE ")"
+extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
{ \
if (IS_ENABLED(kconfig)) \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
else \
- pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y\n"); \
+ pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
}
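
Call sites are unchanged by the conversion; they still pass a Kconfig symbol, and the message now interpolates the runtime-built string. For example (the symbol below is illustrative):

	/* Reports whether the running kernel was built with the option. */
	pr_expected_config(CONFIG_HARDENED_USERCOPY);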
#ifndef MODULE
@@ -25,24 +25,30 @@ int lkdtm_check_bool_cmdline(const char *param);
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
- pr_warn("This is probably expected, since this " LKDTM_KERNEL " was built with " #kconfig "=y but booted with '" param "=N'\n"); \
+ pr_warn("This is probably expected, since this %s was built with " #kconfig "=y but booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
break; \
case 1: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y and booted with '" param "=Y'\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y and booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
break; \
default: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
} \
} else { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
- pr_warn("This is probably expected, as this " LKDTM_KERNEL " was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \
+ pr_warn("This is probably expected, as this %s was built *without* " #kconfig "=y and booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
break; \
case 1: \
- pr_err("Unexpected! This " LKDTM_KERNEL " was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \
+ pr_err("Unexpected! This %s was built *without* " #kconfig "=y but booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
break; \
default: \
- pr_err("This is probably expected, since this " LKDTM_KERNEL " was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \
+ pr_err("This is probably expected, since this %s was built *without* " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
break; \
} \
} \
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index d1137a95ad02..2ed7e3aaff3a 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -69,6 +69,8 @@
#define FLAG_USE_DMA BIT(0)
#define PCI_DEVICE_ID_TI_AM654 0xb00c
+#define PCI_DEVICE_ID_TI_J7200 0xb00f
+#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define is_am654_pci_dev(pdev) \
@@ -970,6 +972,12 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
@@ -979,6 +987,7 @@ static struct pci_driver pci_endpoint_test_driver = {
.id_table = pci_endpoint_test_tbl,
.probe = pci_endpoint_test_probe,
.remove = pci_endpoint_test_remove,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index 29e8a546dcd6..b9784f3da7a1 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
#define EBU_CLC 0x000
@@ -102,7 +103,6 @@
#define MAX_CS 2
-#define HZ_PER_MHZ 1000000L
#define USEC_PER_SEC 1000000L
struct ebu_nand_cs {
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 64d6dfa83122..267324889dd6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
reset_control_assert(gphy_fw->reset);
+ /* The vendor BSP uses a 200ms delay after asserting the reset line.
+ * Without this some users are observing that the PHY is not coming up
+ * on the MDIO bus.
+ */
+ msleep(200);
+
ret = request_firmware(&fw, gphy_fw->fw_name, dev);
if (ret) {
dev_err(dev, "failed to load firmware: %s, error: %i\n",
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 1f63f50f73f1..bda5a9bf4f52 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
{
- struct qca8k_priv *priv = salve_bus->priv;
- struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -682,10 +680,8 @@ exit:
}
static int
-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
+qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
{
- struct qca8k_priv *priv = salve_bus->priv;
- struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -727,6 +723,24 @@ exit:
}
static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ struct mii_bus *bus = priv->bus;
+
+ return qca8k_mdio_write(bus, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ struct mii_bus *bus = priv->bus;
+
+ return qca8k_mdio_read(bus, phy, regnum);
+}
+
+static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
bus->priv = (void *)priv;
bus->name = "qca8k slave mii";
- bus->read = qca8k_mdio_read;
- bus->write = qca8k_mdio_write;
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
ds->index);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 8d90fed5d33e..6f0ea2facea9 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
#ifdef VORTEX_BUS_MASTER
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
- outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+ outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
outw(StartDMADown, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 53660bc8d6ff..9afc712f5948 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
}
}
-#ifdef MODULE
static int __init ne_init(void)
{
int retval;
- ne_add_devices();
+
+ if (IS_MODULE(CONFIG_NE2000))
+ ne_add_devices();
+
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
- if (retval) {
+
+ if (IS_MODULE(CONFIG_NE2000) && retval) {
if (io[0] == 0)
pr_notice("ne.c: You must supply \"io=0xNNN\""
" value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
return retval;
}
module_init(ne_init);
-#else /* MODULE */
-static int __init ne_init(void)
-{
- int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
- /* Unregister unused platform_devices. */
- ne_loop_rm_unreg(0);
- return retval;
-}
-module_init(ne_init);
-#ifdef CONFIG_NETDEV_LEGACY_INIT
+#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
struct net_device * __init ne_probe(int unit)
{
int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
return ERR_PTR(-ENODEV);
}
#endif
-#endif /* MODULE */
static void __exit ne_exit(void)
{
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index b5df7ad5a83f..032e8922b482 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
#ifdef XMT_VIA_SKB
skb_save[i] = p->tmd_skb[i];
#endif
- buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
blen[i] = tmdp->blen;
tmdp->u.s.status = 0x0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a705e2615307..8c83973adca5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8038,9 +8038,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
+ unsigned int len;
int rc, i, j;
u8 *data;
- unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300
#define BNX2_VPD_LEN 128
@@ -8057,38 +8057,21 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
for (i = 0; i < BNX2_VPD_LEN; i += 4)
swab32s((u32 *)&data[i]);
- i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto vpd_done;
-
- rosize = pci_vpd_lrdt_size(&data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- block_end = i + rosize;
-
- if (block_end > BNX2_VPD_LEN)
- goto vpd_done;
-
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&data[j], "1028", 4))
+ if (len != 4 || memcmp(&data[j], "1028", 4))
goto vpd_done;
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+ if (len > BNX2_MAX_VER_SLEN)
goto vpd_done;
memcpy(bp->fw_version, &data[j], len);
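
The same pattern, pci_vpd_alloc() followed by pci_vpd_find_ro_info_keyword() and a bounded copy, repeats in the bnx2x, bnxt, tg3 and cxgb4 hunks below. A condensed sketch of the idiom (names illustrative; the caller supplies a zeroed output buffer):

	#include <linux/pci.h>

	static void read_part_number(struct pci_dev *pdev, char *out, size_t out_sz)
	{
		unsigned int vpd_len, kw_len;
		u8 *vpd;
		int pos;

		vpd = pci_vpd_alloc(pdev, &vpd_len);	/* reads the whole VPD */
		if (IS_ERR(vpd))
			return;

		pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
						   PCI_VPD_RO_KEYWORD_PARTNO,
						   &kw_len);
		if (pos >= 0)
			memcpy(out, &vpd[pos], min_t(size_t, kw_len, out_sz - 1));

		kfree(vpd);
	}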
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d04994840b87..e789430f407c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2407,7 +2407,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
#define VF_ACQUIRE_THRESH 3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6d98134913cd..ae87296ae1ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12189,86 +12189,35 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
- int cnt, i, block_end, rodi;
- char vpd_start[BNX2X_VPD_LEN+1];
- char str_id_reg[VENDOR_ID_LEN+1];
- char str_id_cap[VENDOR_ID_LEN+1];
- char *vpd_data;
- char *vpd_extended_data = NULL;
- u8 len;
-
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
- memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
-
- if (cnt < BNX2X_VPD_LEN)
- goto out_not_found;
-
- /* VPD RO tag should be first tag after identifier string, hence
- * we should be able to find it in first BNX2X_VPD_LEN chars
- */
- i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto out_not_found;
-
- block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_start[i]);
-
- i += PCI_VPD_LRDT_TAG_SIZE;
-
- if (block_end > BNX2X_VPD_LEN) {
- vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
- if (vpd_extended_data == NULL)
- goto out_not_found;
-
- /* read rest of vpd image into vpd_extended_data */
- memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
- cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
- block_end - BNX2X_VPD_LEN,
- vpd_extended_data + BNX2X_VPD_LEN);
- if (cnt < (block_end - BNX2X_VPD_LEN))
- goto out_not_found;
- vpd_data = vpd_extended_data;
- } else
- vpd_data = vpd_start;
+ char str_id[VENDOR_ID_LEN + 1];
+ unsigned int vpd_len, kw_len;
+ u8 *vpd_data;
+ int rodi;
- /* now vpd_data holds full vpd content in both cases */
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (rodi < 0)
- goto out_not_found;
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
+ vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
+ if (IS_ERR(vpd_data))
+ return;
- if (len != VENDOR_ID_LEN)
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
+ if (rodi < 0 || kw_len != VENDOR_ID_LEN)
goto out_not_found;
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
/* vendor specific info */
- snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
- snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
- if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
- !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (rodi >= 0) {
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
- memcpy(bp->fw_ver, &vpd_data[rodi], len);
- bp->fw_ver[len] = ' ';
- }
+ snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &kw_len);
+ if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
+ bp->fw_ver[kw_len] = ' ';
}
- kfree(vpd_extended_data);
- return;
}
out_not_found:
- kfree(vpd_extended_data);
- return;
+ kfree(vpd_data);
}
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f255fd0b16db..6fbf735fca31 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SR-IOV capability was enabled but there are no VFs*/
if (iov->total == 0) {
- err = -EINVAL;
+ err = 0;
goto failed;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9b86516e59a1..037767b370d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
- if (!fw_health->enabled) {
+ if (!fw_health->enabled)
fw_health->last_fw_heartbeat =
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- fw_health->last_fw_reset_cnt =
- bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- }
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
@@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;
+ if (!txr->tx_buf_ring)
+ continue;
+
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb;
@@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
}
skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
dma_addr_t mapping = rx_buf->mapping;
@@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
kfree(data);
}
}
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
for (i = 0; i < max_agg_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
__free_page(page);
}
+
+skip_rx_agg_free:
if (rxr->rx_page) {
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
@@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
struct pci_dev *pdev = bp->pdev;
int i;
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i])
continue;
@@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
+skip_pages:
if (rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8;
@@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
kfree(cpr->cp_desc_ring);
cpr->cp_desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
kfree(cpr->cp_desc_mapping);
cpr->cp_desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
}
static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+ bp->fw_health->enabled) {
+ bp->fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ }
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
@@ -13100,66 +13125,35 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
-#define BNXT_VPD_LEN 512
static void bnxt_vpd_read_info(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- int i, len, pos, ro_size, size;
- ssize_t vpd_size;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
u8 *vpd_data;
- vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
- if (!vpd_data)
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
return;
-
- vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
- if (vpd_size <= 0) {
- netdev_err(bp->dev, "Unable to read VPD\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
}
- ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- if (i + ro_size > vpd_size)
- goto exit;
-
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
if (pos < 0)
goto read_sn;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto read_sn;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn:
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_SERIALNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
if (pos < 0)
goto exit;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto exit;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
kfree(vpd_data);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 46fae1acbeed..e6a4a768b10b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
if (cb_priv->tunnel_netdev == netdev)
return cb_priv;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8a238e349e02..5e0e0e70d801 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12788,7 +12788,7 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
-static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
{
int i;
__be32 *buf;
@@ -12822,15 +12822,11 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = TG3_NVM_VPD_OFF;
len = TG3_NVM_VPD_LEN;
}
- } else {
- len = TG3_NVM_PCI_VPD_MAX_LEN;
- }
- buf = kmalloc(len, GFP_KERNEL);
- if (buf == NULL)
- return NULL;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
- if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < len; i += 4) {
/* The data is in little-endian format in NVRAM.
* Use the big-endian read routines to preserve
@@ -12841,12 +12837,9 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
}
*vpdlen = len;
} else {
- ssize_t cnt;
-
- cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
- if (cnt < 0)
- goto error;
- *vpdlen = cnt;
+ buf = pci_vpd_alloc(tp->pdev, vpdlen);
+ if (IS_ERR(buf))
+ return NULL;
}
return buf;
@@ -12868,9 +12861,10 @@ error:
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 csum, magic, len;
+ u32 csum, magic;
__be32 *buf;
int i, j, k, err = 0, size;
+ unsigned int len;
if (tg3_flag(tp, NO_NVRAM))
return 0;
@@ -13013,33 +13007,10 @@ static int tg3_test_nvram(struct tg3 *tp)
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
- if (i > 0) {
- j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
- if (j < 0)
- goto out;
-
- if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
- goto out;
-
- i += PCI_VPD_LRDT_TAG_SIZE;
- j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
- PCI_VPD_RO_KEYWORD_CHKSUM);
- if (j > 0) {
- u8 csum8 = 0;
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- for (i = 0; i <= j; i++)
- csum8 += ((u8 *)buf)[i];
-
- if (csum8)
- goto out;
- }
- }
-
- err = 0;
-
+ err = pci_vpd_check_csum(buf, len);
+ /* go on if no checksum found */
+ if (err == 1)
+ err = 0;
out:
kfree(buf);
return err;
@@ -15624,64 +15595,36 @@ skip_phy_reset:
static void tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
- unsigned int block_end, rosize, len;
- u32 vpdlen;
- int j, i = 0;
+ unsigned int len, vpdlen;
+ int i;
vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (i < 0)
- goto out_not_found;
-
- rosize = pci_vpd_lrdt_size(&vpd_data[i]);
- block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
- i += PCI_VPD_LRDT_TAG_SIZE;
+ goto partno;
- if (block_end > vpdlen)
- goto out_not_found;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (j > 0) {
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&vpd_data[j], "1028", 4))
- goto partno;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (j < 0)
- goto partno;
+ if (len != 4 || memcmp(vpd_data + i, "1028", 4))
+ goto partno;
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end)
- goto partno;
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_VENDOR0, &len);
+ if (i < 0)
+ goto partno;
- if (len >= sizeof(tp->fw_ver))
- len = sizeof(tp->fw_ver) - 1;
- memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
- snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
- &vpd_data[j]);
- }
+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
partno:
- i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_PARTNO, &len);
if (i < 0)
goto out_not_found;
- len = pci_vpd_info_field_size(&vpd_data[i]);
-
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len > TG3_BPN_SIZE ||
- (len + i) > vpdlen)
+ if (len > TG3_BPN_SIZE)
goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 46ec4fdfd16a..1000c894064f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,7 +2101,6 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
-#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 8b7b59908a1a..f66d22de5168 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
struct platform_device *plat_dev = pci_get_drvdata(pdev);
struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
- platform_device_unregister(plat_dev);
clk_unregister(plat_data->pclk);
clk_unregister(plat_data->hclk);
+ platform_device_unregister(plat_dev);
}
static const struct pci_device_id dev_id_table[] = {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c6fe0f2a4d0e..f6396ac64006 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -526,7 +526,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
oct->irq_name_storage = NULL;
}
/* Soft reset the octeon device before exiting */
- if (oct->pci_dev->reset_fn)
+ if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
octeon_pci_flr(oct);
else
cn23xx_vf_ask_pf_to_do_flr(oct);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9058f09f921e..ecea3cdd30b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,6 @@ extern struct mutex uld_mutex;
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
PN_LEN = 16, /* Part Number length */
MACADDR_LEN = 12, /* MAC Address length */
@@ -391,7 +390,6 @@ struct tp_params {
struct vpd_params {
unsigned int cclk;
- u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 6606fb8b3e42..64144b6171d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2743,10 +2743,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, ret = 0, addr;
- int ec, sn, pn, na;
- u8 *vpd, csum, base_val = 0;
- unsigned int vpdr_len, kw_offset, id_len;
+ unsigned int id_len, pn_len, sn_len, na_len;
+ int id, sn, pn, na, addr, ret = 0;
+ u8 *vpd, base_val = 0;
vpd = vmalloc(VPD_LEN);
if (!vpd)
@@ -2765,74 +2764,52 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (ret < 0)
goto out;
- if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
- dev_err(adapter->pdev_dev, "missing VPD ID string\n");
- ret = -EINVAL;
+ ret = pci_vpd_find_id_string(vpd, VPD_LEN, &id_len);
+ if (ret < 0)
goto out;
- }
+ id = ret;
- id_len = pci_vpd_lrdt_size(vpd);
- if (id_len > ID_LEN)
- id_len = ID_LEN;
-
- i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ ret = pci_vpd_check_csum(vpd, VPD_LEN);
+ if (ret) {
+ dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
ret = -EINVAL;
goto out;
}
- vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
- kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
- if (vpdr_len + kw_offset > VPD_LEN) {
- dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &sn_len);
+ if (ret < 0)
goto out;
- }
-
-#define FIND_VPD_KW(var, name) do { \
- var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
- if (var < 0) { \
- dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
- ret = -EINVAL; \
- goto out; \
- } \
- var += PCI_VPD_INFO_FLD_HDR_SIZE; \
-} while (0)
-
- FIND_VPD_KW(i, "RV");
- for (csum = 0; i >= 0; i--)
- csum += vpd[i];
+ sn = ret;
- if (csum) {
- dev_err(adapter->pdev_dev,
- "corrupted VPD EEPROM, actual csum %u\n", csum);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_PARTNO, &pn_len);
+ if (ret < 0)
goto out;
- }
+ pn = ret;
- FIND_VPD_KW(ec, "EC");
- FIND_VPD_KW(sn, "SN");
- FIND_VPD_KW(pn, "PN");
- FIND_VPD_KW(na, "NA");
-#undef FIND_VPD_KW
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN, "NA", &na_len);
+ if (ret < 0)
+ goto out;
+ na = ret;
- memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
+ memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->ec, vpd + ec, EC_LEN);
- strim(p->ec);
- i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
strim(p->sn);
- i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
strim((char *)p->na);
out:
vfree(vpd);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error reading VPD\n");
+ return ret;
+ }
+
+ return 0;
}
/**
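The tg3, cxgb4, and sfc conversions above share one pattern: pci_vpd_alloc() reads the whole VPD and reports its size, and pci_vpd_find_ro_info_keyword() locates a read-only keyword while validating its length against the buffer, which removes the old open-coded tag walking and bounds checks. A minimal sketch of the flow, assuming a hypothetical my_probe_part_number() helper (not taken from this series):

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>

static void my_probe_part_number(struct pci_dev *pdev)
{
	unsigned int vpd_size, kw_len;
	u8 *vpd;
	int off;

	vpd = pci_vpd_alloc(pdev, &vpd_size);	/* read the whole VPD */
	if (IS_ERR(vpd))
		return;

	/* the helper checks the keyword length against vpd_size itself */
	off = pci_vpd_find_ro_info_keyword(vpd, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (off >= 0)
		pci_info(pdev, "Part Number : %.*s\n", kw_len, vpd + off);

	kfree(vpd);
}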
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 22af3d6ce178..adc54a726661 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
#define HNS3_OUTER_VLAN_TAG 2
#define HNS3_MIN_TX_LEN 33U
+#define HNS3_MIN_TUN_PKT_LEN 65U
/* hns3_pci_tbl - PCI Device ID Table
*
@@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
l4.tcp->doff);
break;
case IPPROTO_UDP:
- if (hns3_tunnel_csum_bug(skb))
- return skb_checksum_help(skb);
+ if (hns3_tunnel_csum_bug(skb)) {
+ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+ return ret ? ret : skb_checksum_help(skb);
+ }
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
goto out_with_desc_cb;
if (!HNAE3_IS_TX_RING(ring)) {
- hns3_alloc_page_pool(ring);
+ if (page_pool_enabled)
+ hns3_alloc_page_pool(ring);
ret = hns3_alloc_ring_buffers(ring);
if (ret)
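The page_pool_enabled parameter above uses 0400 permissions, so the flag is visible in sysfs but fixed for the module's lifetime; whether RX rings attach a page pool is decided once at probe time. A generic sketch of the same idiom (my_feature_enabled is illustrative, not from this patch):

#include <linux/module.h>

static bool my_feature_enabled = true;
module_param(my_feature_enabled, bool, 0400);	/* readable, not writable, via sysfs */
MODULE_PARM_DESC(my_feature_enabled, "Enable the optional fast path");

Loading with page_pool_enabled=0 on the insmod/modprobe line (or hns3.page_pool_enabled=0 on the kernel command line for a built-in) then disables page pool use without a rebuild.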
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 61d30b2961f4..a983d012e26f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
}
bd_num = le32_to_cpu(req->bd_num);
+ if (!bd_num) {
+ dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+ return -EINVAL;
+ }
desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc_src)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6d5f84666fa5..a0d0fa4f6a24 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1531,9 +1531,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
unsigned int i;
- int ret;
+ int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1601,11 +1602,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
- /* Set the init affinity based on pci func number */
- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
- &hdev->affinity_mask);
+ /* Set the affinity based on numa node */
+ node = dev_to_node(&hdev->pdev->dev);
+ if (node != NUMA_NO_NODE)
+ cpumask = cpumask_of_node(node);
+
+ cpumask_copy(&hdev->affinity_mask, cpumask);
return ret;
}
@@ -8131,11 +8133,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_clear_arfs_rules(hdev);
spin_unlock_bh(&hdev->fd_rule_lock);
- /* If it is not PF reset, the firmware will disable the MAC,
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET) {
+ hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;
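The hclge_configure() change above replaces the per-PCI-function spread with a simpler rule: pin to the device's NUMA-node CPUs when a node is assigned, otherwise fall back to all online CPUs. The same choice in isolation, with a hypothetical my_set_affinity():

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/numa.h>
#include <linux/topology.h>

static void my_set_affinity(struct device *dev, struct cpumask *mask)
{
	const struct cpumask *cpus = cpu_online_mask;
	int node = dev_to_node(dev);

	if (node != NUMA_NO_NODE)	/* non-NUMA systems report NUMA_NO_NODE */
		cpus = cpumask_of_node(node);

	cpumask_copy(mask, cpus);
}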
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 82e727020120..a69e892277b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
hclgevf_enable_vector(&hdev->misc_vector, false);
event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ hclgevf_clear_event_cause(hdev, clearval);
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
- hclgevf_clear_event_cause(hdev, clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_enable_vector(&hdev->misc_vector, true);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b8a40146b895..b482f6f633bd 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
err = -ENODEV;
goto out;
}
- memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
+ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
dev->base_addr = MVME_I596_BASE;
dev->irq = (unsigned) MVME16x_IRQ_I596;
goto found;
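absolute_pointer() (from <linux/compiler.h>) hides the constant origin of the address, so the compiler cannot treat 0xfffc1f2c as a tiny fixed object and warn that memcpy() overreads it. Reduced to its essence, reusing the NOVRAM address from the hunk above (my_read_mac is illustrative):

#include <linux/compiler.h>
#include <linux/if_ether.h>
#include <linux/string.h>

#define MY_NOVRAM_MAC	0xfffc1f2c	/* fixed bus address, as above */

static void my_read_mac(u8 *eth_addr)
{
	memcpy(eth_addr, absolute_pointer(MY_NOVRAM_MAC), ETH_ALEN);
}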
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d1be883f933a..8f17096e614d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4890,6 +4890,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
+ if (adapter->failover_pending) {
+ adapter->init_done_rc = -EAGAIN;
+ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+ complete(&adapter->init_done);
+ /* login response buffer will be released on reset */
+ return 0;
+ }
+
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index eadcb9958346..3c4f08d20414 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ set_bit(ICE_FLAG_AUX_ENA, pf->flags);
ice_plug_aux_dev(pf);
}
}
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 1f2afdf6cd48..adcc9a251595 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
int ret;
+ /* if this PF doesn't support a technology that requires auxiliary
+ * devices, then gracefully exit
+ */
+ if (!ice_is_aux_ena(pf))
+ return 0;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b877efae61df..0e19b4d02e62 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= netdev->features;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index dde8c03c5cfe..7d56a927081d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -656,11 +656,10 @@ static const struct devlink_param enable_rdma_param =
static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
int err;
- if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
err = devlink_param_register(devlink, &enable_rdma_param);
@@ -676,9 +675,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
devlink_param_unregister(devlink, &enable_rdma_param);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 3f8a98093f8c..f9cf9fb31547 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
- return err;
+ goto err_cancel_work;
}
err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 669a75f3537a..7b8c8187543a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0c38c2e319be..b5ddaa82755f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
int ifindex = upper->ifindex;
- int err;
+ int err = 0;
if (!netif_is_bridge_master(upper))
return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
const struct switchdev_attr *attr = port_attr_info->attr;
u16 vport_num, esw_owner_vhca_id;
- int err;
+ int err = 0;
if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
&esw_owner_vhca_id))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 51a4d80f7fa3..de03684528bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
{
struct mlx5e_rep_indr_block_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index bf0313e2682b..13056cb9757d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
- if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+ if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2cfd12953909..306fb5d6a36d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return set_pflag_cqe_based_moder(netdev, enable, true);
}
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- if (new_val && !priv->profile->rx_ptp_support &&
- priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (rx_filter)
new_params.ptp_rx = new_val;
if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_filter;
int err;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+ rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47efd858964d..3fd515e7bf30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
if (!rx_filter)
/* Reset CQE compression to Admin default */
- return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
return 0;
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
if (err)
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9fe8e3c204d6..fe501ba88bea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
+ rcu_read_unlock();
free_match_list(match_head, ft_locked);
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
-out:
rcu_read_unlock();
return err;
}
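The build_match_list() fix above enforces a basic RCU rule: every exit path, including error paths, must leave the read-side critical section before calling anything that may sleep or free the data being walked. The shape of the pattern, with hypothetical names:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static int my_collect(void)
{
	void *item;

	rcu_read_lock();
	item = kmalloc(16, GFP_ATOMIC);	/* GFP_ATOMIC: may not sleep here */
	if (!item) {
		rcu_read_unlock();	/* unlock before any cleanup that may sleep */
		return -ENOMEM;
	}
	/* ... use RCU-protected data ... */
	rcu_read_unlock();
	kfree(item);
	return 0;
}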
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 49ca57c6d31d..ca5690b0a7ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
struct mlx5_core_dev *dev1;
struct mlx5_lag *ldev;
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
mlx5_dev_list_lock();
- ldev = mlx5_lag_dev(dev);
dev0 = ldev->pf[MLX5_LAG_P1].dev;
dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
- mlx5_dev_list_lock();
ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
+ mlx5_dev_list_lock();
ldev->mode_changes_in_progress--;
mlx5_dev_list_unlock();
mlx5_queue_bond_work(ldev, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3e85b17f5857..6704f5c1aa32 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_clean_port(priv);
if (err)
goto free_irqs;
+
+ /* Clear driver's valid_polarity to match hardware,
+ * since the above call to clean_port() resets the
+ * receive polarity used by hardware.
+ */
+ priv->valid_polarity = 0;
+
err = mlxbf_gige_rx_init(priv);
if (err)
goto free_irqs;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index c1310ea1c216..d5c485a6d284 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
int err;
u16 i;
- dma_buf = kzalloc(sizeof(*dma_buf) +
- q_depth * sizeof(struct hwc_work_request),
- GFP_KERNEL);
+ dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
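struct_size() (from <linux/overflow.h>) computes sizeof(*dma_buf) + q_depth * sizeof(dma_buf->reqs[0]) with the multiplication and addition checked for overflow, saturating to SIZE_MAX so the allocation fails cleanly instead of being undersized. A self-contained sketch with a hypothetical trailing-array struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_buf {
	unsigned int count;
	struct my_req {
		u32 id;
	} reqs[];		/* flexible array member */
};

static struct my_buf *my_buf_alloc(u16 q_depth)
{
	struct my_buf *buf;

	buf = kzalloc(struct_size(buf, reqs, q_depth), GFP_KERNEL);
	if (buf)
		buf->count = q_depth;
	return buf;
}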
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 556c3495211d..64c0ef57ad42 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
if (cb_priv->netdev == netdev)
return cb_priv;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 6e5a6cc97d0e..24cd41567775 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att)
{
enum nvm_image_type type;
+ int rc;
u32 i;
/* Translate image_id into MFW definitions */
@@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- qed_mcp_nvm_info_populate(p_hwfn);
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc)
+ return rc;
+
for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 0a2f34fc8b24..27dffa299ca6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache, *temp;
- int i, ret = -EIO;
__le32 *temp_le;
u8 data[16];
size_t size;
+ int i, ret;
u64 addr;
temp = vzalloc(fw->size);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4b2eca5e08e2..01ef5efd7bc2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -119,6 +119,8 @@
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_SM_RST 0x0002 /* MAC status machine reset */
+#define MD_CSC 0xb6 /* MDC speed control register */
+#define MD_CSC_DEFAULT 0x0030
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
{
void __iomem *ioaddr = lp->base;
int limit = MAC_DEF_TIMEOUT;
- u16 cmd;
+ u16 cmd, md_csc;
+ md_csc = ioread16(ioaddr + MD_CSC);
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
+
+ /* Restore MDIO clock frequency */
+ if (md_csc != MD_CSC_DEFAULT)
+ iowrite16(md_csc, ioaddr + MD_CSC);
}
static void r6040_init_mac_regs(struct net_device *dev)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a295e2621cf3..43ef4f529028 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -900,74 +900,36 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* NIC VPD information
* Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * installed NIC.
*/
-#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index e5b0d795c301..3dbea028b325 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* We need a channel per event queue, plus a VI per tx queue.
* This may be more pessimistic than it needs to be.
*/
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- netif_err(efx, drv, efx->net_dev,
- "XDP_TX and XDP_REDIRECT will not work on this interface");
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
+ if (n_channels >= max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
} else if (n_channels + n_xdp_tx > efx->max_vis) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
- n_xdp_tx, n_channels, efx->max_vis);
- netif_err(efx, drv, efx->net_dev,
- "XDP_TX and XDP_REDIRECT will not work on this interface");
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+ n_xdp_tx, n_channels, efx->max_vis);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+ } else if (n_channels + n_xdp_ev > max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+
+ n_xdp_ev = max_channels - n_channels;
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+ DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
} else {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+ }
+
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
efx->n_xdp_channels = n_xdp_ev;
efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
"Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
+ n_xdp_ev * tx_per_ev, n_xdp_ev);
+ } else {
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = n_xdp_tx;
}
if (vec_count < n_channels) {
@@ -858,6 +872,20 @@ rollback:
goto out;
}
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
int efx_set_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
@@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
-
- /* We may have a few left-over XDP TX
- * queues owing to xdp_tx_queue_count
- * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
- * We still allocate and probe those
- * TXQs, but never use them.
- */
- if (xdp_queue_number < efx->xdp_tx_queue_count) {
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
xdp_queue_number++;
- }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
channel->channel, tx_queue->label,
tx_queue->queue);
}
+
+ /* If XDP is borrowing queues from net stack, it must use the queue
+ * with no csum offload, which is the first one of the channel
+ * (note: channel->tx_queue_by_type is not initialized yet)
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
}
}
}
- WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+ /* If we have more CPUs than assigned XDP TX queues, assign the already
+ * existing queues to the exceeding CPUs
+ */
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index c177ea0f301e..423bdf81200f 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2780,75 +2780,36 @@ static void ef4_pci_remove(struct pci_dev *pci_dev)
};
/* NIC VPD information
- * Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * Called during probe to display the part number of the installed NIC.
*/
-#define SFC_VPD_LEN 512
static void ef4_probe_vpd_strings(struct ef4_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9b4b25704271..f6981810039d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
#define EFX_RPS_MAX_IN_FLIGHT 8
#endif /* CONFIG_RFS_ACCEL */
+enum efx_xdp_tx_queues_mode {
+ EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */
+ EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */
+ EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
+};
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
* should be allocated for this NIC
* @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
* @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -979,6 +986,7 @@ struct efx_nic {
unsigned int xdp_tx_queue_count;
struct efx_tx_queue **xdp_tx_queues;
+ enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
unsigned rxq_entries;
unsigned txq_entries;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 0c6650d2e239..d16e031e95f4 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
unsigned int len;
int space;
int cpu;
- int i;
+ int i = 0;
- cpu = raw_smp_processor_id();
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+ if (unlikely(!n))
+ return 0;
- if (!efx->xdp_tx_queue_count ||
- unlikely(cpu >= efx->xdp_tx_queue_count))
+ cpu = raw_smp_processor_id();
+ if (unlikely(cpu >= efx->xdp_tx_queue_count))
return -EINVAL;
tx_queue = efx->xdp_tx_queues[cpu];
if (unlikely(!tx_queue))
return -EINVAL;
- if (unlikely(n && !xdpfs))
- return -EINVAL;
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
- if (!n)
- return 0;
+ /* If we're borrowing net stack queues we have to handle stop-restart
+ * or we might block the queue and it will be considered as frozen
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ if (netif_tx_queue_stopped(tx_queue->core_txq))
+ goto unlock;
+ efx_tx_maybe_stop_queue(tx_queue);
+ }
/* Check for available space. We should never need multiple
* descriptors per frame.
@@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);
+unlock:
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
return i == 0 ? -EIO : i;
}
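The lock handling above encodes the new queue-sharing contract: a DEDICATED queue is touched by exactly one CPU and needs no lock, while SHARED and BORROWED queues can race with other senders and must take the per-queue xmit lock. Stripped to the locking skeleton (my_xdp_xmit and its 'shared' flag are illustrative):

#include <linux/netdevice.h>
#include <linux/smp.h>

static int my_xdp_xmit(struct net_device *dev, struct netdev_queue *txq,
		       bool shared)
{
	int cpu = raw_smp_processor_id();

	if (shared)			/* SHARED or BORROWED queue */
		HARD_TX_LOCK(dev, txq, cpu);

	/* ... push descriptors to the hardware ring ... */

	if (shared)
		HARD_TX_UNLOCK(dev, txq);
	return 0;
}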
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ece02b35a6ce..553c4403258a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_100_150M;
else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
@@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;
- int ret;
if (!ndev || !netif_running(ndev))
return 0;
@@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev)
} else {
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
- /* Disable clock in case of PWM is off */
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
- ret = pm_runtime_force_suspend(dev);
- if (ret) {
- mutex_unlock(&priv->lock);
- return ret;
- }
}
mutex_unlock(&priv->lock);
@@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
- /* enable the clk previously disabled */
- ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
- if (priv->plat->clk_ptp_ref)
- clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5ca710844cc1..62cec9bfcd33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -9,6 +9,7 @@
*******************************************************************************/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
return stmmac_bus_clks_config(priv, true);
}
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* Disable clock in case of PWM is off */
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* enable the clk previously disabled */
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ clk_prepare_enable(priv->plat->clk_ptp_ref);
+ }
+
+ return 0;
+}
+
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
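Moving the clk_ptp_ref and runtime-PM handling out of stmmac_suspend()/stmmac_resume() and into the noirq phase means it now runs after device interrupts are disabled and after the regular suspend callback, which is where pm_runtime_force_suspend() can be used safely. A bare sketch of wiring noirq callbacks into dev_pm_ops (my_* names assumed):

#include <linux/pm.h>

static int my_noirq_suspend(struct device *dev)
{
	return 0;	/* disable clocks, force runtime suspend, ... */
}

static int my_noirq_resume(struct device *dev)
{
	return 0;	/* force runtime resume, re-enable clocks, ... */
}

static const struct dev_pm_ops my_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(my_noirq_suspend, my_noirq_resume)
};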
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 8fe8887d506a..6192244b304a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -68,9 +68,9 @@
#define SIXP_DAMA_OFF 0
/* default level 2 parameters */
-#define SIXP_TXDELAY (HZ/4) /* in 1 s */
+#define SIXP_TXDELAY 25 /* 250 ms */
#define SIXP_PERSIST 50 /* in 256ths */
-#define SIXP_SLOTTIME (HZ/10) /* in 1 s */
+#define SIXP_SLOTTIME 10 /* 100 ms */
#define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */
#define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2324e1b93e37..1da334f54944 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* table region determines the number of entries it has.
*/
if (filter) {
- count = hweight32(ipa->filter_map);
+ /* Include one extra "slot" to hold the filter map itself */
+ count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
count = mem->size / sizeof(__le64);
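The off-by-one fixed above: per the new comment, the table stores the 32-bit filter map itself in one slot, plus one entry per set bit, so the slot count is 1 + hweight32(map). For example, map = 0x0b has three bits set and needs four slots. As a sketch (my_table_slots is illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

static u32 my_table_slots(u32 filter_map)
{
	/* one slot for the bitmap, one per set bit */
	return 1 + hweight32(filter_map);
}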
diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
index 21aa24c741b9..daae7fa58fb8 100644
--- a/drivers/net/phy/dp83640_reg.h
+++ b/drivers/net/phy/dp83640_reg.h
@@ -5,7 +5,7 @@
#ifndef HAVE_DP83640_REGISTERS
#define HAVE_DP83640_REGISTERS
-#define PAGE0 0x0000
+/* #define PAGE0 0x0000 */
#define PHYCR2 0x001c /* PHY Control Register 2 */
#define PAGE4 0x0004
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9e2891d8e8dd..ba5ad86ec826 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock);
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
+ struct device_driver *drv = phydev->mdio.dev.driver;
+ struct phy_driver *phydrv = to_phy_driver(drv);
struct net_device *netdev = phydev->attached_dev;
- if (!phydev->drv->suspend)
+ if (!drv || !phydrv->suspend)
return false;
/* PHY not attached? May suspend if the PHY has not already been
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a1464b764d4d..0a0abe8e4be0 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
+ /* If this link is with an SFP, ensure that changes to advertised modes
+ * also cause the associated interface to be selected such that the
+ * link can be configured correctly.
+ */
+ if (pl->sfp_port && pl->sfp_bus) {
+ config.interface = sfp_select_interface(pl->sfp_bus,
+ config.advertising);
+ if (config.interface == PHY_INTERFACE_MODE_NA) {
+ phylink_err(pl,
+ "selection of interface failed, advertisement %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ config.advertising);
+ return -EINVAL;
+ }
+
+ /* Revalidate with the selected interface */
+ linkmode_copy(support, pl->supported);
+ if (phylink_validate(pl, support, &config)) {
+ phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
+ phylink_an_mode_str(pl->cur_link_an_mode),
+ phy_modes(config.interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ return -EINVAL;
+ }
+ }
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
- changed = !linkmode_equal(pl->supported, support);
+ changed = !linkmode_equal(pl->supported, support) ||
+ !linkmode_equal(pl->link_config.advertising,
+ config.advertising);
if (changed) {
linkmode_copy(pl->supported, support);
linkmode_copy(pl->link_config.advertising, config.advertising);
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index f6b92efffc94..480bcd1f6c1c 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
+CROSS_COMPILE_M68K = m68k-linux-gnu-
+
ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
ifeq ($(ARCH),m68k)
M68KCC = $(CC)
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 9251441fd8a3..7f473f9db300 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -346,29 +346,45 @@ static bool preamble_next(struct nvdimm_drvdata *ndd,
free, nslot);
}
+static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ u64 sum, sum_save;
+
+ if (!namespace_label_has(ndd, checksum))
+ return true;
+
+ sum_save = nsl_get_checksum(ndd, nd_label);
+ nsl_set_checksum(ndd, nd_label, 0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nsl_set_checksum(ndd, nd_label, sum_save);
+ return sum == sum_save;
+}
+
+static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ u64 sum;
+
+ if (!namespace_label_has(ndd, checksum))
+ return;
+ nsl_set_checksum(ndd, nd_label, 0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nsl_set_checksum(ndd, nd_label, sum);
+}
+
static bool slot_valid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u32 slot)
{
+ bool valid;
+
/* check that we are written where we expect to be written */
- if (slot != __le32_to_cpu(nd_label->slot))
+ if (slot != nsl_get_slot(ndd, nd_label))
return false;
-
- /* check checksum */
- if (namespace_label_has(ndd, checksum)) {
- u64 sum, sum_save;
-
- sum_save = __le64_to_cpu(nd_label->checksum);
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum_save);
- if (sum != sum_save) {
- dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
- slot, sum);
- return false;
- }
- }
-
- return true;
+ valid = nsl_validate_checksum(ndd, nd_label);
+ if (!valid)
+ dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
+ return valid;
}
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
@@ -395,13 +411,13 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
continue;
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
- flags = __le32_to_cpu(nd_label->flags);
+ flags = nsl_get_flags(ndd, nd_label);
if (test_bit(NDD_NOBLK, &nvdimm->flags))
flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
- __le64_to_cpu(nd_label->dpa),
- __le64_to_cpu(nd_label->rawsize));
+ nsl_get_dpa(ndd, nd_label),
+ nsl_get_rawsize(ndd, nd_label));
nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
if (!res)
return -EBUSY;
@@ -548,9 +564,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
nd_label = to_label(ndd, slot);
if (!slot_valid(ndd, nd_label, slot)) {
- u32 label_slot = __le32_to_cpu(nd_label->slot);
- u64 size = __le64_to_cpu(nd_label->rawsize);
- u64 dpa = __le64_to_cpu(nd_label->dpa);
+ u32 label_slot = nsl_get_slot(ndd, nd_label);
+ u64 size = nsl_get_rawsize(ndd, nd_label);
+ u64 dpa = nsl_get_dpa(ndd, nd_label);
dev_dbg(ndd->dev,
"slot%d invalid slot: %d dpa: %llx size: %llx\n",
@@ -708,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
- (unsigned long) to_namespace_index(ndd, 0);
}
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
if (guid_equal(guid, &nvdimm_btt_guid))
return NVDIMM_CCLASS_BTT;
@@ -756,6 +772,45 @@ static void reap_victim(struct nd_mapping *nd_mapping,
victim->label = NULL;
}
+static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid)
+{
+ if (namespace_label_has(ndd, type_guid))
+ guid_copy(&nd_label->type_guid, guid);
+}
+
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid)
+{
+ if (!namespace_label_has(ndd, type_guid))
+ return true;
+ if (!guid_equal(&nd_label->type_guid, guid)) {
+ dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
+ &nd_label->type_guid);
+ return false;
+ }
+ return true;
+}
+
+static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ enum nvdimm_claim_class claim_class)
+{
+ if (!namespace_label_has(ndd, abstraction_guid))
+ return;
+ guid_copy(&nd_label->abstraction_guid,
+ to_abstraction_guid(claim_class,
+ &nd_label->abstraction_guid));
+}
+
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ if (!namespace_label_has(ndd, abstraction_guid))
+ return NVDIMM_CCLASS_NONE;
+ return to_nvdimm_cclass(&nd_label->abstraction_guid);
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -797,29 +852,18 @@ static int __pmem_label_update(struct nd_region *nd_region,
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
- if (nspm->alt_name)
- memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(flags);
- nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
- nd_label->position = __cpu_to_le16(pos);
- nd_label->isetcookie = __cpu_to_le64(cookie);
- nd_label->rawsize = __cpu_to_le64(resource_size(res));
- nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
- nd_label->dpa = __cpu_to_le64(res->start);
- nd_label->slot = __cpu_to_le32(slot);
- if (namespace_label_has(ndd, type_guid))
- guid_copy(&nd_label->type_guid, &nd_set->type_guid);
- if (namespace_label_has(ndd, abstraction_guid))
- guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
- if (namespace_label_has(ndd, checksum)) {
- u64 sum;
-
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum);
- }
+ nsl_set_name(ndd, nd_label, nspm->alt_name);
+ nsl_set_flags(ndd, nd_label, flags);
+ nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
+ nsl_set_position(ndd, nd_label, pos);
+ nsl_set_isetcookie(ndd, nd_label, cookie);
+ nsl_set_rawsize(ndd, nd_label, resource_size(res));
+ nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
+ nsl_set_dpa(ndd, nd_label, res->start);
+ nsl_set_slot(ndd, nd_label, slot);
+ nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+ nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+ nsl_calculate_checksum(ndd, nd_label);
nd_dbg_dpa(nd_region, ndd, res, "\n");
/* update label */
@@ -879,9 +923,9 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
struct resource *res;
for_each_dpa_resource(ndd, res) {
- if (res->start != __le64_to_cpu(nd_label->dpa))
+ if (res->start != nsl_get_dpa(ndd, nd_label))
continue;
- if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
+ if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
continue;
return res;
}
@@ -890,6 +934,59 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
}
/*
+ * Use the presence of the type_guid as a flag to determine isetcookie
+ * usage and nlabel + position policy for blk-aperture namespaces.
+ */
+static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ if (namespace_label_has(ndd, type_guid)) {
+ nsl_set_isetcookie(ndd, nd_label, isetcookie);
+ return;
+ }
+ nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ if (!namespace_label_has(ndd, type_guid))
+ return true;
+
+ if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
+ dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie,
+ nsl_get_isetcookie(ndd, nd_label));
+ return false;
+ }
+
+ return true;
+}
+
+static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, int nlabel,
+ bool first)
+{
+ if (!namespace_label_has(ndd, type_guid)) {
+ nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
+ return;
+ }
+ nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff);
+}
+
+static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ bool first)
+{
+ if (!namespace_label_has(ndd, type_guid)) {
+ nsl_set_position(ndd, nd_label, 0);
+ return;
+ }
+ nsl_set_position(ndd, nd_label, first ? 0 : 0xffff);
+}
+
+/*
* 1/ Account all the labels that can be freed after this update
* 2/ Allocate and write the label to the staging (next) index
* 3/ Record the resources in the namespace device
@@ -1017,50 +1114,21 @@ static int __blk_label_update(struct nd_region *nd_region,
nd_label = to_label(ndd, slot);
memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
- if (nsblk->alt_name)
- memcpy(nd_label->name, nsblk->alt_name,
- NSLABEL_NAME_LEN);
- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
-
- /*
- * Use the presence of the type_guid as a flag to
- * determine isetcookie usage and nlabel + position
- * policy for blk-aperture namespaces.
- */
- if (namespace_label_has(ndd, type_guid)) {
- if (i == min_dpa_idx) {
- nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
- nd_label->position = __cpu_to_le16(0);
- } else {
- nd_label->nlabel = __cpu_to_le16(0xffff);
- nd_label->position = __cpu_to_le16(0xffff);
- }
- nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
- } else {
- nd_label->nlabel = __cpu_to_le16(0); /* N/A */
- nd_label->position = __cpu_to_le16(0); /* N/A */
- nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
- }
-
- nd_label->dpa = __cpu_to_le64(res->start);
- nd_label->rawsize = __cpu_to_le64(resource_size(res));
- nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
- nd_label->slot = __cpu_to_le32(slot);
- if (namespace_label_has(ndd, type_guid))
- guid_copy(&nd_label->type_guid, &nd_set->type_guid);
- if (namespace_label_has(ndd, abstraction_guid))
- guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
-
- if (namespace_label_has(ndd, checksum)) {
- u64 sum;
-
- nd_label->checksum = __cpu_to_le64(0);
- sum = nd_fletcher64(nd_label,
- sizeof_namespace_label(ndd), 1);
- nd_label->checksum = __cpu_to_le64(sum);
- }
+ nsl_set_name(ndd, nd_label, nsblk->alt_name);
+ nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
+
+ nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources,
+ i == min_dpa_idx);
+ nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx);
+ nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2);
+
+ nsl_set_dpa(ndd, nd_label, res->start);
+ nsl_set_rawsize(ndd, nd_label, resource_size(res));
+ nsl_set_lbasize(ndd, nd_label, nsblk->lbasize);
+ nsl_set_slot(ndd, nd_label, slot);
+ nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+ nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+ nsl_calculate_checksum(ndd, nd_label);
/* update label */
offset = nd_label_offset(ndd, nd_label);
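The nsl_calculate_checksum() helper called in both label update paths is defined elsewhere in label.c and is not part of these hunks; a minimal sketch consistent with the open-coded Fletcher-64 logic it replaces would be:

    static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
                                       struct nd_namespace_label *nd_label)
    {
            u64 sum;

            if (!namespace_label_has(ndd, checksum))
                    return;
            /* the checksum is computed with the checksum field zeroed */
            nsl_set_checksum(ndd, nd_label, 0);
            sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
            nsl_set_checksum(ndd, nd_label, sum);
    }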
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 956b6d1bd8cc..31f94fad7b92 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 745478213ff2..4cec171c934d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
if (!nd_label)
continue;
nd_label_gen_id(&label_id, nd_label->uuid,
- __le32_to_cpu(nd_label->flags));
+ nsl_get_flags(ndd, nd_label));
if (strcmp(old_label_id.id, label_id.id) == 0)
set_bit(ND_LABEL_REAP, &label_ent->flags);
}
@@ -1847,28 +1847,21 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
u16 position, nlabel;
- u64 isetcookie;
if (!nd_label)
continue;
- isetcookie = __le64_to_cpu(nd_label->isetcookie);
- position = __le16_to_cpu(nd_label->position);
- nlabel = __le16_to_cpu(nd_label->nlabel);
+ position = nsl_get_position(ndd, nd_label);
+ nlabel = nsl_get_nlabel(ndd, nd_label);
- if (isetcookie != cookie)
+ if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
continue;
if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
- if (namespace_label_has(ndd, type_guid)
- && !guid_equal(&nd_set->type_guid,
- &nd_label->type_guid)) {
- dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
+ if (!nsl_validate_type_guid(ndd, nd_label,
+ &nd_set->type_guid))
continue;
- }
if (found_uuid) {
dev_dbg(ndd->dev, "duplicate entry for uuid\n");
@@ -1923,8 +1916,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
*/
hw_start = nd_mapping->start;
hw_end = hw_start + nd_mapping->size;
- pmem_start = __le64_to_cpu(nd_label->dpa);
- pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+ pmem_start = nsl_get_dpa(ndd, nd_label);
+ pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
if (pmem_start >= hw_start && pmem_start < hw_end
&& pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
@@ -1947,14 +1940,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nd_label: target pmem namespace label to evaluate
*/
static struct device *create_namespace_pmem(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex,
- struct nd_namespace_label *nd_label)
+ struct nd_mapping *nd_mapping,
+ struct nd_namespace_label *nd_label)
{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_namespace_index *nsindex =
+ to_namespace_index(ndd, ndd->ns_current);
u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
struct nd_label_ent *label_ent;
struct nd_namespace_pmem *nspm;
- struct nd_mapping *nd_mapping;
resource_size_t size = 0;
struct resource *res;
struct device *dev;
@@ -1966,10 +1961,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
return ERR_PTR(-ENXIO);
}
- if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+ if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
nd_label->uuid);
- if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
+ if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
return ERR_PTR(-EAGAIN);
dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
@@ -2037,20 +2032,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
continue;
}
- size += __le64_to_cpu(label0->rawsize);
- if (__le16_to_cpu(label0->position) != 0)
+ ndd = to_ndd(nd_mapping);
+ size += nsl_get_rawsize(ndd, label0);
+ if (nsl_get_position(ndd, label0) != 0)
continue;
WARN_ON(nspm->alt_name || nspm->uuid);
- nspm->alt_name = kmemdup((void __force *) label0->name,
- NSLABEL_NAME_LEN, GFP_KERNEL);
+ nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
+ NSLABEL_NAME_LEN, GFP_KERNEL);
nspm->uuid = kmemdup((void __force *) label0->uuid,
NSLABEL_UUID_LEN, GFP_KERNEL);
- nspm->lbasize = __le64_to_cpu(label0->lbasize);
- ndd = to_ndd(nd_mapping);
- if (namespace_label_has(ndd, abstraction_guid))
- nspm->nsio.common.claim_class
- = to_nvdimm_cclass(&label0->abstraction_guid);
-
+ nspm->lbasize = nsl_get_lbasize(ndd, label0);
+ nspm->nsio.common.claim_class =
+ nsl_get_claim_class(ndd, label0);
}
if (!nspm->alt_name || !nspm->uuid) {
@@ -2237,7 +2230,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
if (is_namespace_blk(devs[i])) {
res = nsblk_add_resource(nd_region, ndd,
to_nd_namespace_blk(devs[i]),
- __le64_to_cpu(nd_label->dpa));
+ nsl_get_dpa(ndd, nd_label));
if (!res)
return -ENXIO;
nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
@@ -2265,21 +2258,10 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
struct device *dev = NULL;
struct resource *res;
- if (namespace_label_has(ndd, type_guid)) {
- if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
- dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
- return ERR_PTR(-EAGAIN);
- }
-
- if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
- dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
- nd_set->cookie2,
- __le64_to_cpu(nd_label->isetcookie));
- return ERR_PTR(-EAGAIN);
- }
- }
+ if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
+ return ERR_PTR(-EAGAIN);
+ if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
+ return ERR_PTR(-EAGAIN);
nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
if (!nsblk)
@@ -2288,23 +2270,19 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
dev->type = &namespace_blk_device_type;
dev->parent = &nd_region->dev;
nsblk->id = -1;
- nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
- nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
- GFP_KERNEL);
- if (namespace_label_has(ndd, abstraction_guid))
- nsblk->common.claim_class
- = to_nvdimm_cclass(&nd_label->abstraction_guid);
+ nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
+ nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
+ nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
if (!nsblk->uuid)
goto blk_err;
- memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+ nsl_get_name(ndd, nd_label, name);
if (name[0]) {
- nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
- GFP_KERNEL);
+ nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
if (!nsblk->alt_name)
goto blk_err;
}
res = nsblk_add_resource(nd_region, ndd, nsblk,
- __le64_to_cpu(nd_label->dpa));
+ nsl_get_dpa(ndd, nd_label));
if (!res)
goto blk_err;
nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
@@ -2345,6 +2323,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
struct device *dev, **devs = NULL;
struct nd_label_ent *label_ent, *e;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
/* "safe" because create_namespace_pmem() might list_move() label_ent */
@@ -2355,7 +2334,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
if (!nd_label)
continue;
- flags = __le32_to_cpu(nd_label->flags);
+ flags = nsl_get_flags(ndd, nd_label);
if (is_nd_blk(&nd_region->dev)
== !!(flags & NSLABEL_FLAG_LOCAL))
/* pass, region matches label type */;
@@ -2363,9 +2342,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
continue;
/* skip labels that describe extents outside of the region */
- if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
- __le64_to_cpu(nd_label->dpa) > map_end)
- continue;
+ if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
+ nsl_get_dpa(ndd, nd_label) > map_end)
+ continue;
i = add_namespace_resource(nd_region, nd_label, devs, count);
if (i < 0)
@@ -2381,13 +2360,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
if (is_nd_blk(&nd_region->dev))
dev = create_namespace_blk(nd_region, nd_label, count);
- else {
- struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_namespace_index *nsindex;
-
- nsindex = to_namespace_index(ndd, ndd->ns_current);
- dev = create_namespace_pmem(nd_region, nsindex, nd_label);
- }
+ else
+ dev = create_namespace_pmem(nd_region, nd_mapping,
+ nd_label);
if (IS_ERR(dev)) {
switch (PTR_ERR(dev)) {
@@ -2571,10 +2546,10 @@ static int init_active_labels(struct nd_region *nd_region)
break;
label = nd_label_active(ndd, j);
if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
- u32 flags = __le32_to_cpu(label->flags);
+ u32 flags = nsl_get_flags(ndd, label);
flags &= ~NSLABEL_FLAG_LOCAL;
- label->flags = __cpu_to_le32(flags);
+ nsl_set_flags(ndd, label, flags);
}
label_ent->label = label;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 696b55556d4d..5467ebbb4a6b 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -35,6 +35,156 @@ struct nvdimm_drvdata {
struct kref kref;
};
+static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return nd_label->name;
+}
+
+static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u8 *name)
+{
+ return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+}
+
+static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u8 *name)
+{
+ if (!name)
+ return NULL;
+ return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
+}
+
+static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le32_to_cpu(nd_label->slot);
+}
+
+static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 slot)
+{
+ nd_label->slot = __cpu_to_le32(slot);
+}
+
+static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->checksum);
+}
+
+static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 checksum)
+{
+ nd_label->checksum = __cpu_to_le64(checksum);
+}
+
+static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le32_to_cpu(nd_label->flags);
+}
+
+static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 flags)
+{
+ nd_label->flags = __cpu_to_le32(flags);
+}
+
+static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->dpa);
+}
+
+static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u64 dpa)
+{
+ nd_label->dpa = __cpu_to_le64(dpa);
+}
+
+static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->rawsize);
+}
+
+static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 rawsize)
+{
+ nd_label->rawsize = __cpu_to_le64(rawsize);
+}
+
+static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie)
+{
+ nd_label->isetcookie = __cpu_to_le64(isetcookie);
+}
+
+static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 cookie)
+{
+ return cookie == __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le16_to_cpu(nd_label->position);
+}
+
+static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u16 position)
+{
+ nd_label->position = __cpu_to_le16(position);
+}
+
+static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le16_to_cpu(nd_label->nlabel);
+}
+
+static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u16 nlabel)
+{
+ nd_label->nlabel = __cpu_to_le16(nlabel);
+}
+
+static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label)
+{
+ return __le64_to_cpu(nd_label->lbasize);
+}
+
+static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 lbasize)
+{
+ nd_label->lbasize = __cpu_to_le64(lbasize);
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label,
+ u64 isetcookie);
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, guid_t *guid);
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label);
+
struct nd_region_data {
int ns_count;
int ns_active;
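All of the nsl_* helpers follow the same pattern: callers work in CPU-endian values while the __le conversions stay confined to nd.h. A typical read-modify-write through the accessors (mirroring the init_active_labels() change above) looks like:

    u32 flags = nsl_get_flags(ndd, nd_label);

    flags &= ~NSLABEL_FLAG_LOCAL;
    nsl_set_flags(ndd, nd_label, flags);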
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 1e0615b8565e..72de88ff0d30 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -450,11 +450,11 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
+ addr = devm_memremap(dev, pmem->phys_addr,
+ pmem->size, ARCH_MEMREMAP_PMEM);
if (devm_add_action_or_reset(dev, pmem_release_queue,
&pmem->pgmap))
return -ENOMEM;
- addr = devm_memremap(dev, pmem->phys_addr,
- pmem->size, ARCH_MEMREMAP_PMEM);
bb_range.start = res->start;
bb_range.end = res->end;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8679a108f571..7efb31b87f37 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -116,6 +116,8 @@ static struct class *nvme_ns_chr_class;
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd);
/*
* Prepare a queue for teardown.
@@ -1152,7 +1154,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return effects;
}
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status)
{
if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
nvme_unfreeze(ctrl);
@@ -1167,6 +1170,26 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_queue_scan(ctrl);
flush_work(&ctrl->scan_work);
}
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
+ case NVME_FEAT_KATO:
+ /*
+ * The keep-alive command interval on the host should be
+ * updated when KATO is modified by a Set Features
+ * command.
+ */
+ if (!status)
+ nvme_update_keep_alive(ctrl, cmd);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
}
int nvme_execute_passthru_rq(struct request *rq)
@@ -1181,7 +1204,7 @@ int nvme_execute_passthru_rq(struct request *rq)
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
ret = nvme_execute_rq(disk, rq, false);
if (effects) /* nothing to be done for zero cmd effects */
- nvme_passthru_end(ctrl, effects);
+ nvme_passthru_end(ctrl, effects, cmd, ret);
return ret;
}
@@ -1269,6 +1292,21 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd)
+{
+ unsigned int new_kato =
+ DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
+
+ dev_info(ctrl->device,
+ "keep alive interval updated from %u ms to %u ms\n",
+ ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
+
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = new_kato;
+ nvme_start_keep_alive(ctrl);
+}
+
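Worked example (hypothetical values): a passthru Set Features command selecting NVME_FEAT_KATO with cdw11 = 5000 (KATO is specified in milliseconds) gives:

    new_kato = DIV_ROUND_UP(5000, 1000);  /* 5 seconds */
    /* host keep-alive send interval: new_kato * 1000 / 2 = 2500 ms */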
/*
* In NVMe 1.0 the CNS field was just a binary controller or namespace
* flag, thus sending any new CNS opcodes has a big chance of not working.
@@ -1302,11 +1340,6 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
-static bool nvme_multi_css(struct nvme_ctrl *ctrl)
-{
- return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
-}
-
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
struct nvme_ns_id_desc *cur, bool *csi_seen)
{
@@ -1874,6 +1907,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
goto out_unfreeze;
}
+ set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
@@ -1885,6 +1919,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
+ nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
@@ -3763,7 +3798,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ goto out_cleanup_ns_from_list;
+
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
@@ -3773,6 +3810,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
return;
+ out_cleanup_ns_from_list:
+ nvme_put_ctrl(ctrl);
+ down_write(&ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ctrl->namespaces_rwsem);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3795,6 +3837,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
+ clear_bit(NVME_NS_READY, &ns->flags);
set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
@@ -3802,9 +3845,12 @@ static void nvme_ns_remove(struct nvme_ns *ns)
list_del_rcu(&ns->siblings);
mutex_unlock(&ns->ctrl->subsys->lock);
- synchronize_rcu(); /* guarantee not available in head->list */
- nvme_mpath_clear_current_path(ns);
- synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
+ /* guarantee not available in head->list */
+ synchronize_rcu();
+
+ /* wait for concurrent submissions */
+ if (nvme_mpath_clear_current_path(ns))
+ synchronize_srcu(&ns->head->srcu);
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 37ce3e8b1db2..5d7bc58a27bd 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -147,6 +147,21 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
mutex_unlock(&ctrl->scan_lock);
}
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ sector_t capacity = get_capacity(head->disk);
+ int node;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (capacity != get_capacity(ns->disk))
+ clear_bit(NVME_NS_READY, &ns->flags);
+ }
+
+ for_each_node(node)
+ rcu_assign_pointer(head->current_path[node], NULL);
+}
+
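Clearing current_path[] matters because nvme_find_path() caches the chosen path per NUMA node; a sketch of the effect (assuming the existing per-node cache):

    /* before: head->current_path[node] -> ns with stale capacity
     * after:  head->current_path[node] == NULL, so the next I/O
     *         re-runs path selection and skips any path that lost
     *         its NVME_NS_READY bit above
     */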
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
/*
@@ -158,7 +173,7 @@ static bool nvme_path_is_disabled(struct nvme_ns *ns)
ns->ctrl->state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
- test_bit(NVME_NS_REMOVING, &ns->flags))
+ !test_bit(NVME_NS_READY, &ns->flags))
return true;
return false;
}
@@ -465,6 +480,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
ctrl->subsys->instance, head->instance);
blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+
/* set to a default value of 512 until the disk is validated */
blk_queue_logical_block_size(head->disk->queue, 512);
blk_set_stacking_limits(&head->disk->queue->limits);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a2e1f298b217..9871c0c9374c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -456,6 +456,7 @@ struct nvme_ns {
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
struct cdev cdev;
struct device cdev_device;
@@ -748,6 +749,7 @@ void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
@@ -795,6 +797,9 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
return false;
}
+static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
@@ -887,4 +892,9 @@ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
+static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 645025620154..e2ab12f3f51c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -45,6 +45,7 @@ struct nvme_tcp_request {
u32 pdu_len;
u32 pdu_sent;
u16 ttag;
+ __le16 status;
struct list_head entry;
struct llist_node lentry;
__le32 ddgst;
@@ -485,6 +486,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
struct nvme_completion *cqe)
{
+ struct nvme_tcp_request *req;
struct request *rq;
rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -496,7 +498,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ req = blk_mq_rq_to_pdu(rq);
+ if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+ req->status = cqe->status;
+
+ if (!nvme_try_complete_req(rq, req->status, cqe->result))
nvme_complete_rq(rq);
queue->nr_cqe++;
@@ -758,7 +764,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq,
+ le16_to_cpu(req->status));
queue->nr_cqe++;
}
nvme_tcp_init_recv_ctx(queue);
@@ -788,18 +795,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
return 0;
if (queue->recv_ddgst != queue->exp_ddgst) {
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
dev_err(queue->ctrl->ctrl.device,
"data digest error: recv %#x expected %#x\n",
le32_to_cpu(queue->recv_ddgst),
le32_to_cpu(queue->exp_ddgst));
- return -EIO;
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq, le16_to_cpu(req->status));
queue->nr_cqe++;
}
@@ -2293,6 +2306,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
return ret;
req->state = NVME_TCP_SEND_CMD_PDU;
+ req->status = cpu_to_le16(NVME_SC_SUCCESS);
req->offset = 0;
req->data_sent = 0;
req->pdu_len = 0;
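Taken together, the tcp.c changes latch the first error observed for a request instead of failing the connection with -EIO on a data digest mismatch. The life cycle of req->status across the hunks above:

    /* setup_cmd_pdu: req->status = cpu_to_le16(NVME_SC_SUCCESS);
     * recv_ddgst:    on mismatch, req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
     * process_cqe:   if still NVME_SC_SUCCESS, req->status = cqe->status;
     * completion:    nvme_tcp_end_request(rq, le16_to_cpu(req->status));
     */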
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 0cb98f2bbc8c..aa6d84d8848e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1015,7 +1015,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 273555127188..d784f3c200b4 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1028,7 +1028,7 @@ nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
}
/* passthru subsystems use the underlying controller's version */
- if (nvmet_passthru_ctrl(subsys))
+ if (nvmet_is_passthru_subsys(subsys))
return -EINVAL;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
@@ -1067,7 +1067,8 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%.*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
}
static ssize_t
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 66d05eecc2a9..b8425fa34300 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -553,7 +553,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
mutex_lock(&subsys->lock);
ret = 0;
- if (nvmet_passthru_ctrl(subsys)) {
+ if (nvmet_is_passthru_subsys(subsys)) {
pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
goto out_unlock;
}
@@ -869,7 +869,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_io_cmd(req);
ret = nvmet_req_find_ns(req);
@@ -1206,6 +1206,9 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+
+ if (nvmet_is_passthru_subsys(ctrl->subsys))
+ nvmet_passthrough_override_cap(ctrl);
}
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
@@ -1363,8 +1366,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- nvmet_init_cap(ctrl);
-
ctrl->port = req->port;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
@@ -1378,6 +1379,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 06dd3d537f07..7143c7fa7464 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -582,7 +582,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return subsys->passthru_ctrl;
}
@@ -601,18 +601,19 @@ static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
return 0;
}
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
-static inline struct nvme_ctrl *
-nvmet_req_passthru_ctrl(struct nvmet_req *req)
+static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(nvmet_req_subsys(req));
+ return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 225cd1ffbe45..f0efb3537989 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -20,6 +20,16 @@ MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
*/
static DEFINE_XARRAY(passthru_subsystems);
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+{
+ /*
+ * Multiple command set support can only be declared if the underlying
+ * controller actually supports it.
+ */
+ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
+ ctrl->cap &= ~(1ULL << 43);
+}
+
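Bit 43 of CAP falls inside the Command Sets Supported (CSS) field, bits 44:37; it is CSS bit 6, which advertises that the host may select I/O command sets via CC.CSS — the same capability nvme_multi_css() checks on the host side. A named reading of the mask (illustrative, not part of the patch):

    /* ctrl->cap &= ~(1ULL << 43) clears CAP.CSS bit 6 (CSI support) */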
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -218,7 +228,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
struct request *rq = NULL;
@@ -299,7 +309,7 @@ out:
*/
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct nvme_feat_host_behavior *host;
u16 status = NVME_SC_INTERNAL;
int ret;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 0c0dc2e369c0..3fd74bb34819 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1444,6 +1444,9 @@ static int of_fwnode_add_links(struct fwnode_handle *fwnode)
struct property *p;
struct device_node *con_np = to_of_node(fwnode);
+ if (IS_ENABLED(CONFIG_X86))
+ return 0;
+
if (!con_np)
return -EINVAL;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 889d7ce282eb..952a92504df6 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -156,15 +156,6 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
return container_of(hba, struct dino_device, hba);
}
-/* Check if PCI device is behind a Card-mode Dino. */
-static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
-{
- struct dino_device *dino_dev;
-
- dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
- return is_card_dino(&dino_dev->hba.dev->id);
-}
-
/*
* Dino Configuration Space Accessor Functions
*/
@@ -447,6 +438,15 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
#ifdef CONFIG_TULIP
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+ struct dino_device *dino_dev;
+
+ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+ return is_card_dino(&dino_dev->hba.dev->id);
+}
+
static void pci_fixup_tulip(struct pci_dev *dev)
{
if (!pci_dev_is_behind_card_dino(dev))
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 6d7d64939f82..c967ad6e2626 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -376,7 +376,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path)
+ if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 5e1e3796efa4..326f7d13024f 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -40,6 +40,7 @@ config PCI_FTPCI100
config PCI_IXP4XX
bool "Intel IXP4xx PCI controller"
depends on ARM && OF
+ depends on ARCH_IXP4XX || COMPILE_TEST
default ARCH_IXP4XX
help
Say Y here if you want support for the PCI host controller found
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 35e61048e133..ffb176d288cd 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -27,6 +27,7 @@
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
+#define J7200_LINK_DOWN BIT(10)
#define J721E_PCIE_USER_CMD_STATUS 0x4
#define LINK_TRAINING_ENABLE BIT(0)
@@ -57,6 +58,7 @@ struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
+ u32 linkdown_irq_regfield;
};
enum j721e_pcie_mode {
@@ -66,7 +68,10 @@ enum j721e_pcie_mode {
struct j721e_pcie_data {
enum j721e_pcie_mode mode;
- bool quirk_retrain_flag;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
+ u32 linkdown_irq_regfield;
+ unsigned int byte_access_allowed:1;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -98,12 +103,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
u32 reg;
reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
- if (!(reg & LINK_DOWN))
+ if (!(reg & pcie->linkdown_irq_regfield))
return IRQ_NONE;
dev_err(dev, "LINK DOWN!\n");
- j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
+ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
return IRQ_HANDLED;
}
@@ -112,7 +117,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
u32 reg;
reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
- reg |= LINK_DOWN;
+ reg |= pcie->linkdown_irq_regfield;
j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
}
@@ -284,10 +289,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
static const struct j721e_pcie_data j721e_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = LINK_DOWN,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = LINK_DOWN,
+};
+
+static const struct j721e_pcie_data j7200_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data j7200_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .quirk_detect_quiet_flag = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+};
+
+static const struct j721e_pcie_data am64_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -299,6 +330,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j721e-pcie-ep",
.data = &j721e_pcie_ep_data,
},
+ {
+ .compatible = "ti,j7200-pcie-host",
+ .data = &j7200_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j7200-pcie-ep",
+ .data = &j7200_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-host",
+ .data = &am64_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,am64-pcie-ep",
+ .data = &am64_pcie_ep_data,
+ },
{},
};
@@ -332,6 +379,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
pcie->mode = mode;
+ pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
if (IS_ERR(base))
@@ -391,9 +439,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- bridge->ops = &cdns_ti_pcie_host_ops;
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
rc = pci_host_bridge_priv(bridge);
rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
cdns_pcie = &rc->pcie;
cdns_pcie->dev = dev;
@@ -459,6 +509,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_get_sync;
}
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 897cdde02bd8..88e05b9c2e5b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -16,11 +16,37 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
+{
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
+ u32 first_vf_offset, stride;
+
+ if (vfn == 0)
+ return fn;
+
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+
+ return fn;
+}
+
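The mapping follows the standard SR-IOV layout: VF n of physical function fn lives at function number fn + VF_OFFSET + (n - 1) * VF_STRIDE. Worked example with hypothetical register values:

    /* fn = 0, PCI_SRIOV_VF_OFFSET = 4, PCI_SRIOV_VF_STRIDE = 1:
     *   vfn 0 -> 0 (the physical function itself)
     *   vfn 1 -> 0 + 4 + 0 * 1 = 4
     *   vfn 2 -> 0 + 4 + 1 * 1 = 5
     */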
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+
+ if (vfn > 1) {
+ dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return -EINVAL;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
@@ -47,7 +73,7 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -92,32 +118,36 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
- }
-
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
- cdns_pcie_writel(pcie, reg, cfg);
-
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
epf->epf_bar[bar] = epf_bar;
return 0;
}
-static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -126,29 +156,32 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
enum pci_barno bar = epf_bar->barno;
u32 reg, cfg, b, ctrl;
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+ cdns_pcie_writel(pcie, reg, cfg);
}
- ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
- cdns_pcie_writel(pcie, reg, cfg);
-
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
epf->epf_bar[bar] = NULL;
}
-static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -161,6 +194,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return -EINVAL;
}
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
@@ -169,7 +203,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return 0;
}
-static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -189,13 +223,15 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/*
* Set the Multiple Message Capable bitfield into the Message Control
* register.
@@ -209,13 +245,15 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
return 0;
}
-static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -230,13 +268,15 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return mme;
}
-static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
@@ -247,14 +287,17 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 interrupts, enum pci_barno bir,
+ u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
@@ -274,8 +317,8 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts,
return 0;
}
-static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
unsigned long flags;
@@ -317,7 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -334,7 +378,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
return 0;
}
-static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
@@ -343,6 +387,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -382,7 +428,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset)
@@ -396,6 +442,8 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
int ret;
int i;
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
@@ -419,7 +467,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
pci_addr &= GENMASK_ULL(63, 2);
for (i = 0; i < interrupt_num; i++) {
- ret = cdns_pcie_ep_map_addr(epc, fn, addr,
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
pci_addr & ~pci_addr_mask,
entry_size);
if (ret)
@@ -433,7 +481,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
@@ -446,6 +494,12 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
u16 flags;
u8 bir;
+ epf = &ep->epf[fn];
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
/* Check whether the MSI-X feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
if (!(flags & PCI_MSIX_FLAGS_ENABLE))
@@ -456,7 +510,6 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
bir = tbl_offset & PCI_MSIX_TABLE_BIR;
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf = &ep->epf[fn];
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
@@ -478,21 +531,27 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
switch (type) {
case PCI_EPC_IRQ_LEGACY:
- return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ return -EINVAL;
+ }
+ return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
case PCI_EPC_IRQ_MSI:
- return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num);
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
break;
@@ -523,6 +582,13 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
static const struct pci_epc_features cdns_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
@@ -531,9 +597,12 @@ static const struct pci_epc_features cdns_pcie_epc_features = {
};
static const struct pci_epc_features*
-cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
- return &cdns_pcie_epc_features;
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
}
static const struct pci_epc_ops cdns_pcie_epc_ops = {
@@ -559,9 +628,11 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
struct resource *res;
struct pci_epc *epc;
int ret;
+ int i;
pcie->is_rc = false;
@@ -606,6 +677,25 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
if (!ep->epf)
return -ENOMEM;
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
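The "max-virtual-functions" property is read as a u8 array with one entry per physical function. An illustrative device-tree fragment matching this code (values hypothetical):

    /* max-functions = /bits/ 8 <2>;
     * max-virtual-functions = /bits/ 8 <4 0>;
     *
     * PF0 gets a 4-entry epf->epf array for its VFs; PF1 declares
     * no VFs, so its sub-array is left unallocated.
     */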
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
@@ -623,6 +713,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
/* Reserve region 0 for IRQs */
set_bit(0, &ep->ob_region_map);
+
+ if (ep->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
+
spin_lock_init(&ep->lock);
return 0;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index ae1c55503513..fb96d37a135c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -498,6 +498,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 3c3646502d05..52767f26048f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -7,6 +7,22 @@
#include "pcie-cadence.h"
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /*
+ * Set the LTSSM Detect Quiet state min. delay to 2ms.
+ */
+ ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
+}
+
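With delay = 0x3 and the field mask GENMASK(2, 1), the read-modify-write reduces to:

    /* CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK = GENMASK(2, 1) = 0x6
     * CDNS_PCIE_DETECT_QUIET_MIN_DELAY(0x3) = (0x3 << 1) & 0x6 = 0x6
     * i.e. both field bits are set, selecting the 2 ms minimum delay.
     */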
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size)
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 30db2d68c17a..262421e5d917 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
/* Parameters for the waiting for link up routine */
@@ -46,10 +47,18 @@
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
@@ -114,6 +123,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
* Root Port Registers (PCI configuration space for the root port function)
@@ -189,6 +199,14 @@
/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+/* LTSSM Capabilities register */
+#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
+ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+
enum cdns_pcie_rp_bar {
RP_BAR_UNDEFINED = -1,
RP_BAR0,
@@ -295,6 +313,7 @@ struct cdns_pcie {
* @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -303,14 +322,17 @@ struct cdns_pcie_rc {
u32 vendor_id;
u32 device_id;
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
- bool quirk_retrain_flag;
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
};
/**
* struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
* @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
*/
struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
@@ -334,6 +356,7 @@ struct cdns_pcie_epf {
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -348,6 +371,7 @@ struct cdns_pcie_ep {
/* protect writing to PCI_STATUS while raising legacy interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
+ unsigned int quirk_detect_quiet_flag:1;
};
@@ -508,6 +532,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return 0;
}
#endif
+
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 423d35872ce4..76c0a63a3f64 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -214,6 +214,17 @@ config PCIE_ARTPEC6_EP
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller"
+ select PCIE_DW
+ select PCIE_DW_HOST
+ depends on PCI_MSI_IRQ_DOMAIN
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the DesignWare PCIe controller found in
+ Rockchip SoCs other than the RK3399.
+
config PCIE_INTEL_GW
bool "Intel Gateway PCIe host controller support"
depends on OF && (X86 || COMPILE_TEST)
@@ -225,6 +236,34 @@ config PCIE_INTEL_GW
The PCIe controller uses the DesignWare core plus Intel-specific
hardware wrappers.
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller - Host mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in the
+ Keem Bay SoC to work in host mode.
+ The PCIe controller is based on DesignWare hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller - Endpoint mode"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in the
+ Keem Bay SoC to work in endpoint mode.
+ The PCIe controller is based on DesignWare hardware and uses
+ DesignWare core functions.
+
config PCIE_KIRIN
depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
@@ -286,6 +325,15 @@ config PCIE_TEGRA194_EP
in order to enable device-specific features PCIE_TEGRA194_EP must be
selected. This uses the DesignWare core.
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controllers"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on the Toshiba
+ Visconti SoC. This driver supports the TMPV7708 SoC.
+
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 9e6ce0dc2f53..73244409792c 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -14,13 +14,16 @@ obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 047cfbdc1330..fbbb78f6885e 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -204,7 +204,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
- int pos, irq;
+ int pos;
val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
(index * MSI_REG_CTRL_BLOCK_SIZE));
@@ -213,9 +213,8 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
while (pos != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (index * MAX_MSI_IRQS_PER_CTRL) + pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
pos++;
pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
@@ -257,7 +256,7 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
struct dw_pcie *pci;
struct pcie_port *pp;
unsigned long reg;
- u32 virq, bit;
+ u32 bit;
chained_irq_enter(chip, desc);
@@ -276,11 +275,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
case INTB:
case INTC:
case INTD:
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_find_mapping(dra7xx->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
break;
}
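This hunk is one instance of a pattern repeated in the files below (keystone, designware-host, uniphier, mobiveil): the two-step irq_find_mapping() + generic_handle_irq() demux is collapsed into a single generic_handle_domain_irq() call. A hedged restatement of the idiom — demo_demux() and its warning are illustrative, not from the patch:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

/* Illustrative demux helper: dispatch one hardware IRQ number from a
 * chained handler. generic_handle_domain_irq() folds the reverse-map
 * lookup and the dispatch into one call and returns an error when no
 * mapping exists, so the old "if (virq)" guard disappears. */
static void demo_demux(struct irq_domain *domain, unsigned int hwirq)
{
	if (generic_handle_domain_irq(domain, hwirq))
		pr_warn_ratelimited("unexpected hwirq %u\n", hwirq);
}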
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index bde3b2824e89..865258d8c53c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -259,14 +259,12 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
u32 pending;
- int virq;
pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, ": irq: irq_offset %d\n", offset);
+ generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -579,7 +577,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 vector, virq, reg, pos;
+ u32 vector, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -600,10 +598,8 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
continue;
vector = offset + (pos << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
- virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 597c282f586c..c91fc1954432 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -384,6 +384,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
+ u32 val;
match = of_match_device(artpec6_pcie_of_match, dev);
if (!match)
@@ -432,9 +433,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
break;
- case DW_PCIE_EP_TYPE: {
- u32 val;
-
+ case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
return -ENODEV;
@@ -445,8 +444,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
return dw_pcie_ep_init(&pci->ep);
- break;
- }
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8d028a88b375..998b698f4085 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -125,7 +125,7 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -202,7 +202,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -217,7 +217,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
ep->epf_bar[bar] = NULL;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -276,7 +276,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -292,9 +292,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -309,7 +308,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -333,7 +332,8 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -358,7 +358,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -382,8 +382,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -418,7 +418,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -450,7 +450,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features*
-dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -525,14 +525,14 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
@@ -593,14 +593,14 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
return ret;
writel(msg_data, ep->msi_mem + aligned_offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
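Every pci_epc_ops callback in this file gains a vfunc_no parameter so the EPC core can address SR-IOV virtual functions; paths that target the physical function itself pass 0, as the raise-irq hunks above do. A minimal caller-side sketch — demo_map_addr() is hypothetical, though pci_epc_map_addr() does gain this exact signature in the same series:

#include <linux/pci-epc.h>

/* Hypothetical caller: map host memory on behalf of the physical
 * function. vfunc_no == 0 addresses the physical function itself;
 * virtual functions are addressed by their nonzero index. */
static int demo_map_addr(struct pci_epc *epc, phys_addr_t phys,
			 u64 pci_addr, size_t size)
{
	return pci_epc_map_addr(epc, 0 /* func_no */, 0 /* vfunc_no */,
				phys, pci_addr, size);
}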
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index a608ae1fad57..d1d9b8344ec9 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -55,7 +55,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- int i, pos, irq;
+ int i, pos;
unsigned long val;
u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
@@ -74,10 +74,9 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
pos = 0;
while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (i * MAX_MSI_IRQS_PER_CTRL) +
- pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
pos++;
}
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 9b397c807261..8851eb161a0e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -164,7 +164,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
return dw_pcie_ep_init(&pci->ep);
- break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 000000000000..c9b341e55cbb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
+ * mask for the lower 16 bits.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_SMLH_LINKUP BIT(16)
+#define PCIE_RDLH_LINKUP BIT(17)
+#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_L0S_ENTRY 0x11
+#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
+ u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
+static int rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
+ (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
+ return 1;
+
+ return 0;
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+ /*
+ * PCIe requires the refclk to be stable for 100µs prior to releasing
+ * PERST#. See table 2-4 in section 2.6.2, AC Specifications, of the
+ * PCI Express Card Electromechanical Specification, Revision 1.1.
+ * However, we don't know whether the refclk comes from the RC's PHY
+ * or from an external OSC. If it comes from the RC, enabling the
+ * LTSSM is the right place to release PERST#. Allow extra time beyond
+ * the required 100µs, since we don't know how long the device needs
+ * to come out of reset.
+ */
+ msleep(100);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static int rockchip_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+
+ /* LTSSM enable control mode */
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .host_init = rockchip_pcie_host_init,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return ret;
+
+ rockchip->clk_cnt = ret;
+
+ return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+ /* Tear down in reverse order of init: power off, then exit. */
+ phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
+}
+
+static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ return reset_control_deassert(rockchip->rst);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+};
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ struct pcie_port *pp;
+ int ret;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+ /* DON'T MOVE ME: must be enabled before PHY init */
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
+ "failed to get vpcie3v3 regulator\n");
+ rockchip->vpcie3v3 = NULL;
+ } else {
+ ret = regulator_enable(rockchip->vpcie3v3);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ return ret;
+ }
+ }
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ goto disable_regulator;
+
+ ret = rockchip_pcie_reset_control_release(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ ret = dw_pcie_host_init(pp);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+disable_regulator:
+ if (rockchip->vpcie3v3)
+ regulator_disable(rockchip->vpcie3v3);
+
+ return ret;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie", },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
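A note on the HIWORD_UPDATE() convention this driver uses throughout: the upper 16 bits of a write name which of the lower 16 bits take effect, so single bits can be set or cleared without a read-modify-write cycle. A hedged sketch — demo_hiword_toggle() is illustrative and would sit in this same file; bit 6 is chosen because PCIE_CLIENT_RC_MODE above is HIWORD_UPDATE_BIT(0x40):

/* Illustrative only: set then clear one bit of a Rockchip "hiword"
 * register without reading it first. */
static void demo_hiword_toggle(void __iomem *apb_base)
{
	/* Mask BIT(6) in the high half, value BIT(6): sets the bit. */
	writel_relaxed(HIWORD_UPDATE(BIT(6), BIT(6)),
		       apb_base + PCIE_CLIENT_GENERAL_CONTROL);
	/* Same mask, value 0: clears the bit; all others untouched. */
	writel_relaxed(HIWORD_UPDATE(BIT(6), 0),
		       apb_base + PCIE_CLIENT_GENERAL_CONTROL);
}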
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 000000000000..1ac29a6eef22
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+ * For more details, refer to PCI Express Card Electromechanical
+ * Specification Revision 1.1, Table 2-4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static int keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in Keem Bay data book,
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct pcie_port *pp;
+
+ /*
+ * The Keem Bay PCIe controller provides additional IP logic on top
+ * of the standard DWC IP to clear an MSI IRQ by writing '1' to the
+ * respective bit of the status register.
+ *
+ * A chained IRQ handler is therefore defined to handle this
+ * additional IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ /* Legacy interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = true,
+ .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .align = SZ_16K,
+};
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .ep_init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
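Both keembay_pcie_start_link() and keembay_pcie_pll_init() above lean on the same register-polling idiom; a hedged restatement — demo_wait_pll_lock() is illustrative and assumes the driver-local PCIE_REGS_LJPLL_STA/LJPLL_LOCK definitions:

#include <linux/iopoll.h>

/* Illustrative: readl_poll_timeout(addr, val, cond, sleep_us,
 * timeout_us) re-reads the register into 'val' until 'cond' holds or
 * the timeout expires, returning -ETIMEDOUT on failure. */
static int demo_wait_pll_lock(void __iomem *apb_base)
{
	u32 val;

	/* Poll every 20 µs, give up after 500 ms, as the driver does. */
	return readl_poll_timeout(apb_base + PCIE_REGS_LJPLL_STA, val,
				  val & LJPLL_LOCK, 20,
				  500 * USEC_PER_MSEC);
}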
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 3ec7b29d5dc7..904976913081 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -497,19 +497,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
- u32 val, tmp;
+ u32 status_l0, status_l1, link_status;
- val = appl_readl(pcie, APPL_INTR_STATUS_L0);
- if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
- if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
pex_ep_event_hot_rst_done(pcie);
- if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
- tmp = appl_readl(pcie, APPL_LINK_STATUS);
- if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
dw_pcie_ep_linkup(ep);
}
@@ -518,11 +518,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
spurious = 0;
}
- if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
- val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
- appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
- if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
return IRQ_WAKE_THREAD;
spurious = 0;
@@ -530,8 +530,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
if (spurious) {
dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
- val);
- appl_writel(pcie, val, APPL_INTR_STATUS_L0);
+ status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
}
return IRQ_HANDLED;
@@ -1493,6 +1493,16 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
return;
}
+ /*
+ * The PCIe controller exits L2 only when reset is applied, so the
+ * controller does not handle interrupts there. But when L2 entry
+ * fails, PERST# is asserted, which can trigger a surprise link-down
+ * AER. Since this function is called from suspend_noirq(), the AER
+ * interrupt would never be processed. Disable all interrupts to
+ * avoid that scenario.
+ */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
if (tegra_pcie_try_link_l2(pcie)) {
dev_info(pcie->dev, "Link didn't transition to L2 state\n");
/*
@@ -1763,7 +1773,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
- val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
ret = dw_pcie_ep_init_complete(ep);
@@ -1935,13 +1945,6 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
return ret;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
- pcie->cid);
- if (!name) {
- dev_err(dev, "Failed to create PCIe EP work thread string\n");
- return -ENOMEM;
- }
-
pm_runtime_enable(dev);
ret = dw_pcie_ep_init(ep);
@@ -2236,6 +2239,11 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
u32 val;
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ dev_err(dev, "Suspend is not supported in EP mode");
+ return -ENOTSUPP;
+ }
+
if (!pcie->link_state)
return 0;
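The one-character fix in the hunk above is worth spelling out: the MSI-X HIGH match register must hold bits 63:32 of the target address, which upper_32_bits() extracts; the old code wrote the low half into both registers. A sketch of the intended split — demo_write_msix_match() is illustrative and omits the driver's field masks for brevity:

/* Illustrative: split a 64-bit MSI-X match address across the LOW and
 * HIGH DBI registers, as the corrected code does. */
static void demo_write_msix_match(struct dw_pcie *pci, u64 addr)
{
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF,
			   lower_32_bits(addr) | MSIX_ADDR_MATCH_LOW_OFF_EN);
	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF,
			   upper_32_bits(addr));
}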
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 7e8bad326770..d842fd018129 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -235,7 +235,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
- u32 val, bit, virq;
+ u32 val, bit;
/* INT for debug */
val = readl(priv->base + PCL_RCV_INT);
@@ -257,10 +257,8 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
val = readl(priv->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(priv->legacy_irq_domain, bit);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 000000000000..a88eab6829bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static int visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
+ return !!(val & PCIE_UL_S_L0);
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * In this SoC, the CPU bus presents addresses to the PCIe bus as
+ * offsets from 0x40000000, so 0x40000000 is subtracted from the CPU
+ * bus address. This 0x40000000 also corresponds to io_base in the DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct pcie_port *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .host_init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0) {
+ dev_err(dev, "Interrupt intr is missing");
+ return pp->irq;
+ }
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);
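visconti_get_resources() above uses dev_err_probe() for each clock lookup; the idiom logs the failure (quietly for -EPROBE_DEFER) and returns the error code in one statement. A hedged sketch of the pattern — demo_get_clk() is illustrative, not from the driver:

#include <linux/clk.h>
#include <linux/device.h>

/* Illustrative: fetch a named clock, logging and returning on error.
 * dev_err_probe() returns the error it is given and stays quiet for
 * -EPROBE_DEFER, so probe deferral doesn't spam the log. */
static int demo_get_clk(struct device *dev, struct clk **clk,
			const char *id)
{
	*clk = devm_clk_get(dev, id);
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Failed to get %s clock\n", id);

	return 0;
}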
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index c637de3a389b..f3547aa60140 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -92,7 +92,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
u32 msi_data, msi_addr_lo, msi_addr_hi;
u32 intr_status, msi_status;
unsigned long shifted_status;
- u32 bit, virq, val, mask;
+ u32 bit, val, mask;
/*
* The core provides a single interrupt for both INTx/MSI messages.
@@ -114,11 +114,10 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
- virq = irq_find_mapping(rp->intx_domain,
- bit + 1);
- if (virq)
- generic_handle_irq(virq);
- else
+ int ret;
+ ret = generic_handle_domain_irq(rp->intx_domain,
+ bit + 1);
+ if (ret)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
bit);
@@ -155,9 +154,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
msi_data, msi_addr_hi, msi_addr_lo);
- virq = irq_find_mapping(msi->dev_domain, msi_data);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->dev_domain, msi_data);
msi_status = readl_relaxed(pcie->apb_csr_base +
MSI_STATUS_OFFSET);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c95ebe808f92..596ebcfcc82d 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -58,6 +58,7 @@
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
+#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -118,6 +119,46 @@
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR 0x4c00
+#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_COUNT 8
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
+ OB_WIN_BLOCK_SIZE * (win) + \
+ (offset))
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_ENABLE BIT(0)
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
+#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
+#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
+#define OB_WIN_FUNC_NUM_SHIFT 24
+#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
+#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
+#define OB_WIN_BUS_NUM_BITS_SHIFT 20
+#define OB_WIN_MSG_CODE_ENABLE BIT(22)
+#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
+#define OB_WIN_MSG_CODE_SHIFT 14
+#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
+#define OB_WIN_ATTR_ENABLE BIT(11)
+#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
+#define OB_WIN_ATTR_TC_SHIFT 8
+#define OB_WIN_ATTR_RELAXED BIT(7)
+#define OB_WIN_ATTR_NOSNOOP BIT(6)
+#define OB_WIN_ATTR_POISON BIT(5)
+#define OB_WIN_ATTR_IDO BIT(4)
+#define OB_WIN_TYPE_MASK GENMASK(3, 0)
+#define OB_WIN_TYPE_SHIFT 0
+#define OB_WIN_TYPE_MEM 0x0
+#define OB_WIN_TYPE_IO 0x4
+#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
+#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
+#define OB_WIN_TYPE_MSG 0xc
+
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
@@ -166,7 +207,7 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-#define PIO_RETRY_CNT 500
+#define PIO_RETRY_CNT 750000 /* 1.5 s */
#define PIO_RETRY_DELAY 2 /* 2 us*/
#define LINK_WAIT_MAX_RETRIES 10
@@ -177,11 +218,21 @@
#define MSI_IRQ_NUM 32
+#define CFG_RD_CRS_VAL 0xffff0001
+
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
+ struct {
+ phys_addr_t match;
+ phys_addr_t remap;
+ phys_addr_t mask;
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
+ raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
struct irq_chip msi_bottom_irq_chip;
@@ -366,9 +417,39 @@ err:
dev_err(dev, "link never came up\n");
}
+/*
+ * Set a PCIe address window register, which can be used for memory
+ * mapping.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
+ phys_addr_t match, phys_addr_t remap,
+ phys_addr_t mask, u32 actions)
+{
+ advk_writel(pcie, OB_WIN_ENABLE |
+ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
+{
+ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
+ int i;
/* Enable TX */
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
@@ -447,15 +528,51 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+ /*
+ * Enable AXI address window location generation:
+ * When enabled, the default outbound window
+ * configuration (Default User Field: 0xD0074CFC)
+ * is used for transparent address translation of
+ * outbound transactions. Thus, no PCIe address
+ * windows are required for transparent memory
+ * access when the default outbound window
+ * configuration is set for memory access.
+ */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Bypass the address window mapping for PIO */
+ /*
+ * Set memory access in the Default User Field so
+ * that no PCIe address window needs to be
+ * configured for transparent memory access.
+ */
+ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
+
+ /*
+ * Bypass the address window mapping for PIO:
+ * Since a PIO access already carries all required
+ * information over the AXI interface via the PIO
+ * registers, no address window is required.
+ */
reg = advk_readl(pcie, PIO_CTRL);
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
+ /*
+ * Configure PCIe address windows for non-memory or
+ * non-transparent accesses, since by default PCIe
+ * uses transparent memory access.
+ */
+ for (i = 0; i < pcie->wins_count; i++)
+ advk_pcie_set_ob_win(pcie, i,
+ pcie->wins[i].match, pcie->wins[i].remap,
+ pcie->wins[i].mask, pcie->wins[i].actions);
+
+ /* Disable remaining PCIe outbound windows */
+ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
advk_pcie_train_link(pcie);
/*
@@ -472,7 +589,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}
-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -483,14 +600,70 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
PIO_COMPLETION_STATUS_SHIFT;
- if (!status)
- return;
-
+ /*
+ * According to the HW spec, the PIO status check sequence is:
+ * 1) Even if COMPLETION_STATUS (bits 9:7) indicates success, the
+ * Error Status bit (bit 11) must still be checked; the operation
+ * succeeded only if that bit indicates no error.
+ * 2) The Unsupported Request (UR) value of COMPLETION_STATUS (bits
+ * 9:7) only means a PIO write error; a PIO read is successful, with
+ * a read value of 0xFFFFFFFF.
+ * 3) The Completion Retry Status (CRS) value of COMPLETION_STATUS
+ * (bits 9:7) only means a PIO write error; a PIO read is successful,
+ * with a read value of 0xFFFF0001.
+ * 4) The Completer Abort (CA) value of COMPLETION_STATUS (bits 9:7)
+ * means an error for both PIO read and PIO write operations.
+ * 5) Other errors are reported as 'unknown'.
+ */
switch (status) {
+ case PIO_COMPLETION_STATUS_OK:
+ if (reg & PIO_ERR_STATUS) {
+ strcomp_status = "COMP_ERR";
+ break;
+ }
+ /* Get the read result */
+ if (val)
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ /* No error */
+ strcomp_status = NULL;
+ break;
case PIO_COMPLETION_STATUS_UR:
strcomp_status = "UR";
break;
case PIO_COMPLETION_STATUS_CRS:
+ if (allow_crs && val) {
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is enabled:
+ * For a Configuration Read Request that includes both
+ * bytes of the Vendor ID field of a device Function's
+ * Configuration Space Header, the Root Complex must
+ * complete the Request to the host by returning a
+ * read-data value of 0001h for the Vendor ID field and
+ * all '1's for any additional bytes included in the
+ * request.
+ *
+ * So CRS in this case is not an error status.
+ */
+ *val = CFG_RD_CRS_VAL;
+ strcomp_status = NULL;
+ break;
+ }
+ /* PCIe r4.0, sec 2.3.2, says:
+ * If CRS Software Visibility is not enabled, the Root Complex
+ * must re-issue the Configuration Request as a new Request.
+ * If CRS Software Visibility is enabled: For a Configuration
+ * Write Request or for any other Configuration Read Request,
+ * the Root Complex must re-issue the Configuration Request as
+ * a new Request.
+ * A Root Complex implementation may choose to limit the number
+ * of Configuration Request/CRS Completion Status loops before
+ * determining that something is wrong with the target of the
+ * Request and taking appropriate action, e.g., complete the
+ * Request to the host as a failed transaction.
+ *
+ * To simplify the implementation, do not re-issue the Configuration
+ * Request; instead, complete the Request as a failed transaction.
+ */
strcomp_status = "CRS";
break;
case PIO_COMPLETION_STATUS_CA:
@@ -501,6 +674,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
break;
}
+ if (!strcomp_status)
+ return 0;
+
if (reg & PIO_NON_POSTED_REQ)
str_posted = "Non-posted";
else
@@ -508,6 +684,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+ return -EFAULT;
}
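
As an aside on the CRS plumbing above: the special read value handed back to the caller encodes the PCIe r4.0 rule quoted in the comment, Vendor ID bytes of 0x0001 with all remaining requested bytes ones. A minimal sketch, assuming CFG_RD_CRS_VAL (defined earlier in this patch, outside this excerpt) is 0xffff0001:

#include <stdbool.h>
#include <stdint.h>

#define CFG_RD_CRS_VAL 0xffff0001u	/* assumed value */

/* A config-space reader can detect "device not ready yet, retry": */
static bool cfg_read_is_crs(uint32_t val)
{
	return val == CFG_RD_CRS_VAL;
}
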
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
@@ -545,6 +723,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+ *value |= PCI_EXP_RTCAP_CRSVIS << 16;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -626,6 +805,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
+ int ret;
bridge->conf.vendor =
cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
@@ -649,7 +829,15 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- return pci_bridge_emul_init(bridge, 0);
+ /* PCIe config space can be initialized only after pci_bridge_emul_init() */
+ ret = pci_bridge_emul_init(bridge, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Indicate support for Completion Retry Status */
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
+ return 0;
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -701,6 +889,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
struct advk_pcie *pcie = bus->sysdata;
+ bool allow_crs;
u32 reg;
int ret;
@@ -713,7 +902,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return pci_bridge_emul_conf_read(&pcie->bridge, where,
size, val);
+ /*
+ * Completion Retry Status can be returned only when all 4 bytes of the
+ * PCI_VENDOR_ID and PCI_DEVICE_ID registers are read at once and the
+ * CRSSVE flag on the Root Bridge is enabled.
+ */
+ allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
+ PCI_EXP_RTCTL_CRSSVE);
+
if (advk_pcie_pio_is_running(pcie)) {
+ /*
+ * If possible, return Completion Retry Status so the caller
+ * retries the request instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}
@@ -741,14 +947,25 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0) {
+ /*
+ * If possible, return Completion Retry Status so the caller
+ * retries the request instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}
- advk_pcie_check_pio_status(pcie);
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ if (ret < 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
+ }
- /* Get the read result */
- *val = advk_readl(pcie, PIO_RD_DATA);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
else if (size == 2)
@@ -812,7 +1029,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (ret < 0)
return PCIBIOS_SET_FAILED;
- advk_pcie_check_pio_status(pcie);
+ ret = advk_pcie_check_pio_status(pcie, false, NULL);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
return PCIBIOS_SUCCESSFUL;
}
@@ -886,22 +1105,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
u32 mask;
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
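
Both callbacks above now perform the same serialised read-modify-write; a hypothetical helper makes the shared pattern explicit (a sketch only, the driver keeps the two open-coded variants):

static void advk_pcie_isr1_mask_rmw(struct advk_pcie *pcie, u32 set, u32 clr)
{
	unsigned long flags;
	u32 mask;

	/* Serialise the RMW against a concurrent mask/unmask on another CPU */
	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask = (mask | set) & ~clr;
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

advk_pcie_irq_mask() corresponds to set = PCIE_ISR1_INTX_ASSERT(hwirq) with clr = 0; unmask is the inverse.
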
static int advk_pcie_irq_map(struct irq_domain *h,
@@ -985,6 +1210,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct irq_chip *irq_chip;
int ret = 0;
+ raw_spin_lock_init(&pcie->irq_lock);
+
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
@@ -1049,7 +1276,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
u32 isr0_val, isr0_mask, isr0_status;
u32 isr1_val, isr1_mask, isr1_status;
- int i, virq;
+ int i;
isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
@@ -1077,8 +1304,7 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
PCIE_ISR1_REG);
- virq = irq_find_mapping(pcie->irq_domain, i);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(pcie->irq_domain, i);
}
}
@@ -1162,6 +1388,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
@@ -1172,6 +1399,80 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ resource_size_t start = entry->res->start;
+ resource_size_t size = resource_size(entry->res);
+ unsigned long type = resource_type(entry->res);
+ u64 win_size;
+
+ /*
+ * Aardvark hardware also allows configuring a PCIe window for
+ * config type 0 and type 1 mapping, but the driver issues
+ * configuration transfers only via PIO, which does not use the
+ * PCIe window configuration.
+ */
+ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
+ type != IORESOURCE_IO)
+ continue;
+
+ /*
+ * Skip transparent memory resources. The default outbound access
+ * configuration is transparent memory access, so such resources
+ * need no window configuration.
+ */
+ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
+ entry->offset == 0)
+ continue;
+
+ /*
+ * The n-th PCIe window is configured by the tuple (match, remap, mask),
+ * and an access to address A uses this window if A matches 'match'
+ * under the given mask.
+ * So every PCIe window size must be a power of two, and every start
+ * address must be aligned to the window size. The minimal size is
+ * 64 KiB because the lower 16 bits of the mask must be zero. The
+ * remapped address may have only bits from the mask set.
+ */
+ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
+ /* Calculate the largest aligned window size */
+ win_size = (1ULL << (fls64(size)-1)) |
+ (start ? (1ULL << __ffs64(start)) : 0);
+ win_size = 1ULL << __ffs64(win_size);
+ if (win_size < 0x10000)
+ break;
+
+ dev_dbg(dev,
+ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
+ pcie->wins_count, (unsigned long long)start,
+ (unsigned long long)start + win_size, type);
+
+ if (type == IORESOURCE_IO) {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
+ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
+ } else {
+ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
+ pcie->wins[pcie->wins_count].match = start;
+ }
+ pcie->wins[pcie->wins_count].remap = start - entry->offset;
+ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
+
+ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
+ break;
+
+ start += win_size;
+ size -= win_size;
+ pcie->wins_count++;
+ }
+
+ if (size > 0) {
+ dev_err(&pcie->pdev->dev,
+ "Invalid PCIe region [0x%llx-0x%llx]\n",
+ (unsigned long long)entry->res->start,
+ (unsigned long long)entry->res->end + 1);
+ return -EINVAL;
+ }
+ }
+
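The splitting loop above is easy to dry-run in user space. A self-contained sketch with example values only (fls64()/__ffs64() emulated via GCC builtins; the OB_WIN_COUNT cap is omitted):

#include <stdio.h>
#include <stdint.h>

static int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }
static int ffs64_(uint64_t x) { return (int)__builtin_ctzll(x); }

int main(void)
{
	uint64_t start = 0xe8000000, size = 0x3000000; /* example region */

	while (size > 0) {
		/* Largest power-of-two window allowed by size and alignment */
		uint64_t win = (1ULL << (fls64_(size) - 1)) |
			       (start ? (1ULL << ffs64_(start)) : 0);
		win = 1ULL << ffs64_(win);
		if (win < 0x10000)	/* low 16 mask bits must stay zero */
			break;
		printf("window: match 0x%llx mask 0x%llx\n",
		       (unsigned long long)start,
		       (unsigned long long)~(win - 1));
		start += win;
		size -= win;
	}
	return 0;
}

For this region the sketch emits two windows, 32 MiB at 0xe8000000 and 16 MiB at 0xea000000, mirroring what the driver would program.
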
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
@@ -1252,6 +1553,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
{
struct advk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ int i;
pci_lock_rescan_remove();
pci_stop_root_bus(bridge->bus);
@@ -1261,6 +1563,10 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
+ /* Disable outbound address windows mapping */
+ for (i = 0; i < OB_WIN_COUNT; i++)
+ advk_pcie_disable_ob_win(pcie, i);
+
return 0;
}
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index aefef1986201..88980a44461d 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -314,7 +314,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc)
for (i = 0; i < 4; i++) {
if ((irq_stat & BIT(i)) == 0)
continue;
- generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+ generic_handle_domain_irq(p->irqdomain, i);
}
chained_irq_exit(irqchip, desc);
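
This is the first of many identical conversions in this section. Semantically, generic_handle_domain_irq() folds the lookup and the dispatch into one call and signals a missing mapping through its return value; a rough sketch of the assumed equivalence:

static inline int handle_domain_irq_sketch(struct irq_domain *domain,
					   irq_hw_number_t hwirq)
{
	unsigned int virq = irq_find_mapping(domain, hwirq);

	if (!virq)
		return -EINVAL;	/* lets callers log "unexpected IRQ" */
	generic_handle_irq(virq);
	return 0;
}
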
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index a53bd8728d0d..eaec915ffe62 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
@@ -64,6 +65,7 @@ enum pci_protocol_version_t {
PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
+ PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), /* WS2022 */
};
#define CPU_AFFINITY_ALL -1ULL
@@ -73,6 +75,7 @@ enum pci_protocol_version_t {
* first.
*/
static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_4,
PCI_PROTOCOL_VERSION_1_3,
PCI_PROTOCOL_VERSION_1_2,
PCI_PROTOCOL_VERSION_1_1,
@@ -122,6 +125,8 @@ enum pci_message_type {
PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
+ PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A,
+ PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B,
PCI_MESSAGE_MAXIMUM
};
@@ -235,6 +240,21 @@ struct hv_msi_desc2 {
u16 processor_array[32];
} __packed;
+/*
+ * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
+ * Everything is the same as in 'hv_msi_desc2' except that the size of the
+ * 'vector' field is larger to support bigger vector values, for example
+ * LPI vectors on ARM.
+ */
+struct hv_msi_desc3 {
+ u32 vector;
+ u8 delivery_mode;
+ u8 reserved;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
+
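The only layout difference from hv_msi_desc2 is the widened vector field; ARM GIC LPIs start at INTID 8192, which does not fit in the old u8. An illustrative compile-time check (not part of the patch):

/* vector grew from u8 (hv_msi_desc2) to u32 (hv_msi_desc3) */
_Static_assert(sizeof(((struct hv_msi_desc3 *)0)->vector) == 4,
	       "protocol 1.4 carries a 32-bit vector");
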
/**
* struct tran_int_desc
* @reserved: unused, padding
@@ -383,6 +403,12 @@ struct pci_create_interrupt2 {
struct hv_msi_desc2 int_desc;
} __packed;
+struct pci_create_interrupt3 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc3 int_desc;
+} __packed;
+
struct pci_delete_interrupt {
struct pci_message message_type;
union win_slot_encoding wslot;
@@ -448,7 +474,13 @@ enum hv_pcibus_state {
};
struct hv_pcibus_device {
+#ifdef CONFIG_X86
struct pci_sysdata sysdata;
+#elif defined(CONFIG_ARM64)
+ struct pci_config_window sysdata;
+#endif
+ struct pci_host_bridge *bridge;
+ struct fwnode_handle *fwnode;
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
@@ -464,8 +496,6 @@ struct hv_pcibus_device {
spinlock_t device_list_lock; /* Protect lists below */
void __iomem *cfg_addr;
- struct list_head resources_for_children;
-
struct list_head children;
struct list_head dr_list;
@@ -1328,6 +1358,15 @@ static u32 hv_compose_msi_req_v1(
return sizeof(*int_pkt);
}
+/*
+ * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+ * by subsequent retarget in hv_irq_unmask().
+ */
+static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
+{
+ return cpumask_first_and(affinity, cpu_online_mask);
+}
+
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
u32 slot, u8 vector)
@@ -1339,12 +1378,27 @@ static u32 hv_compose_msi_req_v2(
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ cpu = hv_compose_msi_req_get_cpu(affinity);
+ int_pkt->int_desc.processor_array[0] =
+ hv_cpu_number_to_vp_number(cpu);
+ int_pkt->int_desc.processor_count = 1;
- /*
- * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
- * by subsequent retarget in hv_irq_unmask().
- */
- cpu = cpumask_first_and(affinity, cpu_online_mask);
+ return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v3(
+ struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
+ u32 slot, u32 vector)
+{
+ int cpu;
+
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.reserved = 0;
+ int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
int_pkt->int_desc.processor_count = 1;
@@ -1379,6 +1433,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
union {
struct pci_create_interrupt v1;
struct pci_create_interrupt2 v2;
+ struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
@@ -1426,6 +1481,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
cfg->vector);
break;
+ case PCI_PROTOCOL_VERSION_1_4:
+ size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
+ dest,
+ hpdev->desc.win_slot.slot,
+ cfg->vector);
+ break;
+
default:
/* As we only negotiate protocol versions known to this driver,
* this path should never hit. However, this is not a hot
@@ -1566,7 +1628,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
hbus->msi_info.handler = handle_edge_irq;
hbus->msi_info.handler_name = "edge";
hbus->msi_info.data = hbus;
- hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
+ hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
&hbus->msi_info,
x86_vector_domain);
if (!hbus->irq_domain) {
@@ -1575,6 +1637,8 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
return -ENODEV;
}
+ dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
+
return 0;
}
@@ -1797,7 +1861,7 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
- hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+ hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
name, NULL);
if (IS_ERR(hpdev->pci_slot)) {
pr_warn("pci_create slot %s failed\n", name);
@@ -1827,7 +1891,7 @@ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
struct pci_dev *dev;
- struct pci_bus *bus = hbus->pci_bus;
+ struct pci_bus *bus = hbus->bridge->bus;
struct hv_pci_dev *hv_dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -1850,21 +1914,22 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
*/
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
- /* Register the device */
- hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
- 0, /* bus number is always zero */
- &hv_pcifront_ops,
- &hbus->sysdata,
- &hbus->resources_for_children);
- if (!hbus->pci_bus)
- return -ENODEV;
+ int error;
+ struct pci_host_bridge *bridge = hbus->bridge;
+
+ bridge->dev.parent = &hbus->hdev->device;
+ bridge->sysdata = &hbus->sysdata;
+ bridge->ops = &hv_pcifront_ops;
+
+ error = pci_scan_root_bus_bridge(bridge);
+ if (error)
+ return error;
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
hv_pci_assign_numa_node(hbus);
- pci_bus_assign_resources(hbus->pci_bus);
+ pci_bus_assign_resources(bridge->bus);
hv_pci_assign_slots(hbus);
- pci_bus_add_devices(hbus->pci_bus);
+ pci_bus_add_devices(bridge->bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
@@ -2127,7 +2192,7 @@ static void pci_devices_present_work(struct work_struct *work)
* because there may have been changes.
*/
pci_lock_rescan_remove();
- pci_scan_child_bus(hbus->pci_bus);
+ pci_scan_child_bus(hbus->bridge->bus);
hv_pci_assign_numa_node(hbus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
@@ -2295,11 +2360,11 @@ static void hv_eject_device_work(struct work_struct *work)
/*
* Ejection can come before or after the PCI bus has been set up, so
* attempt to find it and tear down the bus state, if it exists. This
- * must be done without constructs like pci_domain_nr(hbus->pci_bus)
- * because hbus->pci_bus may not exist yet.
+ * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
+ * because hbus->bridge->bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -2662,8 +2727,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->low_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
}
if (hbus->high_mmio_space) {
@@ -2682,8 +2746,7 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
/* Modify this resource to become a bridge window. */
hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
- pci_add_resource(&hbus->resources_for_children,
- hbus->high_mmio_res);
+ pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
}
return 0;
@@ -3002,6 +3065,7 @@ static void hv_put_dom_num(u16 dom)
static int hv_pci_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
+ struct pci_host_bridge *bridge;
struct hv_pcibus_device *hbus;
u16 dom_req, dom;
char *name;
@@ -3014,6 +3078,10 @@ static int hv_pci_probe(struct hv_device *hdev,
*/
BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
+ bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
+ if (!bridge)
+ return -ENOMEM;
+
/*
* With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
* alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
@@ -3035,6 +3103,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+
+ hbus->bridge = bridge;
hbus->state = hv_pcibus_init;
hbus->wslot_res_allocated = -1;
@@ -3066,17 +3136,19 @@ static int hv_pci_probe(struct hv_device *hdev,
"PCI dom# 0x%hx has collision, using 0x%hx",
dom_req, dom);
+ hbus->bridge->domain_nr = dom;
+#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
+#endif
hbus->hdev = hdev;
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
- INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
spin_lock_init(&hbus->retarget_msi_interrupt_lock);
hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
- hbus->sysdata.domain);
+ hbus->bridge->domain_nr);
if (!hbus->wq) {
ret = -ENOMEM;
goto free_dom;
@@ -3113,9 +3185,9 @@ static int hv_pci_probe(struct hv_device *hdev,
goto unmap;
}
- hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
+ hbus->fwnode = irq_domain_alloc_named_fwnode(name);
kfree(name);
- if (!hbus->sysdata.fwnode) {
+ if (!hbus->fwnode) {
ret = -ENOMEM;
goto unmap;
}
@@ -3193,7 +3265,7 @@ exit_d0:
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
unmap:
iounmap(hbus->cfg_addr);
free_config:
@@ -3203,7 +3275,7 @@ close:
destroy_wq:
destroy_workqueue(hbus->wq);
free_dom:
- hv_put_dom_num(hbus->sysdata.domain);
+ hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
kfree(hbus);
return ret;
@@ -3295,9 +3367,9 @@ static int hv_pci_remove(struct hv_device *hdev)
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
- pci_stop_root_bus(hbus->pci_bus);
+ pci_stop_root_bus(hbus->bridge->bus);
hv_pci_remove_slots(hbus);
- pci_remove_root_bus(hbus->pci_bus);
+ pci_remove_root_bus(hbus->bridge->bus);
pci_unlock_rescan_remove();
}
@@ -3307,12 +3379,11 @@ static int hv_pci_remove(struct hv_device *hdev)
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
- pci_free_resource_list(&hbus->resources_for_children);
hv_pci_free_bridge_windows(hbus);
irq_domain_remove(hbus->irq_domain);
- irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ irq_domain_free_fwnode(hbus->fwnode);
- hv_put_dom_num(hbus->sysdata.domain);
+ hv_put_dom_num(hbus->bridge->domain_nr);
kfree(hbus);
return ret;
@@ -3390,7 +3461,7 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
*/
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
- pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
+ pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}
static int hv_pci_resume(struct hv_device *hdev)
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index c979229a6d0d..cb0aa65d6934 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -372,11 +372,6 @@ struct tegra_pcie_port {
struct gpio_desc *reset_gpio;
};
-struct tegra_pcie_bus {
- struct list_head list;
- unsigned int nr;
-};
-
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
unsigned long offset)
{
@@ -764,7 +759,7 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
- const char *err_msg[] = {
+ static const char * const err_msg[] = {
"Unknown",
"AXI slave error",
"AXI decode error",
@@ -1553,12 +1548,10 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
while (reg) {
unsigned int offset = find_first_bit(&reg, 32);
unsigned int index = i * 32 + offset;
- unsigned int irq;
+ int ret;
- irq = irq_find_mapping(msi->domain->parent, index);
- if (irq) {
- generic_handle_irq(irq);
- } else {
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
/*
* that's weird who triggered this?
* just clear it
@@ -2193,13 +2186,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base))
- return PTR_ERR(rp->base);
+ if (IS_ERR(rp->base)) {
+ err = PTR_ERR(rp->base);
+ goto err_node_put;
+ }
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
if (!label) {
- dev_err(dev, "failed to create reset GPIO label\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_node_put;
}
/*
@@ -2217,7 +2212,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
} else {
dev_err(dev, "failed to get reset GPIO: %ld\n",
PTR_ERR(rp->reset_gpio));
- return PTR_ERR(rp->reset_gpio);
+ err = PTR_ERR(rp->reset_gpio);
+ goto err_node_put;
}
}
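
The gotos introduced here matter because for_each_available_child_of_node() holds a reference on the current child; returning directly from the loop body would leak it. The target label (outside this excerpt) presumably follows the usual kernel pattern:

err_node_put:
	of_node_put(port);	/* drop the ref held by the iterator */
	return err;
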
@@ -2548,7 +2544,7 @@ static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
if (list_empty(&pcie->ports))
return NULL;
- seq_printf(s, "Index Status\n");
+ seq_puts(s, "Index Status\n");
return seq_list_start(&pcie->ports, *pos);
}
@@ -2585,16 +2581,16 @@ static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%2u ", port->index);
if (up)
- seq_printf(s, "up");
+ seq_puts(s, "up");
if (active) {
if (up)
- seq_printf(s, ", ");
+ seq_puts(s, ", ");
- seq_printf(s, "active");
+ seq_puts(s, "active");
}
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
return 0;
}
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 1c34c897a7e2..b7a8e062fcc5 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -291,8 +291,7 @@ static void xgene_msi_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xgene_msi_group *msi_groups;
struct xgene_msi *xgene_msi;
- unsigned int virq;
- int msir_index, msir_val, hw_irq;
+ int msir_index, msir_val, hw_irq, ret;
u32 intr_index, grp_select, msi_grp;
chained_irq_enter(chip, desc);
@@ -330,10 +329,8 @@ static void xgene_msi_isr(struct irq_desc *desc)
* CPU0
*/
hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
- WARN_ON(!virq);
- if (virq != 0)
- generic_handle_irq(virq);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ WARN_ON_ONCE(ret);
msir_val &= ~(1 << intr_index);
}
grp_select &= ~(1 << msir_index);
@@ -451,7 +448,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xgene_msi->msi_regs)) {
- dev_err(&pdev->dev, "no reg space\n");
rc = PTR_ERR(xgene_msi->msi_regs);
goto error;
}
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 98aa1dccc6e6..7b1d3ebc34ec 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -55,7 +55,7 @@ static void altera_msi_isr(struct irq_desc *desc)
struct altera_msi *msi;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
msi = irq_desc_get_handler_data(desc);
@@ -65,11 +65,9 @@ static void altera_msi_isr(struct irq_desc *desc)
/* Dummy read from vector to clear the interrupt */
readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
- virq = irq_find_mapping(msi->inner_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(&msi->pdev->dev, "unexpected MSI\n");
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
+ dev_err_ratelimited(&msi->pdev->dev, "unexpected MSI\n");
}
}
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 523bd928b380..2513e9363236 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -646,7 +646,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
struct device *dev;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
@@ -658,11 +658,9 @@ static void altera_pcie_isr(struct irq_desc *desc)
/* clear interrupts */
cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
- virq = irq_find_mapping(pcie->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(dev, "unexpected IRQ, INT%d\n", bit);
+ ret = generic_handle_domain_irq(pcie->irq_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 08bc788d9422..cc30215f5a43 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -476,7 +476,7 @@ static struct msi_domain_info brcm_msi_domain_info = {
static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long status, virq;
+ unsigned long status;
struct brcm_msi *msi;
struct device *dev;
u32 bit;
@@ -489,10 +489,9 @@ static void brcm_pcie_msi_isr(struct irq_desc *desc)
status >>= msi->legacy_shift;
for_each_set_bit(bit, &status, msi->nr) {
- virq = irq_find_mapping(msi->inner_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ int ret;
+
+ ret = generic_handle_domain_irq(msi->inner_domain, bit);
+ if (ret)
dev_dbg(dev, "unexpected MSI\n");
}
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index 56b8ee7bf330..f918c713afb0 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
- LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
pcie->mem.name = "PCIe MEM space";
pcie->mem.flags = IORESOURCE_MEM;
- pci_add_resource(&resources, &pcie->mem);
+ pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ if (ret)
+ return ret;
pcie->map_irq = iproc_pcie_bcma_map_irq;
- ret = iproc_pcie_setup(pcie, &resources);
- if (ret) {
- dev_err(dev, "PCIe controller setup failed\n");
- pci_free_resource_list(&resources);
- return ret;
- }
-
bcma_set_drvdata(bdev, pcie);
- return 0;
+
+ return iproc_pcie_setup(pcie, &bridge->windows);
}
static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 35a82124a126..757b7fbcdc59 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -326,7 +326,6 @@ static void iproc_msi_handler(struct irq_desc *desc)
struct iproc_msi *msi;
u32 eq, head, tail, nr_events;
unsigned long hwirq;
- int virq;
chained_irq_enter(chip, desc);
@@ -362,8 +361,7 @@ static void iproc_msi_handler(struct irq_desc *desc)
/* process all outstanding events */
while (nr_events--) {
hwirq = decode_msi_hwirq(msi, eq, head);
- virq = irq_find_mapping(msi->inner_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->inner_domain, hwirq);
head++;
head %= EQ_LEN;
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index f3aeb8d4eaca..17c59b0d6978 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -645,7 +645,6 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
{
struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
unsigned long msi_enable, msi_status;
- unsigned int virq;
irq_hw_number_t bit, hwirq;
msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
@@ -659,8 +658,7 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
- virq = irq_find_mapping(port->msi_bottom_domain, hwirq);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(port->msi_bottom_domain, hwirq);
}
} while (true);
}
@@ -670,18 +668,15 @@ static void mtk_pcie_irq_handler(struct irq_desc *desc)
struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
- unsigned int virq;
irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
chained_irq_enter(irqchip, desc);
status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
- PCIE_INTX_SHIFT) {
- virq = irq_find_mapping(port->intx_domain,
- irq_bit - PCIE_INTX_SHIFT);
- generic_handle_irq(virq);
- }
+ PCIE_INTX_SHIFT)
+ generic_handle_domain_irq(port->intx_domain,
+ irq_bit - PCIE_INTX_SHIFT);
irq_bit = PCIE_MSI_SHIFT;
for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 25bee693834f..2f3f974977a3 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -14,6 +14,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -23,6 +24,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
@@ -207,6 +209,7 @@ struct mtk_pcie_port {
* struct mtk_pcie - PCIe host information
* @dev: pointer to PCIe device
* @base: IO mapped register base
+ * @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
* @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
@@ -215,6 +218,7 @@ struct mtk_pcie_port {
struct mtk_pcie {
struct device *dev;
void __iomem *base;
+ struct regmap *cfg;
struct clk *free_ck;
struct list_head ports;
@@ -602,7 +606,6 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
- u32 virq;
u32 bit = INTX_SHIFT;
chained_irq_enter(irqchip, desc);
@@ -612,9 +615,8 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
- virq = irq_find_mapping(port->irq_domain,
- bit - INTX_SHIFT);
- generic_handle_irq(virq);
+ generic_handle_domain_irq(port->irq_domain,
+ bit - INTX_SHIFT);
}
}
@@ -623,10 +625,8 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
- for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- virq = irq_find_mapping(port->inner_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ generic_handle_domain_irq(port->inner_domain, bit);
}
/* Clear MSI interrupt status */
writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
@@ -650,7 +650,11 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- port->irq = platform_get_irq(pdev, port->slot);
+ if (of_find_property(dev->of_node, "interrupt-names", NULL))
+ port->irq = platform_get_irq_byname(pdev, "pcie_irq");
+ else
+ port->irq = platform_get_irq(pdev, port->slot);
+
if (port->irq < 0)
return port->irq;
@@ -682,6 +686,10 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
val |= PCIE_CSR_LTSSM_EN(port->slot) |
PCIE_CSR_ASPM_L1_EN(port->slot);
writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ } else if (pcie->cfg) {
+ val = PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
}
/* Assert all reset signals */
@@ -985,6 +993,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
+ struct device_node *cfg_node;
int err;
/* get shared registers, which are optional */
@@ -995,6 +1004,14 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
return PTR_ERR(pcie->base);
}
+ cfg_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,generic-pciecfg");
+ if (cfg_node) {
+ pcie->cfg = syscon_node_to_regmap(cfg_node);
+ if (IS_ERR(pcie->cfg))
+ return PTR_ERR(pcie->cfg);
+ }
+
pcie->free_ck = devm_clk_get(dev, "free_ck");
if (IS_ERR(pcie->free_ck)) {
if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
@@ -1027,22 +1044,27 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node, *child;
struct mtk_pcie_port *port, *tmp;
- int err;
+ int err, slot;
+
+ slot = of_get_pci_domain_nr(dev->of_node);
+ if (slot < 0) {
+ for_each_available_child_of_node(node, child) {
+ err = of_pci_get_devfn(child);
+ if (err < 0) {
+ dev_err(dev, "failed to get devfn: %d\n", err);
+ goto error_put_node;
+ }
- for_each_available_child_of_node(node, child) {
- int slot;
+ slot = PCI_SLOT(err);
- err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", err);
- goto error_put_node;
+ err = mtk_pcie_parse_port(pcie, child, slot);
+ if (err)
+ goto error_put_node;
}
-
- slot = PCI_SLOT(err);
-
- err = mtk_pcie_parse_port(pcie, child, slot);
+ } else {
+ err = mtk_pcie_parse_port(pcie, node, slot);
if (err)
- goto error_put_node;
+ return err;
}
err = mtk_pcie_subsys_powerup(pcie);
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index fdab8202ae5d..329f930d17aa 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -412,16 +412,14 @@ static void mc_handle_msi(struct irq_desc *desc)
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
- virq = irq_find_mapping(msi->dev_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
bit);
}
@@ -570,17 +568,15 @@ static void mc_handle_intx(struct irq_desc *desc)
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long status;
u32 bit;
- u32 virq;
+ int ret;
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_INTX_MASK) {
status &= PM_MSI_INT_INTX_MASK;
status >>= PM_MSI_INT_INTX_SHIFT;
for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- virq = irq_find_mapping(port->intx_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
bit);
}
@@ -745,7 +741,7 @@ static void mc_handle_event(struct irq_desc *desc)
events = get_events(port);
for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_irq(irq_find_mapping(port->event_domain, bit));
+ generic_handle_domain_irq(port->event_domain, bit);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index b4a288e24aaf..aa1cf24a5a72 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -159,7 +159,7 @@ static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
return 0;
}
-static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -195,7 +195,7 @@ static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
@@ -246,7 +246,7 @@ static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
return 0;
}
-static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -259,7 +259,8 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
+ u8 interrupts)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -272,7 +273,7 @@ static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
return 0;
}
-static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
@@ -285,7 +286,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
-static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr, size_t size)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -322,7 +323,7 @@ static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
@@ -403,7 +404,7 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
return 0;
}
-static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -451,7 +452,7 @@ static const struct pci_epc_features rcar_pcie_epc_features = {
};
static const struct pci_epc_features*
-rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rcar_pcie_epc_features;
}
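
Every pci_epc_ops callback in this file (and in the rockchip endpoint driver below) gains the same virtual-function parameter. On the core API side, a caller addresses the physical function with vfunc_no = 0 and an SR-IOV virtual function beneath it with vfunc_no >= 1; a hedged call-site sketch with assumed values:

/* Map a window on physical function 0, then on its first VF */
ret = pci_epc_map_addr(epc, 0 /* func_no */, 0 /* PF itself */,
		       phys_addr, pci_addr, size);
ret = pci_epc_map_addr(epc, 0 /* func_no */, 1 /* first VF */,
		       phys_addr, pci_addr, size);
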
@@ -492,9 +493,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
pcie->dev = dev;
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
goto err_pm_disable;
}
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 765cf2b45e24..8f3131844e77 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -13,12 +13,14 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -41,6 +43,21 @@ struct rcar_msi {
int irq2;
};
+#ifdef CONFIG_ARM
+/*
+ * Here we keep a static copy of the remapped PCIe controller address.
+ * It is used only on aarch32 systems, all of which have a single PCIe
+ * controller, to give the L1 link-state fixup function, called from
+ * the ARM fault handler, quick access to the controller registers.
+ */
+static void __iomem *pcie_base;
+/*
+ * Static copy of the bus clock pointer, so we can check whether the
+ * clock is enabled.
+ */
+static struct clk *pcie_bus_clk;
+#endif
+
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -486,12 +503,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
while (reg) {
unsigned int index = find_first_bit(&reg, 32);
- unsigned int msi_irq;
+ int ret;
- msi_irq = irq_find_mapping(msi->domain->parent, index);
- if (msi_irq) {
- generic_handle_irq(msi_irq);
- } else {
+ ret = generic_handle_domain_irq(msi->domain->parent, index);
+ if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
@@ -776,6 +791,12 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
}
host->msi.irq2 = i;
+#ifdef CONFIG_ARM
+ /* Cache static copy for L1 link state fixup hook on aarch32 */
+ pcie_base = pcie->base;
+ pcie_bus_clk = host->bus_clk;
+#endif
+
return 0;
err_irq2:
@@ -1031,4 +1052,67 @@ static struct platform_driver rcar_pcie_driver = {
},
.probe = rcar_pcie_probe,
};
+
+#ifdef CONFIG_ARM
+static DEFINE_SPINLOCK(pmsr_lock);
+static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned long flags;
+ u32 pmsr, val;
+ int ret = 0;
+
+ spin_lock_irqsave(&pmsr_lock, flags);
+
+ if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) {
+ ret = 1;
+ goto unlock_exit;
+ }
+
+ pmsr = readl(pcie_base + PMSR);
+
+ /*
+ * Test whether the PCIe controller received a PM_ENTER_L1 DLLP
+ * while it is not in the L1 link state. If so, apply the fixup,
+ * which puts the controller into the L1 link state, from which
+ * it can return to L0s/L0 on its own.
+ */
+ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
+ writel(L1IATN, pcie_base + PMCTLR);
+ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ val & L1FAEG, 10, 1000);
+ WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+unlock_exit:
+ spin_unlock_irqrestore(&pmsr_lock, flags);
+ return ret;
+}
+
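For reference, readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) busy-polls the register until the condition holds or the timeout elapses, returning 0 or -ETIMEDOUT. The 10/1000 call above behaves roughly like this sketch:

/* Roughly: poll PMSR every 10us, give up after 1000us */
for (elapsed = 0; ; elapsed += 10) {
	val = readl(pcie_base + PMSR);
	if (val & L1FAEG)
		break;			/* ret = 0 */
	if (elapsed >= 1000) {
		ret = -ETIMEDOUT;	/* the WARN() above fires */
		break;
	}
	udelay(10);
}
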
+static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
+ { .compatible = "renesas,pcie-r8a7779" },
+ { .compatible = "renesas,pcie-r8a7790" },
+ { .compatible = "renesas,pcie-r8a7791" },
+ { .compatible = "renesas,pcie-rcar-gen2" },
+ {},
+};
+
+static int __init rcar_pcie_init(void)
+{
+ if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
+#ifdef CONFIG_ARM_LPAE
+ hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "asynchronous external abort");
+#else
+ hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+#endif
+ }
+
+ return platform_driver_register(&rcar_pcie_driver);
+}
+device_initcall(rcar_pcie_init);
+#else
builtin_platform_driver(rcar_pcie_driver);
+#endif
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
index d4c698b5f821..9bb125db85c6 100644
--- a/drivers/pci/controller/pcie-rcar.h
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -85,6 +85,13 @@
#define LTSMDIS BIT(31)
#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
+#define L1FAEG BIT(31)
+#define PMEL1RX BIT(23)
+#define PMSTATE GENMASK(18, 16)
+#define PMSTATE_L1 (3 << 16)
+#define PMCTLR 0x011060
+#define L1IATN BIT(31)
+
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
#define SPCNGRSN BIT(31)
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 7631dc3961c1..5fb9ce6e536e 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -122,7 +122,7 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
}
-static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -159,7 +159,7 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -227,7 +227,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -256,7 +256,7 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
-static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
{
@@ -284,7 +284,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
return 0;
}
-static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -308,7 +308,7 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
u8 multi_msg_cap)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
@@ -329,7 +329,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
@@ -471,7 +471,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
return 0;
}
-static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
@@ -510,7 +510,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
};
static const struct pci_epc_features*
-rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
return &rockchip_pcie_epc_features;
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 78d04ac29cd5..c52316d0bfd2 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -517,7 +517,7 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
struct device *dev = rockchip->dev;
u32 reg;
u32 hwirq;
- u32 virq;
+ int ret;
chained_irq_enter(chip, desc);
@@ -528,10 +528,8 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
hwirq = ffs(reg) - 1;
reg &= ~BIT(hwirq);
- virq = irq_find_mapping(rockchip->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
- else
+ ret = generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+ if (ret)
dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
}
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 67937facd90c..95426df03200 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -222,7 +222,7 @@ static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));
for_each_set_bit(i, &val, PCI_NUM_INTX)
- generic_handle_irq(irq_find_mapping(port->intx_domain, i));
+ generic_handle_domain_irq(port->intx_domain, i);
chained_irq_exit(chip, desc);
}
@@ -282,7 +282,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
for_each_set_bit(i, &val, 32)
- generic_handle_irq(irq_find_mapping(port->cpm_domain, i));
+ generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
/*
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 8689311c5ef6..a72b4f9a2b00 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -6,6 +6,7 @@
* (C) Copyright 2014 - 2015, Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -169,6 +170,7 @@ struct nwl_pcie {
u8 last_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -318,18 +320,14 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
struct nwl_pcie *pcie;
unsigned long status;
u32 bit;
- u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &status, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
}
chained_irq_exit(chip, desc);
@@ -340,16 +338,13 @@ static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
struct nwl_msi *msi;
unsigned long status;
u32 bit;
- u32 virq;
msi = &pcie->msi;
while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
for_each_set_bit(bit, &status, 32) {
nwl_bridge_writel(pcie, 1 << bit, status_reg);
- virq = irq_find_mapping(msi->dev_domain, bit);
- if (virq)
- generic_handle_irq(virq);
+ generic_handle_domain_irq(msi->dev_domain, bit);
}
}
}
@@ -823,6 +818,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ err = clk_prepare_enable(pcie->clk);
+ if (err) {
+ dev_err(dev, "can't enable PCIe ref clock\n");
+ return err;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 14001febf59a..aa9bdcebc838 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -385,7 +385,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
- unsigned int irq;
+ struct irq_domain *domain;
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
@@ -399,19 +399,18 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- irq = irq_find_mapping(port->msi_domain->parent, val);
+ domain = port->msi_domain->parent;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- irq = irq_find_mapping(port->leg_domain, val);
+ domain = port->leg_domain;
}
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
- if (irq)
- generic_handle_irq(irq);
+ generic_handle_domain_irq(domain, val);
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e3fcdfec58b3..a5987e52700e 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
@@ -447,6 +448,56 @@ static struct pci_ops vmd_ops = {
.write = vmd_pci_write,
};
+#ifdef CONFIG_ACPI
+static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
+{
+ struct pci_host_bridge *bridge;
+ u32 busnr, addr;
+
+ if (pci_dev->bus->ops != &vmd_ops)
+ return NULL;
+
+ bridge = pci_find_host_bridge(pci_dev->bus);
+ busnr = pci_dev->bus->number - bridge->bus->number;
+ /*
+ * The address computation below is only applicable to relative bus
+ * numbers below 32.
+ */
+ if (busnr > 31)
+ return NULL;
+
+ addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;
+
+ dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
+ addr);
+
+ return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
+ false);
+}
+
+static bool hook_installed;
+
+static void vmd_acpi_begin(void)
+{
+ if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
+ return;
+
+ hook_installed = true;
+}
+
+static void vmd_acpi_end(void)
+{
+ if (!hook_installed)
+ return;
+
+ pci_acpi_clear_companion_lookup_hook();
+ hook_installed = false;
+}
+#else
+static inline void vmd_acpi_begin(void) { }
+static inline void vmd_acpi_end(void) { }
+#endif /* CONFIG_ACPI */
+
static void vmd_attach_resources(struct vmd_dev *vmd)
{
vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
@@ -747,6 +798,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ vmd_acpi_begin();
+
pci_scan_child_bus(vmd->bus);
pci_assign_unassigned_bus_resources(vmd->bus);
@@ -760,6 +813,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
+ vmd_acpi_end();
+
WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
"domain"), "Can't create symlink to domain\n");
return 0;
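[Editor's note: a hedged, standalone illustration of the companion-address encoding that vmd_acpi_find_companion() computes above. For relative bus numbers 0..31, bits 31:24 carry the bus (the top bit comes from the fixed mask), bits 23:16 carry devfn, and the low 16 bits are fixed by the 0x8000FFFF constant. Plain userspace C, names hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vmd_addr(uint32_t busnr, uint32_t devfn)
    {
            return (busnr << 24) | (devfn << 16) | 0x8000FFFFu;
    }

    int main(void)
    {
            /* relative bus 1, device 2 function 0 -> devfn = 0x10 */
            printf("0x%08x\n", vmd_addr(1, 0x10));  /* prints 0x8110ffff */
            return 0;
    }
]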
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index bce274d02dcf..8b4756159f15 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -87,6 +87,7 @@ struct epf_ntb {
struct epf_ntb_epc {
u8 func_no;
+ u8 vfunc_no;
bool linkup;
bool is_msix;
int msix_bar;
@@ -143,14 +144,15 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
struct pci_epc *epc;
+ u8 func_no, vfunc_no;
bool is_msix;
- u8 func_no;
int ret;
for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) {
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
is_msix = ntb_epc->is_msix;
ctrl = ntb_epc->reg;
if (link_up)
@@ -158,7 +160,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
else
ctrl->link_status &= ~LINK_STATUS_UP;
irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
- ret = pci_epc_raise_irq(epc, func_no, irq_type, 1);
+ ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
"%s intf: Failed to raise Link Up IRQ\n",
@@ -238,10 +240,10 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
u64 addr, size;
int ret = 0;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -267,8 +269,9 @@ static int epf_ntb_configure_mw(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size);
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size);
if (ret)
dev_err(&epc->dev,
"%s intf: Failed to map memory window %d address\n",
@@ -296,8 +299,8 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
enum pci_barno peer_barno;
struct epf_ntb_ctrl *ctrl;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -311,8 +314,9 @@ static void epf_ntb_teardown_mw(struct epf_ntb *ntb,
if (mw + NTB_MW_OFFSET == BAR_DB_MW1)
phys_addr += ctrl->mw1_offset;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -385,8 +389,8 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
struct epf_ntb_ctrl *peer_ctrl;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -400,8 +404,9 @@ static int epf_ntb_configure_msi(struct epf_ntb *ntb,
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count,
+ ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count,
db_entry_size, &db_data, &db_offset);
if (ret) {
dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n",
@@ -491,10 +496,10 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
u32 db_entry_size, msg_data;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
size_t align;
u64 msg_addr;
- u8 func_no;
int ret, i;
ntb_epc = ntb->epc[type];
@@ -512,12 +517,13 @@ static int epf_ntb_configure_msix(struct epf_ntb *ntb,
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_entry_size = peer_ctrl->db_entry_size;
for (i = 0; i < db_count; i++) {
msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align);
msg_data = msix_tbl[i].msg_data;
- ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr,
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr,
db_entry_size);
if (ret) {
dev_err(&epc->dev,
@@ -586,8 +592,8 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
struct pci_epf_bar *peer_epf_bar;
enum pci_barno peer_barno;
phys_addr_t phys_addr;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
ntb_epc = ntb->epc[type];
epc = ntb_epc->epc;
@@ -597,8 +603,9 @@ epf_ntb_teardown_db(struct epf_ntb *ntb, enum pci_epc_interface_type type)
peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno];
phys_addr = peer_epf_bar->phys_addr;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
- pci_epc_unmap_addr(epc, func_no, phys_addr);
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr);
}
/**
@@ -728,14 +735,15 @@ static void epf_ntb_peer_spad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -775,9 +783,9 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
struct pci_epf_bar *peer_epf_bar, *epf_bar;
enum pci_barno peer_barno, barno;
u32 peer_spad_offset;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
dev = &ntb->epf->dev;
@@ -790,6 +798,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
epf_bar = &ntb_epc->epf_bar[barno];
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
peer_spad_offset = peer_ntb_epc->reg->spad_offset;
@@ -798,7 +807,7 @@ static int epf_ntb_peer_spad_bar_set(struct epf_ntb *ntb,
epf_bar->barno = barno;
epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32;
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: peer SPAD BAR set failed\n",
pci_epc_interface_string(type));
@@ -842,14 +851,15 @@ static void epf_ntb_config_sspad_bar_clear(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
/**
@@ -886,10 +896,10 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
{
struct pci_epf_bar *epf_bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct epf_ntb *ntb;
struct pci_epc *epc;
struct device *dev;
- u8 func_no;
int ret;
ntb = ntb_epc->epf_ntb;
@@ -897,10 +907,11 @@ static int epf_ntb_config_sspad_bar_set(struct epf_ntb_epc *ntb_epc)
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n",
pci_epc_interface_string(ntb_epc->type));
@@ -1214,17 +1225,18 @@ static void epf_ntb_db_mw_bar_clear(struct epf_ntb_epc *ntb_epc)
struct pci_epf_bar *epf_bar;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
- u8 func_no;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) {
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- pci_epc_clear_bar(epc, func_no, epf_bar);
+ pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
}
}
@@ -1263,10 +1275,10 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
bool msix_capable, msi_capable;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
u32 db_count;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1282,6 +1294,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
db_count = ntb->db_count;
if (db_count > MAX_DB_COUNT) {
@@ -1293,7 +1306,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
epc = ntb_epc->epc;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, func_no, db_count);
+ ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count);
if (ret) {
dev_err(dev, "%s intf: MSI configuration failed\n",
pci_epc_interface_string(type));
@@ -1302,7 +1315,7 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb,
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, func_no, db_count,
+ ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count,
ntb_epc->msix_bar,
ntb_epc->msix_table_offset);
if (ret) {
@@ -1423,11 +1436,11 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
u32 num_mws, db_count;
enum epf_ntb_bar bar;
enum pci_barno barno;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct device *dev;
size_t align;
int ret, i;
- u8 func_no;
u64 size;
ntb_epc = ntb->epc[type];
@@ -1437,6 +1450,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
align = epc_features->align;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
epc = ntb_epc->epc;
num_mws = ntb->num_mws;
db_count = ntb->db_count;
@@ -1464,7 +1478,7 @@ static int epf_ntb_db_mw_bar_init(struct epf_ntb *ntb,
barno = ntb_epc->epf_ntb_bar[bar];
epf_bar = &ntb_epc->epf_bar[barno];
- ret = pci_epc_set_bar(epc, func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
if (ret) {
dev_err(dev, "%s intf: DoorBell BAR set failed\n",
pci_epc_interface_string(type));
@@ -1536,9 +1550,9 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
const struct pci_epc_features *epc_features;
struct pci_epf_bar *epf_bar;
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
dev = &ntb->epf->dev;
@@ -1547,6 +1561,7 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
return -ENOMEM;
epf = ntb->epf;
+ vfunc_no = epf->vfunc_no;
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
epf_bar = epf->bar;
@@ -1558,11 +1573,12 @@ static int epf_ntb_epc_create_interface(struct epf_ntb *ntb,
ntb_epc->linkup = false;
ntb_epc->epc = epc;
ntb_epc->func_no = func_no;
+ ntb_epc->vfunc_no = vfunc_no;
ntb_epc->type = type;
ntb_epc->epf_bar = epf_bar;
ntb_epc->epf_ntb = ntb;
- epc_features = pci_epc_get_features(epc, func_no);
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
if (!epc_features)
return -EINVAL;
ntb_epc->epc_features = epc_features;
@@ -1702,10 +1718,10 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
enum pci_epc_interface_type type)
{
struct epf_ntb_epc *ntb_epc;
+ u8 func_no, vfunc_no;
struct pci_epc *epc;
struct pci_epf *epf;
struct device *dev;
- u8 func_no;
int ret;
ntb_epc = ntb->epc[type];
@@ -1713,6 +1729,7 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
dev = &epf->dev;
epc = ntb_epc->epc;
func_no = ntb_epc->func_no;
+ vfunc_no = ntb_epc->vfunc_no;
ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]);
if (ret) {
@@ -1742,11 +1759,13 @@ static int epf_ntb_epc_init_interface(struct epf_ntb *ntb,
goto err_db_mw_bar_init;
}
- ret = pci_epc_write_header(epc, func_no, epf->header);
- if (ret) {
- dev_err(dev, "%s intf: Configuration header write failed\n",
- pci_epc_interface_string(type));
- goto err_write_header;
+ if (vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "%s intf: Configuration header write failed\n",
+ pci_epc_interface_string(type));
+ goto err_write_header;
+ }
}
INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
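[Editor's note: every pci_epc_*() call in this function driver now carries the virtual function number alongside the physical one; 0 means the physical function itself. A hedged sketch of the updated calling convention, with error handling trimmed and names taken from the hunks above:

    /* vfunc_no == 0 targets the PF; values >= 1 address virtual functions */
    static int demo_configure(struct pci_epf *epf, struct pci_epf_bar *bar)
    {
            struct pci_epc *epc = epf->epc;
            int ret;

            ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
            if (ret)
                    return ret;

            /* only vfunc 0 (the PF) and vfunc 1 carry a config header */
            if (epf->vfunc_no <= 1)
                    ret = pci_epc_write_header(epc, epf->func_no,
                                               epf->vfunc_no, epf->header);
            return ret;
    }
]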
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index d2708ca4bece..90d84d3bc868 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -247,8 +247,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -263,8 +263,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_src_map_addr;
}
- ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -291,13 +291,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -331,8 +331,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->src_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@@ -386,7 +386,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -419,8 +419,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err;
}
- ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
- reg->size);
+ ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
+ reg->dst_addr, reg->size);
if (ret) {
dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@@ -479,7 +479,7 @@ err_dma_map:
kfree(buf);
err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -501,13 +501,16 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
switch (irq_type) {
case IRQ_TYPE_LEGACY:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
break;
case IRQ_TYPE_MSI:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, irq);
break;
case IRQ_TYPE_MSIX:
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, irq);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -542,7 +545,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_LEGACY, 0);
goto reset_handler;
}
@@ -580,22 +584,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
if (command & COMMAND_RAISE_MSI_IRQ) {
- count = pci_epc_get_msi(epc, epf->func_no);
+ count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSI, reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSIX_IRQ) {
- count = pci_epc_get_msix(epc, epf->func_no);
+ count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
- pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
- reg->irq_number);
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
+ PCI_EPC_IRQ_MSIX, reg->irq_number);
goto reset_handler;
}
@@ -618,7 +622,8 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
}
@@ -650,7 +655,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
- ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
+ epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
@@ -674,16 +680,18 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
bool msi_capable = true;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (epc_features) {
msix_capable = epc_features->msix_capable;
msi_capable = epc_features->msi_capable;
}
- ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
- dev_err(dev, "Configuration header write failed\n");
- return ret;
+ if (epf->vfunc_no <= 1) {
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
}
ret = pci_epf_test_set_bar(epf);
@@ -691,7 +699,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return ret;
if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ epf->msi_interrupts);
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
@@ -699,7 +708,8 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
+ epf->msix_interrupts,
epf_test->test_reg_bar,
epf_test->msix_table_offset);
if (ret) {
@@ -832,7 +842,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
- epc_features = pci_epc_get_features(epc, epf->func_no);
+ epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epc_features) {
dev_err(&epf->dev, "epc_features not implemented\n");
return -EOPNOTSUPP;
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index f3a8b833b479..999911801877 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -475,6 +475,28 @@ static struct configfs_attribute *pci_epf_attrs[] = {
NULL,
};
+static int pci_epf_vepf_link(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ return pci_epf_add_vepf(epf_pf, epf_vf);
+}
+
+static void pci_epf_vepf_unlink(struct config_item *epf_pf_item,
+ struct config_item *epf_vf_item)
+{
+ struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item);
+ struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item);
+ struct pci_epf *epf_pf = epf_pf_group->epf;
+ struct pci_epf *epf_vf = epf_vf_group->epf;
+
+ pci_epf_remove_vepf(epf_pf, epf_vf);
+}
+
static void pci_epf_release(struct config_item *item)
{
struct pci_epf_group *epf_group = to_pci_epf_group(item);
@@ -487,6 +509,8 @@ static void pci_epf_release(struct config_item *item)
}
static struct configfs_item_operations pci_epf_ops = {
+ .allow_link = pci_epf_vepf_link,
+ .drop_link = pci_epf_vepf_unlink,
.release = pci_epf_release,
};
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index adec9bee72cf..ecbb0fb3b653 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -137,24 +137,29 @@ EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
* @epc: the features supported by *this* EPC device will be returned
* @func_no: the features supported by the EPC device specific to the
* endpoint function with func_no will be returned
+ * @vfunc_no: the features supported by the EPC device specific to the
+ * virtual endpoint function with vfunc_no will be returned
*
* Invoke to get the features provided by the EPC which may be
* specific to an endpoint function. Returns pci_epc_features on success
* and NULL for any failures.
*/
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no)
+ u8 func_no, u8 vfunc_no)
{
const struct pci_epc_features *epc_features;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return NULL;
+
if (!epc->ops->get_features)
return NULL;
mutex_lock(&epc->lock);
- epc_features = epc->ops->get_features(epc, func_no);
+ epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
return epc_features;
@@ -205,13 +210,14 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
/**
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @type: specify the type of interrupt; legacy, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number
*
* Invoke to raise a legacy, MSI or MSI-X interrupt
*/
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
@@ -219,11 +225,14 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->raise_irq)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
mutex_unlock(&epc->lock);
return ret;
@@ -235,6 +244,7 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* MSI data
* @epc: the EPC device which has the MSI capability
* @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: the physical address of the outbound region
* @interrupt_num: the MSI interrupt number
* @entry_size: Size of Outbound address region for each interrupt
@@ -250,21 +260,25 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
* physical address (in outbound region) of the other interface to ring
* doorbell.
*/
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr,
- u8 interrupt_num, u32 entry_size, u32 *msi_data,
- u32 *msi_addr_offset)
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
+ u32 *msi_data, u32 *msi_addr_offset)
{
int ret;
if (IS_ERR_OR_NULL(epc))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_msi_irq)
return -EINVAL;
mutex_lock(&epc->lock);
- ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num,
- entry_size, msi_data, msi_addr_offset);
+ ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
+ interrupt_num, entry_size, msi_data,
+ msi_addr_offset);
mutex_unlock(&epc->lock);
return ret;
@@ -274,22 +288,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts was requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI interrupts allocated by the RC
*/
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msi)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msi(epc, func_no);
+ interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -304,12 +322,13 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
/**
* pci_epc_set_msi() - set the number of MSI interrupt numbers required
* @epc: the EPC device on which MSI has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
int ret;
u8 encode_int;
@@ -318,13 +337,16 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
interrupts > 32)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msi)
return 0;
encode_int = order_base_2(interrupts);
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
mutex_unlock(&epc->lock);
return ret;
@@ -334,22 +356,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
* @epc: the EPC device to which MSI-X interrupts was requested
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
*
* Invoke to get the number of MSI-X interrupts allocated by the RC
*/
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return 0;
+
if (!epc->ops->get_msix)
return 0;
mutex_lock(&epc->lock);
- interrupt = epc->ops->get_msix(epc, func_no);
+ interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
mutex_unlock(&epc->lock);
if (interrupt < 0)
@@ -362,15 +388,16 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
/**
* pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
* @epc: the EPC device on which MSI-X has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @interrupts: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno bir, u32 offset)
{
int ret;
@@ -378,11 +405,15 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
interrupts < 1 || interrupts > 2048)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
+ offset);
mutex_unlock(&epc->lock);
return ret;
@@ -392,22 +423,26 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
*
* Invoke to unmap the CPU address from PCI address.
*/
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->unmap_addr)
return;
mutex_lock(&epc->lock);
- epc->ops->unmap_addr(epc, func_no, phys_addr);
+ epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -415,14 +450,15 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
/**
* pci_epc_map_addr() - map CPU address to PCI address
* @epc: the EPC device on which address is allocated
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @phys_addr: physical address of the local system
* @pci_addr: PCI address to which the physical address should be mapped
* @size: the size of the allocation
*
* Invoke to map CPU address with PCI address.
*/
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
@@ -430,11 +466,15 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->map_addr)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
+ size);
mutex_unlock(&epc->lock);
return ret;
@@ -444,12 +484,13 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to reset the BAR of the endpoint device.
*/
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -457,11 +498,14 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
return;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return;
+
if (!epc->ops->clear_bar)
return;
mutex_lock(&epc->lock);
- epc->ops->clear_bar(epc, func_no, epf_bar);
+ epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -469,12 +513,13 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
* pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
* @epc: the EPC device on which BAR has to be configured
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @epf_bar: the struct epf_bar that contains the BAR information
*
* Invoke to configure the BAR of the endpoint device.
*/
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
int ret;
@@ -489,11 +534,14 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
if (!epc->ops->set_bar)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
mutex_unlock(&epc->lock);
return ret;
@@ -503,7 +551,8 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
- * @func_no: the endpoint function number in the EPC device
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
* @header: standard configuration header fields
*
* Invoke to write the configuration header to the endpoint controller. Every
@@ -511,7 +560,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
* configuration header would be written. The callback function should write
* the header fields to this dedicated location.
*/
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *header)
{
int ret;
@@ -519,11 +568,18 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return -EINVAL;
+
+ /* Only Virtual Function #1 has deviceID */
+ if (vfunc_no > 1)
+ return -EINVAL;
+
if (!epc->ops->write_header)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->write_header(epc, func_no, header);
+ ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
mutex_unlock(&epc->lock);
return ret;
@@ -548,7 +604,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
u32 func_no;
int ret = 0;
- if (IS_ERR_OR_NULL(epc))
+ if (IS_ERR_OR_NULL(epc) || epf->is_vf)
return -EINVAL;
if (type == PRIMARY_INTERFACE && epf->epc)
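[Editor's note: each wrapper in pci-epc-core.c above repeats the same bounds check before trusting vfunc_no. A hedged distillation of that guard as a helper (not in the patch), should a new wrapper need it:

    /* vfunc_no == 0 always passes; otherwise the controller must
     * advertise virtual functions and vfunc_no must stay within
     * max_vfs[func_no].
     */
    static bool epc_vfunc_valid(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
    {
            if (vfunc_no == 0)
                    return true;
            return epc->max_vfs && vfunc_no <= epc->max_vfs[func_no];
    }
]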
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 502eb79cd551..8aea16380870 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -62,13 +62,20 @@ EXPORT_SYMBOL_GPL(pci_epf_type_add_cfs);
*/
void pci_epf_unbind(struct pci_epf *epf)
{
+ struct pci_epf *epf_vf;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return;
}
mutex_lock(&epf->lock);
- epf->driver->ops->unbind(epf);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ if (epf_vf->is_bound)
+ epf_vf->driver->ops->unbind(epf_vf);
+ }
+ if (epf->is_bound)
+ epf->driver->ops->unbind(epf);
mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
@@ -83,10 +90,14 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ struct device *dev = &epf->dev;
+ struct pci_epf *epf_vf;
+ u8 func_no, vfunc_no;
+ struct pci_epc *epc;
int ret;
if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ dev_WARN(dev, "epf device not bound to driver\n");
return -EINVAL;
}
@@ -94,14 +105,141 @@ int pci_epf_bind(struct pci_epf *epf)
return -EAGAIN;
mutex_lock(&epf->lock);
+ list_for_each_entry(epf_vf, &epf->pci_vepf, list) {
+ vfunc_no = epf_vf->vfunc_no;
+
+ if (vfunc_no < 1) {
+ dev_err(dev, "Invalid virtual function number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ epc = epf->epc;
+ func_no = epf->func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epc = epf->sec_epc;
+ func_no = epf->sec_epc_func_no;
+ if (!IS_ERR_OR_NULL(epc)) {
+ if (!epc->max_vfs) {
+ dev_err(dev, "No support for virt function\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (vfunc_no > epc->max_vfs[func_no]) {
+ dev_err(dev, "PF%d: Exceeds max vfunc number\n",
+ func_no);
+ ret = -EINVAL;
+ goto ret;
+ }
+ }
+
+ epf_vf->func_no = epf->func_no;
+ epf_vf->sec_epc_func_no = epf->sec_epc_func_no;
+ epf_vf->epc = epf->epc;
+ epf_vf->sec_epc = epf->sec_epc;
+ ret = epf_vf->driver->ops->bind(epf_vf);
+ if (ret)
+ goto ret;
+ epf_vf->is_bound = true;
+ }
+
ret = epf->driver->ops->bind(epf);
+ if (ret)
+ goto ret;
+ epf->is_bound = true;
+
+ mutex_unlock(&epf->lock);
+ return 0;
+
+ret:
mutex_unlock(&epf->lock);
+ pci_epf_unbind(epf);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
/**
+ * pci_epf_add_vepf() - associate virtual EP function to physical EP function
+ * @epf_pf: the physical EP function to which the virtual EP function should be
+ * associated
+ * @epf_vf: the virtual EP function to be added
+ *
+ * A physical endpoint function can be associated with multiple virtual
+ * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint
+ * function to a physical PCI endpoint function.
+ */
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ u32 vfunc_no;
+
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return -EINVAL;
+
+ if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf)
+ return -EBUSY;
+
+ if (epf_pf->sec_epc || epf_vf->sec_epc)
+ return -EBUSY;
+
+ mutex_lock(&epf_pf->lock);
+ vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map,
+ BITS_PER_LONG);
+ if (vfunc_no >= BITS_PER_LONG) {
+ mutex_unlock(&epf_pf->lock);
+ return -EINVAL;
+ }
+
+ set_bit(vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->vfunc_no = vfunc_no;
+
+ epf_vf->epf_pf = epf_pf;
+ epf_vf->is_vf = true;
+
+ list_add_tail(&epf_vf->list, &epf_pf->pci_vepf);
+ mutex_unlock(&epf_pf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
+
+/**
+ * pci_epf_remove_vepf() - remove virtual EP function from physical EP function
+ * @epf_pf: the physical EP function from which the virtual EP function should
+ * be removed
+ * @epf_vf: the virtual EP function to be removed
+ *
+ * Invoke to remove a virtual endpoint function from the physical endpoint
+ * function.
+ */
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+{
+ if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf))
+ return;
+
+ mutex_lock(&epf_pf->lock);
+ clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ list_del(&epf_vf->list);
+ mutex_unlock(&epf_pf->lock);
+}
+EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+
+/**
* pci_epf_free_space() - free the allocated PCI EPF register space
* @epf: the EPF device from whom to free the memory
* @addr: the virtual address of the PCI EPF register space
@@ -317,6 +455,10 @@ struct pci_epf *pci_epf_create(const char *name)
return ERR_PTR(-ENOMEM);
}
+ /* VFs are numbered starting with 1. So set BIT(0) by default */
+ epf->vfunction_num_map = 1;
+ INIT_LIST_HEAD(&epf->pci_vepf);
+
dev = &epf->dev;
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index e01d53f5b32f..afa50b446567 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -23,6 +23,7 @@ struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus)
return to_pci_host_bridge(root_bus->bridge);
}
+EXPORT_SYMBOL_GPL(pci_find_host_bridge);
struct device *pci_get_host_bridge_device(struct pci_dev *dev)
{
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index a32070be5adf..cc6194aa24c1 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -40,9 +40,6 @@ ibmphp:
* The return value of pci_hp_register() is not checked.
-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
- and once more in the error path of its caller ibmphp_access_ebda().
-
* The various slot data structures are difficult to follow and need to be
simplified. A lot of functions are too large and too complex, they need
to be broken up into smaller, manageable pieces. Negative examples are
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 11a2661dc062..7fb75401ad8a 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
/* init hpc structure */
hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
if (!hpc_ptr) {
- rc = -ENOMEM;
- goto error_no_hpc;
+ return -ENOMEM;
}
hpc_ptr->ctlr_id = ctlr_id;
hpc_ptr->ctlr_relative_id = ctlr;
@@ -910,8 +909,6 @@ error:
kfree(tmp_slot);
error_no_slot:
free_ebda_hpc(hpc_ptr);
-error_no_hpc:
- iounmap(io_mem);
return rc;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index d4a930881054..69fd401691be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -184,7 +184,7 @@ void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot);
-int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe);
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe);
int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9d06939736c0..3024d7e85e6a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -870,7 +870,7 @@ void pcie_disable_interrupt(struct controller *ctrl)
* momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
-int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 04565162a449..f4c2e6e01be0 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -526,7 +526,7 @@ scan:
return 0;
}
-static int pnv_php_reset_slot(struct hotplug_slot *slot, int probe)
+static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
struct pci_dev *bridge = php_slot->pdev;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a143b02b2dcd..d84381ce82b5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -310,7 +310,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
/* Check for ranges property */
err = of_pci_range_parser_init(&parser, dev_node);
if (err)
- goto failed;
+ return 0;
dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 36bc23e21759..a1b1e2a01632 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -17,6 +17,7 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
+#include <linux/rwsem.h>
#include "pci.h"
/*
@@ -934,58 +935,77 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
static struct acpi_device *acpi_pci_find_companion(struct device *dev);
+void pci_set_acpi_fwnode(struct pci_dev *dev)
+{
+ if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+ ACPI_COMPANION_SET(&dev->dev,
+ acpi_pci_find_companion(&dev->dev));
+}
+
+/**
+ * pci_dev_acpi_reset - do a function level reset using _RST method
+ * @dev: device to reset
+ * @probe: if true, return 0 if device supports _RST
+ */
+int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
+{
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
+
+ if (!handle || !acpi_has_method(handle, "_RST"))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
+ pci_warn(dev, "ACPI _RST failed\n");
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static bool acpi_pci_power_manageable(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (!adev)
+ return false;
+ return acpi_device_power_manageable(adev);
+}
+
static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
- const struct fwnode_handle *fwnode;
+ const union acpi_object *obj;
struct acpi_device *adev;
- struct pci_dev *root;
- u8 val;
+ struct pci_dev *rpdev;
if (!dev->is_hotplug_bridge)
return false;
/* Assume D3 support if the bridge is power-manageable by ACPI. */
- adev = ACPI_COMPANION(&dev->dev);
- if (!adev && !pci_dev_is_added(dev)) {
- adev = acpi_pci_find_companion(&dev->dev);
- ACPI_COMPANION_SET(&dev->dev, adev);
- }
-
- if (adev && acpi_device_power_manageable(adev))
+ if (acpi_pci_power_manageable(dev))
return true;
/*
- * Look for a special _DSD property for the root port and if it
- * is set we know the hierarchy behind it supports D3 just fine.
+ * The ACPI firmware will provide the device-specific properties through
+ * the _DSD configuration object. Look for the 'HotPlugSupportInD3' property
+ * for the root port and if it is set we know the hierarchy behind it
+ * supports D3 just fine.
*/
- root = pcie_find_root_port(dev);
- if (!root)
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev)
return false;
- adev = ACPI_COMPANION(&root->dev);
- if (root == dev) {
- /*
- * It is possible that the ACPI companion is not yet bound
- * for the root port so look it up manually here.
- */
- if (!adev && !pci_dev_is_added(root))
- adev = acpi_pci_find_companion(&root->dev);
- }
-
+ adev = ACPI_COMPANION(&rpdev->dev);
if (!adev)
return false;
- fwnode = acpi_fwnode_handle(adev);
- if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
+ if (acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ ACPI_TYPE_INTEGER, &obj) < 0)
return false;
- return val == 1;
-}
-
-static bool acpi_pci_power_manageable(struct pci_dev *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
- return adev ? acpi_device_power_manageable(adev) : false;
+ return obj->integer.value == 1;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
@@ -1159,6 +1179,69 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
}
/* ACPI bus type */
+
+
+static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
+static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);
+
+/**
+ * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
+ * @func: ACPI companion lookup callback pointer or NULL.
+ *
+ * Set a special ACPI companion lookup callback for PCI devices whose companion
+ * objects in the ACPI namespace have _ADR with non-standard bus-device-function
+ * encodings.
+ *
+ * Return 0 on success or a negative error code on failure (in which case no
+ * changes are made).
+ *
+ * The caller is responsible for the appropriate ordering of the invocations of
+ * this function with respect to the enumeration of the PCI devices needing the
+ * callback installed by it.
+ */
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
+{
+ int ret;
+
+ if (!func)
+ return -EINVAL;
+
+ down_write(&pci_acpi_companion_lookup_sem);
+
+ if (pci_acpi_find_companion_hook) {
+ ret = -EBUSY;
+ } else {
+ pci_acpi_find_companion_hook = func;
+ ret = 0;
+ }
+
+ up_write(&pci_acpi_companion_lookup_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);
+
+/**
+ * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
+ *
+ * Clear the special ACPI companion lookup callback previously set by
+ * pci_acpi_set_companion_lookup_hook(). Block until the last running instance
+ * of the callback returns before clearing it.
+ *
+ * The caller is responsible for the appropriate ordering of the invocations of
+ * this function with respect to the enumeration of the PCI devices needing the
+ * callback cleared by it.
+ */
+void pci_acpi_clear_companion_lookup_hook(void)
+{
+ down_write(&pci_acpi_companion_lookup_sem);
+
+ pci_acpi_find_companion_hook = NULL;
+
+ up_write(&pci_acpi_companion_lookup_sem);
+}
+EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
+
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -1166,6 +1249,16 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
bool check_children;
u64 addr;
+ down_read(&pci_acpi_companion_lookup_sem);
+
+ adev = pci_acpi_find_companion_hook ?
+ pci_acpi_find_companion_hook(pci_dev) : NULL;
+
+ up_read(&pci_acpi_companion_lookup_sem);
+
+ if (adev)
+ return adev;
+
check_children = pci_is_bridge(pci_dev);
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
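[Editor's note: only one companion-lookup hook can be installed at a time (a second install returns -EBUSY), so callers bracket their bus enumeration with set/clear the way VMD does above. A hedged minimal sketch; my_find_companion() is an illustrative callback, not a kernel API:

    static struct acpi_device *my_find_companion(struct pci_dev *pdev);

    static void demo_scan(struct pci_bus *bus)
    {
            bool installed =
                    !pci_acpi_set_companion_lookup_hook(my_find_companion);

            pci_scan_child_bus(bus);

            if (installed)
                    pci_acpi_clear_companion_lookup_hook();
    }
]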
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index b31883022a8e..49bbd37ee318 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -54,7 +54,7 @@ struct pci_bridge_emul_pcie_conf {
__le16 slotctl;
__le16 slotsta;
__le16 rootctl;
- __le16 rsvd;
+ __le16 rootcap;
__le32 rootsta;
__le32 devcap2;
__le16 devctl2;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b70f61fbcd4b..7fb5cd17cc98 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1367,7 +1367,7 @@ static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (!pdev->reset_fn)
+ if (!pci_reset_supported(pdev))
return 0;
return a->mode;
@@ -1491,6 +1491,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_config_attr_group,
&pci_dev_rom_attr_group,
&pci_dev_reset_attr_group,
+ &pci_dev_reset_method_attr_group,
&pci_dev_vpd_attr_group,
#ifdef CONFIG_DMI
&pci_dev_smbios_attr_group,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a5e6759c407b..ce2ab62b64cf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
+#include <linux/bitfield.h>
#include "pci.h"
DEFINE_MUTEX(pci_slot_mutex);
@@ -72,6 +73,11 @@ static void pci_dev_d3_sleep(struct pci_dev *dev)
msleep(delay);
}
+bool pci_reset_supported(struct pci_dev *dev)
+{
+ return dev->reset_methods[0] != 0;
+}
+
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif
@@ -206,32 +212,36 @@ int pci_status_get_and_clear_errors(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
#ifdef CONFIG_HAS_IOMEM
-void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
+ bool write_combine)
{
struct resource *res = &pdev->resource[bar];
+ resource_size_t start = res->start;
+ resource_size_t size = resource_size(res);
/*
* Make sure the BAR is actually a memory resource, not an IO resource
*/
if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
- pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
+ pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
return NULL;
}
- return ioremap(res->start, resource_size(res));
+
+ if (write_combine)
+ return ioremap_wc(start, size);
+
+ return ioremap(start, size);
+}
+
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+ return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
- /*
- * Make sure the BAR is actually a memory resource, not an IO resource
- */
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
- WARN_ON(1);
- return NULL;
- }
- return ioremap_wc(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
@@ -265,7 +275,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
*endptr = strchrnul(path, ';');
- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
+ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
if (!wpath)
return -ENOMEM;
@@ -915,8 +925,8 @@ static void pci_std_enable_acs(struct pci_dev *dev)
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
- /* Enable Translation Blocking for external devices */
- if (dev->external_facing || dev->untrusted)
+ /* Enable Translation Blocking for external devices and noats */
+ if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
ctrl |= (cap & PCI_ACS_TB);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
@@ -4629,31 +4639,11 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
/**
- * pcie_has_flr - check if a device supports function level resets
- * @dev: device to check
- *
- * Returns true if the device advertises support for PCIe function level
- * resets.
- */
-bool pcie_has_flr(struct pci_dev *dev)
-{
- u32 cap;
-
- if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
- return false;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- return cap & PCI_EXP_DEVCAP_FLR;
-}
-EXPORT_SYMBOL_GPL(pcie_has_flr);
-
-/**
* pcie_flr - initiate a PCIe function level reset
* @dev: device to reset
*
- * Initiate a function level reset on @dev. The caller should ensure the
- * device supports FLR before calling this function, e.g. by using the
- * pcie_has_flr() helper.
+ * Initiate a function level reset unconditionally on @dev without
+ * checking any device flags or DEVCAP
*/
int pcie_flr(struct pci_dev *dev)
{
@@ -4676,7 +4666,29 @@ int pcie_flr(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pcie_flr);
-static int pci_af_flr(struct pci_dev *dev, int probe)
+/**
+ * pcie_reset_flr - initiate a PCIe function level reset
+ * @dev: device to reset
+ * @probe: if true, return 0 if device can be reset this way
+ *
+ * Initiate a function level reset on @dev.
+ */
+int pcie_reset_flr(struct pci_dev *dev, bool probe)
+{
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return -ENOTTY;
+
+ if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ return pcie_flr(dev);
+}
+EXPORT_SYMBOL_GPL(pcie_reset_flr);
+
+static int pci_af_flr(struct pci_dev *dev, bool probe)
{
int pos;
u8 cap;
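[Editor's note: all reset methods in this series share the bool-probe convention introduced here: calling with probe == true only reports whether the method applies (0 on success), while probe == false performs the reset. A hedged sketch of a caller exercising pcie_reset_flr() under that convention:

    static int demo_try_flr(struct pci_dev *dev)
    {
            int rc = pcie_reset_flr(dev, true);  /* probe: 0 if supported */
            if (rc)
                    return rc;                   /* e.g. -ENOTTY */

            return pcie_reset_flr(dev, false);   /* actually reset */
    }
]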
@@ -4723,7 +4735,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
/**
* pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
* @dev: Device to reset.
- * @probe: If set, only check if the device can be reset this way.
+ * @probe: if true, return 0 if the device can be reset this way.
*
* If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
* unset, it will be reinitialized internally when going from PCI_D3hot to
@@ -4735,7 +4747,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
* by default (i.e. unless the @dev's d3hot_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
-static int pci_pm_reset(struct pci_dev *dev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, bool probe)
{
u16 csr;
@@ -4995,7 +5007,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
{
struct pci_dev *pdev;
@@ -5013,7 +5025,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
return pci_bridge_secondary_bus_reset(dev->bus->self);
}
-static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
{
int rc = -ENOTTY;
@@ -5028,7 +5040,7 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
return rc;
}
-static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
{
if (dev->multifunction || dev->subordinate || !dev->slot ||
dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
@@ -5037,7 +5049,7 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
-static int pci_reset_bus_function(struct pci_dev *dev, int probe)
+static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
int rc;
@@ -5121,6 +5133,139 @@ static void pci_dev_restore(struct pci_dev *dev)
err_handler->reset_done(dev);
}
+/* dev->reset_methods[] is a 0-terminated list of indices into this array */
+static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+ { },
+ { pci_dev_specific_reset, .name = "device_specific" },
+ { pci_dev_acpi_reset, .name = "acpi" },
+ { pcie_reset_flr, .name = "flr" },
+ { pci_af_flr, .name = "af_flr" },
+ { pci_pm_reset, .name = "pm" },
+ { pci_reset_bus_function, .name = "bus" },
+};
+
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ options = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ while ((name = strsep(&options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ goto error;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ goto error;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ goto error;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ kfree(options);
+ return count;
+
+error:
+ /* Leave previous methods unchanged */
+ kfree(options);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_reset_supported(pdev))
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_method_attr_is_visible,
+};
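
The attribute is plain text: a read yields the ordered, space-separated method names; a write takes an ordered subset of them, "default" to re-probe, or an empty string to disable resets. A userspace sketch in C (the BDF path is an example, not from the patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *attr = "/sys/bus/pci/devices/0000:03:00.0/reset_method";
        char buf[128];
        ssize_t n;
        int fd = open(attr, O_RDWR);

        if (fd < 0)
                return 1;

        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("supported: %s", buf);   /* e.g. "flr bus" */
        }

        /* Restrict the device to FLR only; "default" would restore all */
        if (write(fd, "flr", strlen("flr")) < 0)
                perror("write");

        close(fd);
        return 0;
}
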
+
/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
@@ -5143,66 +5288,64 @@ static void pci_dev_restore(struct pci_dev *dev)
*/
int __pci_reset_function_locked(struct pci_dev *dev)
{
- int rc;
+ int i, m, rc = -ENOTTY;
might_sleep();
/*
- * A reset method returns -ENOTTY if it doesn't support this device
- * and we should try the next method.
+ * A reset method returns -ENOTTY if it doesn't support this device and
+ * we should try the next method.
*
- * If it returns 0 (success), we're finished. If it returns any
- * other error, we're also finished: this indicates that further
- * reset mechanisms might be broken on the device.
+ * If it returns 0 (success), we're finished. If it returns any other
+ * error, we're also finished: this indicates that further reset
+ * mechanisms might be broken on the device.
*/
- rc = pci_dev_specific_reset(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- if (pcie_has_flr(dev)) {
- rc = pcie_flr(dev);
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = dev->reset_methods[i];
+ if (!m)
+ return -ENOTTY;
+
+ rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ if (!rc)
+ return 0;
if (rc != -ENOTTY)
return rc;
}
- rc = pci_af_flr(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- rc = pci_pm_reset(dev, 0);
- if (rc != -ENOTTY)
- return rc;
- return pci_reset_bus_function(dev, 0);
+
+ return -ENOTTY;
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
/**
- * pci_probe_reset_function - check whether the device can be safely reset
- * @dev: PCI device to reset
+ * pci_init_reset_methods - check whether device can be safely reset and store supported reset mechanisms
+ * @dev: PCI device to check for reset mechanisms
*
* Some devices allow an individual function to be reset without affecting
- * other functions in the same device. The PCI device must be responsive
- * to PCI config space in order to use this function.
+ * other functions in the same device. The PCI device must be in D0-D3hot
+ * state.
*
- * Returns 0 if the device function can be reset or negative if the
- * device doesn't support resetting a single function.
+ * Stores reset mechanisms supported by device in reset_methods byte array
+ * which is a member of struct pci_dev.
*/
-int pci_probe_reset_function(struct pci_dev *dev)
+void pci_init_reset_methods(struct pci_dev *dev)
{
- int rc;
+ int m, i, rc;
+
+ BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
might_sleep();
- rc = pci_dev_specific_reset(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- if (pcie_has_flr(dev))
- return 0;
- rc = pci_af_flr(dev, 1);
- if (rc != -ENOTTY)
- return rc;
- rc = pci_pm_reset(dev, 1);
- if (rc != -ENOTTY)
- return rc;
+ i = 0;
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
+ if (!rc)
+ dev->reset_methods[i++] = m;
+ else if (rc != -ENOTTY)
+ break;
+ }
- return pci_reset_bus_function(dev, 1);
+ dev->reset_methods[i] = 0;
}
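
The result is a 0-terminated list of indices into pci_reset_fn_methods[], ordered by priority. A hypothetical debug helper (not part of this patch) makes the layout explicit:

static void pci_dump_reset_methods(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_NUM_RESET_METHODS && dev->reset_methods[i]; i++)
                pci_info(dev, "reset method %d: %s\n", i,
                         pci_reset_fn_methods[dev->reset_methods[i]].name);
}
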
/**
@@ -5225,7 +5368,7 @@ int pci_reset_function(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
pci_dev_lock(dev);
@@ -5261,7 +5404,7 @@ int pci_reset_function_locked(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
pci_dev_save_and_disable(dev);
@@ -5284,7 +5427,7 @@ int pci_try_reset_function(struct pci_dev *dev)
{
int rc;
- if (!dev->reset_fn)
+ if (!pci_reset_supported(dev))
return -ENOTTY;
if (!pci_dev_trylock(dev))
@@ -5512,7 +5655,7 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
}
}
-static int pci_slot_reset(struct pci_slot *slot, int probe)
+static int pci_slot_reset(struct pci_slot *slot, bool probe)
{
int rc;
@@ -5540,7 +5683,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
*/
int pci_probe_reset_slot(struct pci_slot *slot)
{
- return pci_slot_reset(slot, 1);
+ return pci_slot_reset(slot, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
@@ -5563,14 +5706,14 @@ static int __pci_reset_slot(struct pci_slot *slot)
{
int rc;
- rc = pci_slot_reset(slot, 1);
+ rc = pci_slot_reset(slot, PCI_RESET_PROBE);
if (rc)
return rc;
if (pci_slot_trylock(slot)) {
pci_slot_save_and_disable_locked(slot);
might_sleep();
- rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
pci_slot_restore_locked(slot);
pci_slot_unlock(slot);
} else
@@ -5579,7 +5722,7 @@ static int __pci_reset_slot(struct pci_slot *slot)
return rc;
}
-static int pci_bus_reset(struct pci_bus *bus, int probe)
+static int pci_bus_reset(struct pci_bus *bus, bool probe)
{
int ret;
@@ -5625,14 +5768,14 @@ int pci_bus_error_reset(struct pci_dev *bridge)
goto bus_reset;
list_for_each_entry(slot, &bus->slots, list)
- if (pci_slot_reset(slot, 0))
+ if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
goto bus_reset;
mutex_unlock(&pci_slot_mutex);
return 0;
bus_reset:
mutex_unlock(&pci_slot_mutex);
- return pci_bus_reset(bridge->subordinate, 0);
+ return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
}
/**
@@ -5643,7 +5786,7 @@ bus_reset:
*/
int pci_probe_reset_bus(struct pci_bus *bus)
{
- return pci_bus_reset(bus, 1);
+ return pci_bus_reset(bus, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
@@ -5657,7 +5800,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
- rc = pci_bus_reset(bus, 1);
+ rc = pci_bus_reset(bus, PCI_RESET_PROBE);
if (rc)
return rc;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2f52110cac97..1cce56c2aea0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -33,10 +33,32 @@ enum pci_mmap_api {
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
enum pci_mmap_api mmap_api);
-int pci_probe_reset_function(struct pci_dev *dev);
+bool pci_reset_supported(struct pci_dev *dev);
+void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
+struct pci_cap_saved_data {
+ u16 cap_nr;
+ bool cap_extended;
+ unsigned int size;
+ u32 data[];
+};
+
+struct pci_cap_saved_state {
+ struct hlist_node next;
+ struct pci_cap_saved_data cap;
+};
+
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+void pci_free_cap_save_buffers(struct pci_dev *dev);
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
+ u16 cap, unsigned int size);
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
+ u16 cap);
+
#define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */
#define PCI_PM_D3HOT_WAIT 10 /* msec */
#define PCI_PM_D3COLD_WAIT 100 /* msec */
@@ -100,8 +122,6 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
-void pci_allocate_cap_save_buffers(struct pci_dev *dev);
-void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
@@ -604,13 +624,18 @@ static inline void pci_ptm_init(struct pci_dev *dev) { }
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
- int (*reset)(struct pci_dev *dev, int probe);
+ int (*reset)(struct pci_dev *dev, bool probe);
+};
+
+struct pci_reset_fn_method {
+ int (*reset_fn)(struct pci_dev *pdev, bool probe);
+ char *name;
};
#ifdef CONFIG_PCI_QUIRKS
-int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
#else
-static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+static inline int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
}
@@ -698,7 +723,15 @@ static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL
#ifdef CONFIG_ACPI
int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
+void pci_set_acpi_fwnode(struct pci_dev *dev);
+int pci_dev_acpi_reset(struct pci_dev *dev, bool probe);
#else
+static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
+{
+ return -ENOTTY;
+}
+
+static inline void pci_set_acpi_fwnode(struct pci_dev *dev) {}
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
return -ENODEV;
@@ -709,4 +742,6 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
+extern const struct attribute_group pci_dev_reset_method_attr_group;
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index df4ba9b384c2..9784fdcf3006 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1407,13 +1407,11 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
}
if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
- if (pcie_has_flr(dev)) {
- rc = pcie_flr(dev);
- pci_info(dev, "has been reset (%d)\n", rc);
- } else {
- pci_info(dev, "not reset (no FLR support)\n");
- rc = -ENOTTY;
- }
+ rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
+ if (!rc)
+ pci_info(dev, "has been reset\n");
+ else
+ pci_info(dev, "not reset (no FLR support: %d)\n", rc);
} else {
rc = pci_bus_error_reset(dev);
pci_info(dev, "%s Port link has been reset (%d)\n",
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e1fed6649c41..3ee63968deaa 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -257,8 +257,13 @@ static int get_port_device_capability(struct pci_dev *dev)
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- services |= PCIE_PORT_SERVICE_BWNOTIF;
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ u32 linkcap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
+ if (linkcap & PCI_EXP_LNKCAP_LBNC)
+ services |= PCIE_PORT_SERVICE_BWNOTIF;
+ }
return services;
}
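
The service is now registered only when the port actually advertises Link Bandwidth Notification capability. The same test, as a standalone sketch (hypothetical helper name):

static bool pcie_port_has_bwnotif(struct pci_dev *dev)
{
        u32 linkcap;

        pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
        return linkcap & PCI_EXP_LNKCAP_LBNC;
}
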
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 8a4ad974c5ac..368a254e3124 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -60,10 +60,8 @@ void pci_save_ptm_state(struct pci_dev *dev)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for PTM\n");
+ if (!save_state)
return;
- }
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 79177ac37880..d9fc02a71baa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -19,6 +19,7 @@
#include <linux/hypervisor.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
+#include <linux/bitfield.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -594,6 +595,7 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_pme = 1;
bridge->native_ltr = 1;
bridge->native_dpc = 1;
+ bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
device_initialize(&bridge->dev);
}
@@ -828,11 +830,15 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
struct irq_domain *d;
+ /* If the host bridge driver sets an MSI domain on the bridge, use it */
+ d = dev_get_msi_domain(bus->bridge);
+
/*
* Any firmware interface that can resolve the msi_domain
* should be called from here.
*/
- d = pci_host_bridge_of_msi_domain(bus);
+ if (!d)
+ d = pci_host_bridge_of_msi_domain(bus);
if (!d)
d = pci_host_bridge_acpi_msi_domain(bus);
@@ -898,7 +904,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+ if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
+ bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+ else
+ bus->domain_nr = bridge->domain_nr;
#endif
b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
@@ -1498,8 +1507,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
- pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
+ pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
parent = pci_upstream_bridge(pdev);
if (!parent)
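
Caching the whole Device Capabilities dword means later code can test bits without another config read; the pcie_reset_flr() added earlier in this patch depends on exactly this. A one-line consumer, sketched as a hypothetical helper:

static bool pcie_cap_has_flr(struct pci_dev *pdev)
{
        /* pdev->devcap was filled in by set_pcie_port_type() */
        return pdev->devcap & PCI_EXP_DEVCAP_FLR;
}
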
@@ -1809,6 +1818,9 @@ int pci_setup_device(struct pci_dev *dev)
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
+ pci_set_of_node(dev);
+ pci_set_acpi_fwnode(dev);
+
pci_dev_assign_slot(dev);
/*
@@ -1946,6 +1958,7 @@ int pci_setup_device(struct pci_dev *dev)
default: /* unknown header */
pci_err(dev, "unknown header type %02x, ignoring device\n",
dev->hdr_type);
+ pci_release_of_node(dev);
return -EIO;
bad:
@@ -2225,7 +2238,6 @@ static void pci_release_capabilities(struct pci_dev *dev)
{
pci_aer_exit(dev);
pci_rcec_exit(dev);
- pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
}
@@ -2374,10 +2386,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
- pci_set_of_node(dev);
-
if (pci_setup_device(dev)) {
- pci_release_of_node(dev);
pci_bus_put(dev->bus);
kfree(dev);
return NULL;
@@ -2428,9 +2437,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_rcec_init(dev); /* Root Complex Event Collector */
pcie_report_downtraining(dev);
-
- if (pci_probe_reset_function(dev) == 0)
- dev->reset_fn = 1;
+ pci_init_reset_methods(dev);
}
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index d32fbfc93ea9..cb18f8a13ab6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -83,6 +83,7 @@ static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
buf += 4;
pos += 4;
cnt -= 4;
+ cond_resched();
}
if (cnt >= 2) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ab3de1551b50..e5089af8ad90 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1822,6 +1822,45 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
/*
+ * HiSilicon KunPeng920 and KunPeng930 have devices that appear as PCI but
+ * are actually on the AMBA bus. These fake PCI devices can support SVA via
+ * the SMMU stall feature, by setting dma-can-stall for ACPI platforms.
+ *
+ * Normally stalling must not be enabled for PCI devices, since it would
+ * break the PCI requirement for free-flowing writes and may lead to
+ * deadlock. We expect PCI devices to support ATS and PRI if they want to
+ * be fault-tolerant, so there's no ACPI binding to describe anything else,
+ * even when a "PCI" device turns out to be a regular old SoC device
+ * dressed up as an RCiEP and normal rules don't apply.
+ */
+static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
+{
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("dma-can-stall"),
+ {},
+ };
+
+ if (pdev->revision != 0x21 && pdev->revision != 0x30)
+ return;
+
+ pdev->pasid_no_tlp = 1;
+
+ /*
+ * Set the dma-can-stall property on ACPI platforms. Device tree
+ * can set it directly.
+ */
+ if (!pdev->dev.of_node &&
+ device_add_properties(&pdev->dev, properties))
+ pci_warn(pdev, "could not add stall property");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+
+/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
* together on certain PXH-based systems.
*/
@@ -3235,12 +3274,13 @@ static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
/*
* Intel 5000 and 5100 Memory controllers have an erratum with read completion
@@ -3703,7 +3743,7 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
-static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe)
{
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
@@ -3725,7 +3765,7 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
-static int reset_ivb_igd(struct pci_dev *dev, int probe)
+static int reset_ivb_igd(struct pci_dev *dev, bool probe)
{
void __iomem *mmio_base;
unsigned long timeout;
@@ -3768,7 +3808,7 @@ reset_complete:
}
/* Device-specific reset method for Chelsio T4-based adapters */
-static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe)
{
u16 old_command;
u16 msix_flags;
@@ -3846,14 +3886,14 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
* Chapter 3: NVMe control registers
* Chapter 7.3: Reset behavior
*/
-static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
+static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
{
void __iomem *bar;
u16 cmd;
u32 cfg;
if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
- !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
+ pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0))
return -ENOTTY;
if (probe)
@@ -3920,15 +3960,12 @@ static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
* device too soon after FLR. A 250ms delay after FLR has heuristically
* proven to produce reliably working results for device assignment cases.
*/
-static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
{
- if (!pcie_has_flr(dev))
- return -ENOTTY;
-
if (probe)
- return 0;
+ return pcie_reset_flr(dev, PCI_RESET_PROBE);
- pcie_flr(dev);
+ pcie_reset_flr(dev, PCI_RESET_DO_RESET);
msleep(250);
@@ -3943,7 +3980,7 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
-static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+static int reset_hinic_vf_dev(struct pci_dev *pdev, bool probe)
{
unsigned long timeout;
void __iomem *bar;
@@ -4020,7 +4057,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
* because when a host assigns a device to a guest VM, the host may need
* to reset the device but probably doesn't have a driver for it.
*/
-int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
{
const struct pci_dev_reset_methods *i;
@@ -4615,6 +4652,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+/*
+ * Each of these NXP Root Ports is in a Root Complex with a unique segment
+ * number and does provide isolation features to disable peer transactions
+ * and validate bus numbers in requests, but does not provide an ACS
+ * capability.
+ */
+static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
{
if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
@@ -4841,6 +4890,10 @@ static const struct pci_dev_acs_enabled {
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
+ /* Cavium multi-function devices */
+ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
/* Ampere Computing */
@@ -4861,6 +4914,39 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* NXP root ports, xx=16, 12, or 08 cores */
+ /* LX2xx0A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
+ /* LX2xx0C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
+ /* LX2xx0E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
+ /* LX2xx0N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
+ /* LX2xx2A : without security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
+ /* LX2xx2C : security features + CAN-FD */
+ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2E : security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
+ /* LX2xx2N : without security features + CAN */
+ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
+ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
@@ -5032,7 +5118,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_CR);
ctrl |= (cap & PCI_ACS_UF);
- if (dev->external_facing || dev->untrusted)
+ if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
ctrl |= (cap & PCI_ACS_TB);
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
@@ -5630,7 +5716,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
pdev->subsystem_device != 0x222e ||
- !pdev->reset_fn)
+ !pci_reset_supported(pdev))
return;
if (pci_enable_device_mem(pdev))
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index dd12c2fcc7dc..4c54c75050dc 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,7 +19,6 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
- dev->reset_fn = 0;
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 8b003c890b87..61a6fe3cde21 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -19,11 +19,12 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
u8 byte;
u16 word;
u32 dword;
- long err;
- int cfg_ret;
+ int err, cfg_ret;
+ err = -EPERM;
+ dev = NULL;
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ goto error;
err = -ENODEV;
dev = pci_get_domain_bus_and_slot(0, bus, dfn);
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 26bf7c877de5..25557b272a4f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -9,116 +9,94 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
+#include <asm/unaligned.h>
#include "pci.h"
-/* VPD access through PCI 2.2+ VPD capability */
+#define PCI_VPD_LRDT_TAG_SIZE 3
+#define PCI_VPD_SRDT_LEN_MASK 0x07
+#define PCI_VPD_SRDT_TAG_SIZE 1
+#define PCI_VPD_STIN_END 0x0f
+#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-struct pci_vpd_ops {
- ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
- ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
-};
+static u16 pci_vpd_lrdt_size(const u8 *lrdt)
+{
+ return get_unaligned_le16(lrdt + 1);
+}
-struct pci_vpd {
- const struct pci_vpd_ops *ops;
- struct mutex lock;
- unsigned int len;
- u16 flag;
- u8 cap;
- unsigned int busy:1;
- unsigned int valid:1;
-};
+static u8 pci_vpd_srdt_tag(const u8 *srdt)
+{
+ return *srdt >> 3;
+}
-static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
+static u8 pci_vpd_srdt_size(const u8 *srdt)
{
- return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ return *srdt & PCI_VPD_SRDT_LEN_MASK;
}
-/**
- * pci_read_vpd - Read one entry from Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to read
- * @buf: pointer to where to store result
- */
-ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+static u8 pci_vpd_info_field_size(const u8 *info_field)
{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->read(dev, pos, count, buf);
+ return info_field[2];
}
-EXPORT_SYMBOL(pci_read_vpd);
-/**
- * pci_write_vpd - Write entry to Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to write
- * @buf: buffer containing write data
- */
-ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+/* VPD access through PCI 2.2+ VPD capability */
+
+static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev)
{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->write(dev, pos, count, buf);
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
-EXPORT_SYMBOL(pci_write_vpd);
-#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+#define PCI_VPD_SZ_INVALID UINT_MAX
/**
* pci_vpd_size - determine actual size of Vital Product Data
* @dev: pci device struct
- * @old_size: current assumed size, also maximum allowed size
*/
-static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
+static size_t pci_vpd_size(struct pci_dev *dev)
{
- size_t off = 0;
- unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
+ size_t off = 0, size;
+ unsigned char tag, header[1+2]; /* 1 byte tag, 2 bytes length */
- while (off < old_size && pci_read_vpd(dev, off, 1, header) == 1) {
- unsigned char tag;
+ /* Otherwise the following reads would fail. */
+ dev->vpd.len = PCI_VPD_MAX_SIZE;
- if (!header[0] && !off) {
- pci_info(dev, "Invalid VPD tag 00, assume missing optional VPD EPROM\n");
- return 0;
- }
+ while (pci_read_vpd(dev, off, 1, header) == 1) {
+ size = 0;
+
+ if (off == 0 && (header[0] == 0x00 || header[0] == 0xff))
+ goto error;
if (header[0] & PCI_VPD_LRDT) {
/* Large Resource Data Type Tag */
- tag = pci_vpd_lrdt_tag(header);
- /* Only read length from known tag items */
- if ((tag == PCI_VPD_LTIN_ID_STRING) ||
- (tag == PCI_VPD_LTIN_RO_DATA) ||
- (tag == PCI_VPD_LTIN_RW_DATA)) {
- if (pci_read_vpd(dev, off+1, 2,
- &header[1]) != 2) {
- pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
- tag, off + 1);
- return 0;
- }
- off += PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(header);
+ if (pci_read_vpd(dev, off + 1, 2, &header[1]) != 2) {
+ pci_warn(dev, "failed VPD read at offset %zu\n",
+ off + 1);
+ return off ?: PCI_VPD_SZ_INVALID;
}
+ size = pci_vpd_lrdt_size(header);
+ if (off + size > PCI_VPD_MAX_SIZE)
+ goto error;
+
+ off += PCI_VPD_LRDT_TAG_SIZE + size;
} else {
/* Short Resource Data Type Tag */
- off += PCI_VPD_SRDT_TAG_SIZE +
- pci_vpd_srdt_size(header);
tag = pci_vpd_srdt_tag(header);
- }
-
- if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
- return off;
+ size = pci_vpd_srdt_size(header);
+ if (off + size > PCI_VPD_MAX_SIZE)
+ goto error;
- if ((tag != PCI_VPD_LTIN_ID_STRING) &&
- (tag != PCI_VPD_LTIN_RO_DATA) &&
- (tag != PCI_VPD_LTIN_RW_DATA)) {
- pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
- (header[0] & PCI_VPD_LRDT) ? "large" : "short",
- tag, off);
- return 0;
+ off += PCI_VPD_SRDT_TAG_SIZE + size;
+ if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
+ return off;
}
}
- return 0;
+ return off;
+
+error:
+ pci_info(dev, "invalid VPD tag %#04x (size %zu) at offset %zu%s\n",
+ header[0], size, off, off == 0 ?
+ "; assume missing optional EEPROM" : "");
+ return off ?: PCI_VPD_SZ_INVALID;
}
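
For reference while reading the loop above: a Large Resource item is one tag byte followed by a 16-bit little-endian length, while a Short Resource item encodes its tag in the upper bits and a 3-bit length in the low bits of a single byte. A sketch consistent with the defines introduced at the top of this file:

static size_t vpd_item_size(const u8 *hdr)
{
        if (hdr[0] & PCI_VPD_LRDT)      /* bit 7 set: large item */
                return PCI_VPD_LRDT_TAG_SIZE + get_unaligned_le16(hdr + 1);

        return PCI_VPD_SRDT_TAG_SIZE + (hdr[0] & PCI_VPD_SRDT_LEN_MASK);
}
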
/*
@@ -126,33 +104,26 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
* This code has to spin since there is no other notification from the PCI
* hardware. Since the VPD is often implemented by serial attachment to an
* EEPROM, it may take many milliseconds to complete.
+ * @set: if true, wait for the flag to be set; otherwise wait for it to be cleared
*
* Returns 0 on success, negative values indicate error.
*/
-static int pci_vpd_wait(struct pci_dev *dev)
+static int pci_vpd_wait(struct pci_dev *dev, bool set)
{
- struct pci_vpd *vpd = dev->vpd;
+ struct pci_vpd *vpd = &dev->vpd;
unsigned long timeout = jiffies + msecs_to_jiffies(125);
unsigned long max_sleep = 16;
u16 status;
int ret;
- if (!vpd->busy)
- return 0;
-
do {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
return ret;
- if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
- vpd->busy = 0;
+ if (!!(status & PCI_VPD_ADDR_F) == set)
return 0;
- }
-
- if (fatal_signal_pending(current))
- return -EINTR;
if (time_after(jiffies, timeout))
break;
@@ -169,22 +140,17 @@ static int pci_vpd_wait(struct pci_dev *dev)
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_vpd *vpd = dev->vpd;
- int ret;
+ struct pci_vpd *vpd = &dev->vpd;
+ int ret = 0;
loff_t end = pos + count;
u8 *buf = arg;
+ if (!vpd->cap)
+ return -ENODEV;
+
if (pos < 0)
return -EINVAL;
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
if (pos > vpd->len)
return 0;
@@ -196,21 +162,20 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
while (pos < end) {
u32 val;
unsigned int i, skip;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
pos & ~3);
if (ret < 0)
break;
- vpd->busy = 1;
- vpd->flag = PCI_VPD_ADDR_F;
- ret = pci_vpd_wait(dev);
+ ret = pci_vpd_wait(dev, true);
if (ret < 0)
break;
@@ -228,7 +193,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
val >>= 8;
}
}
-out:
+
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
@@ -236,41 +201,26 @@ out:
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_vpd *vpd = dev->vpd;
+ struct pci_vpd *vpd = &dev->vpd;
const u8 *buf = arg;
loff_t end = pos + count;
int ret = 0;
+ if (!vpd->cap)
+ return -ENODEV;
+
if (pos < 0 || (pos & 3) || (count & 3))
return -EINVAL;
- if (!vpd->valid) {
- vpd->valid = 1;
- vpd->len = pci_vpd_size(dev, vpd->len);
- }
-
- if (vpd->len == 0)
- return -EIO;
-
if (end > vpd->len)
return -EINVAL;
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_wait(dev);
- if (ret < 0)
- goto out;
-
while (pos < end) {
- u32 val;
-
- val = *buf++;
- val |= *buf++ << 8;
- val |= *buf++ << 16;
- val |= *buf++ << 24;
-
- ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+ ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
+ get_unaligned_le32(buf));
if (ret < 0)
break;
ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
@@ -278,85 +228,28 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
if (ret < 0)
break;
- vpd->busy = 1;
- vpd->flag = 0;
- ret = pci_vpd_wait(dev);
+ ret = pci_vpd_wait(dev, false);
if (ret < 0)
break;
+ buf += sizeof(u32);
pos += sizeof(u32);
}
-out:
+
mutex_unlock(&vpd->lock);
return ret ? ret : count;
}
-static const struct pci_vpd_ops pci_vpd_ops = {
- .read = pci_vpd_read,
- .write = pci_vpd_write,
-};
-
-static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
-{
- struct pci_dev *tdev = pci_get_func0_dev(dev);
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_read_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
-{
- struct pci_dev *tdev = pci_get_func0_dev(dev);
- ssize_t ret;
-
- if (!tdev)
- return -ENODEV;
-
- ret = pci_write_vpd(tdev, pos, count, arg);
- pci_dev_put(tdev);
- return ret;
-}
-
-static const struct pci_vpd_ops pci_vpd_f0_ops = {
- .read = pci_vpd_f0_read,
- .write = pci_vpd_f0_write,
-};
-
void pci_vpd_init(struct pci_dev *dev)
{
- struct pci_vpd *vpd;
- u8 cap;
+ dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ mutex_init(&dev->vpd.lock);
- cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
- if (!cap)
- return;
-
- vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
- if (!vpd)
- return;
-
- vpd->len = PCI_VPD_MAX_SIZE;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
- vpd->ops = &pci_vpd_f0_ops;
- else
- vpd->ops = &pci_vpd_ops;
- mutex_init(&vpd->lock);
- vpd->cap = cap;
- vpd->busy = 0;
- vpd->valid = 0;
- dev->vpd = vpd;
-}
+ if (!dev->vpd.len)
+ dev->vpd.len = pci_vpd_size(dev);
-void pci_vpd_release(struct pci_dev *dev)
-{
- kfree(dev->vpd);
+ if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+ dev->vpd.cap = 0;
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -388,7 +281,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- if (!pdev->vpd)
+ if (!pdev->vpd.cap)
return 0;
return a->attr.mode;
@@ -399,23 +292,63 @@ const struct attribute_group pci_dev_vpd_attr_group = {
.is_bin_visible = vpd_attr_is_visible,
};
-int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt)
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
+{
+ unsigned int len = dev->vpd.len;
+ void *buf;
+ int cnt;
+
+ if (!dev->vpd.cap)
+ return ERR_PTR(-ENODEV);
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cnt = pci_read_vpd(dev, 0, len, buf);
+ if (cnt != len) {
+ kfree(buf);
+ return ERR_PTR(-EIO);
+ }
+
+ if (size)
+ *size = len;
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_alloc);
+
+static int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt, unsigned int *size)
{
int i = 0;
/* look for LRDT tags only, end tag is the only SRDT tag */
while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) {
- if (buf[i] == rdt)
+ unsigned int lrdt_len = pci_vpd_lrdt_size(buf + i);
+ u8 tag = buf[i];
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (tag == rdt) {
+ if (i + lrdt_len > len)
+ lrdt_len = len - i;
+ if (size)
+ *size = lrdt_len;
return i;
+ }
- i += PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(buf + i);
+ i += lrdt_len;
}
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size)
+{
+ return pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_ID_STRING, size);
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_id_string);
+
+static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
unsigned int len, const char *kw)
{
int i;
@@ -431,7 +364,106 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: PCI device struct
+ * @pos: offset in VPD space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_read(dev, pos, count, buf);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: PCI device struct
+ * @pos: offset in VPD space
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_write(dev, pos, count, buf);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size)
+{
+ int ro_start, infokw_start;
+ unsigned int ro_len, infokw_size;
+
+ ro_start = pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_RO_DATA, &ro_len);
+ if (ro_start < 0)
+ return ro_start;
+
+ infokw_start = pci_vpd_find_info_keyword(buf, ro_start, ro_len, kw);
+ if (infokw_start < 0)
+ return infokw_start;
+
+ infokw_size = pci_vpd_info_field_size(buf + infokw_start);
+ infokw_start += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (infokw_start + infokw_size > len)
+ return -EINVAL;
+
+ if (size)
+ *size = infokw_size;
+
+ return infokw_start;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_ro_info_keyword);
+
+int pci_vpd_check_csum(const void *buf, unsigned int len)
+{
+ const u8 *vpd = buf;
+ unsigned int size;
+ u8 csum = 0;
+ int rv_start;
+
+ rv_start = pci_vpd_find_ro_info_keyword(buf, len, PCI_VPD_RO_KEYWORD_CHKSUM, &size);
+ if (rv_start == -ENOENT) /* no checksum in VPD */
+ return 1;
+ else if (rv_start < 0)
+ return rv_start;
+
+ if (!size)
+ return -EINVAL;
+
+ while (rv_start >= 0)
+ csum += vpd[rv_start--];
+
+ return csum ? -EILSEQ : 0;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_check_csum);
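
Taken together, the new exported helpers replace the open-coded parsing drivers used to do. A hedged usage sketch (hypothetical function; the Part Number keyword is just an example):

static void example_vpd_usage(struct pci_dev *dev)
{
        unsigned int size, kw_size;
        void *vpd = pci_vpd_alloc(dev, &size);
        int start;

        if (IS_ERR(vpd))
                return;

        /* 0 = checksum valid, 1 = no checksum present, <0 = error */
        if (pci_vpd_check_csum(vpd, size) == 0) {
                start = pci_vpd_find_ro_info_keyword(vpd, size,
                                                     PCI_VPD_RO_KEYWORD_PARTNO,
                                                     &kw_size);
                if (start >= 0)
                        pci_info(dev, "part number: %.*s\n",
                                 (int)kw_size, (char *)vpd + start);
        }

        kfree(vpd);
}
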
#ifdef CONFIG_PCI_QUIRKS
/*
@@ -450,7 +482,7 @@ static void quirk_f0_vpd_link(struct pci_dev *dev)
if (!f0)
return;
- if (f0->vpd && dev->class == f0->class &&
+ if (f0->vpd.cap && dev->class == f0->class &&
dev->vendor == f0->vendor && dev->device == f0->device)
dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
@@ -468,41 +500,27 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
*/
static void quirk_blacklist_vpd(struct pci_dev *dev)
{
- if (dev->vpd) {
- dev->vpd->len = 0;
- pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
- }
+ dev->vpd.len = PCI_VPD_SZ_INVALID;
+ pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
- quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd);
/*
* The Amazon Annapurna Labs 0x0031 device id is reused for other non-Root Port
* device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
*/
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
- PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
-
-static void pci_vpd_set_size(struct pci_dev *dev, size_t len)
-{
- struct pci_vpd *vpd = dev->vpd;
-
- if (!vpd || len == 0 || len > PCI_VPD_MAX_SIZE)
- return;
-
- vpd->valid = 1;
- vpd->len = len;
-}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd);
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
@@ -522,12 +540,12 @@ static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
* limits.
*/
if (chip == 0x0 && prod >= 0x20)
- pci_vpd_set_size(dev, 8192);
+ dev->vpd.len = 8192;
else if (chip >= 0x4 && func < 0x8)
- pci_vpd_set_size(dev, 2048);
+ dev->vpd.len = 2048;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
- quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_extend_vpd);
#endif
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 3e491dfb2525..937a14fa7448 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <linux/units.h>
#define STM32_USBPHYC_PLL 0x0
#define STM32_USBPHYC_MISC 0x8
@@ -47,7 +48,6 @@
#define PLL_FVCO_MHZ 2880
#define PLL_INFF_MIN_RATE_HZ 19200000
#define PLL_INFF_MAX_RATE_HZ 38400000
-#define HZ_PER_MHZ 1000000L
struct pll_params {
u8 ndiv;
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 41baccba033f..f901d2e43166 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o
obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index aa7f7aa77297..a7404d69b2d3 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -279,6 +279,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
msg->insize = sizeof(struct ec_response_get_protocol_info);
ret = send_command(ec_dev, msg);
+ /*
+ * Send the command once more if a timeout occurred.
+ * The Fingerprint MCU (FPMCU) is restarted during system boot, which
+ * introduces a small window in which the FPMCU won't respond to any
+ * messages sent by the kernel. There is no need to wait before the next
+ * attempt because we already waited at least EC_MSG_DEADLINE_MS.
+ */
+ if (ret == -ETIMEDOUT)
+ ret = send_command(ec_dev, msg);
if (ret < 0) {
dev_dbg(ec_dev->dev,
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 8921f24e83ba..98e37080f760 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -17,6 +17,8 @@
#include <linux/sort.h>
#include <linux/slab.h>
+#include "cros_ec_trace.h"
+
/* Precision of fixed point for the m values from the filter */
#define M_PRECISION BIT(23)
@@ -291,6 +293,7 @@ cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
state->median_m = 0;
state->median_error = 0;
}
+ trace_cros_ec_sensorhub_filter(state, dx, dy);
}
/**
@@ -427,6 +430,11 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
if (new_timestamp - *current_timestamp > 0)
*current_timestamp = new_timestamp;
}
+ trace_cros_ec_sensorhub_timestamp(in->timestamp,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
}
if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
@@ -460,6 +468,12 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
/* Regular sample */
out->sensor_id = in->sensor_num;
+ trace_cros_ec_sensorhub_data(in->sensor_num,
+ fifo_info->timestamp,
+ fifo_timestamp,
+ *current_timestamp,
+ now);
+
if (*current_timestamp - now > 0) {
/*
* This fix is needed to overcome the timestamp filter putting
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index f744b21bc655..7e7cfc98657a 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/tracepoint.h>
@@ -70,6 +71,99 @@ TRACE_EVENT(cros_ec_request_done,
__entry->retval)
);
+TRACE_EVENT(cros_ec_sensorhub_timestamp,
+ TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp,
+ current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sample_timestamp)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sample_timestamp = ec_sample_timestamp;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sample_timestamp,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_data,
+ TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp,
+ s64 current_timestamp, s64 current_time),
+ TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time),
+ TP_STRUCT__entry(
+ __field(u32, ec_sensor_num)
+ __field(u32, ec_fifo_timestamp)
+ __field(s64, fifo_timestamp)
+ __field(s64, current_timestamp)
+ __field(s64, current_time)
+ __field(s64, delta)
+ ),
+ TP_fast_assign(
+ __entry->ec_sensor_num = ec_sensor_num;
+ __entry->ec_fifo_timestamp = ec_fifo_timestamp;
+ __entry->fifo_timestamp = fifo_timestamp;
+ __entry->current_timestamp = current_timestamp;
+ __entry->current_time = current_time;
+ __entry->delta = current_timestamp - current_time;
+ ),
+ TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld",
+ __entry->ec_sensor_num,
+ __entry->ec_fifo_timestamp,
+ __entry->fifo_timestamp,
+ __entry->current_timestamp,
+ __entry->current_time,
+ __entry->delta
+ )
+);
+
+TRACE_EVENT(cros_ec_sensorhub_filter,
+ TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy),
+ TP_ARGS(state, dx, dy),
+ TP_STRUCT__entry(
+ __field(s64, dx)
+ __field(s64, dy)
+ __field(s64, median_m)
+ __field(s64, median_error)
+ __field(s64, history_len)
+ __field(s64, x)
+ __field(s64, y)
+ ),
+ TP_fast_assign(
+ __entry->dx = dx;
+ __entry->dy = dy;
+ __entry->median_m = state->median_m;
+ __entry->median_error = state->median_error;
+ __entry->history_len = state->history_len;
+ __entry->x = state->x_offset;
+ __entry->y = state->y_offset;
+ ),
+ TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld",
+ __entry->dx,
+ __entry->dy,
+ __entry->median_m,
+ __entry->median_error,
+ __entry->history_len,
+ __entry->x,
+ __entry->y
+ )
+);
+
#endif /* _CROS_EC_TRACE_H_ */
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 27c068c4c38d..262a891eded3 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -1054,24 +1054,6 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
-/* Check the EC feature flags to see if TYPEC_* features are supported. */
-static int cros_typec_feature_supported(struct cros_typec_data *typec, enum ec_feature_code feature)
-{
- struct ec_response_get_features resp = {};
- int ret;
-
- ret = cros_typec_ec_command(typec, 0, EC_CMD_GET_FEATURES, NULL, 0,
- &resp, sizeof(resp));
- if (ret < 0) {
- dev_warn(typec->dev,
- "Failed to get features, assuming typec feature=%d unsupported.\n",
- feature);
- return 0;
- }
-
- return resp.flags[feature / 32] & EC_FEATURE_MASK_1(feature);
-}
-
static void cros_typec_port_work(struct work_struct *work)
{
struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
@@ -1113,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, cros_typec_of_match);
static int cros_typec_probe(struct platform_device *pdev)
{
+ struct cros_ec_dev *ec_dev = NULL;
struct device *dev = &pdev->dev;
struct cros_typec_data *typec;
struct ec_response_usb_pd_ports resp;
@@ -1132,10 +1115,10 @@ static int cros_typec_probe(struct platform_device *pdev)
return ret;
}
- typec->typec_cmd_supported = !!cros_typec_feature_supported(typec,
- EC_FEATURE_TYPEC_CMD);
- typec->needs_mux_ack = !!cros_typec_feature_supported(typec,
- EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
+ ec_dev = dev_get_drvdata(&typec->ec->ec->dev);
+ typec->typec_cmd_supported = !!cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
+ typec->needs_mux_ack = !!cros_ec_check_features(ec_dev,
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);
ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index c76adedd58c9..aa29841bbb79 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -272,7 +272,7 @@ config PWM_IQS620A
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
depends on COMMON_CLK
select MFD_SYSCON
help
@@ -284,7 +284,8 @@ config PWM_JZ4740
config PWM_KEEMBAY
tristate "Intel Keem Bay PWM driver"
- depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on COMMON_CLK && HAS_IOMEM
help
The platform driver for Intel Keem Bay PWM controller.
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 35e894f4a379..4527f09a5c50 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
*
* Returns: 0 on success or a negative error code on failure.
*/
-int pwmchip_remove(struct pwm_chip *chip)
+void pwmchip_remove(struct pwm_chip *chip)
{
pwmchip_sysfs_unexport(chip);
@@ -318,8 +318,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
mutex_unlock(&pwm_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(pwmchip_remove);
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index e2a26d9da25b..ad37bc46f272 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -22,14 +22,21 @@
struct ab8500_pwm_chip {
struct pwm_chip chip;
+ unsigned int hwid;
};
+static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ab8500_pwm_chip, chip);
+}
+
static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
int ret;
u8 reg;
unsigned int higher_val, lower_val;
+ struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
@@ -37,7 +44,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 0);
+ 1 << ab8500->hwid, 0);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
@@ -56,7 +63,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
higher_val = ((state->duty_cycle & 0x0300) >> 8);
- reg = AB8500_PWM_OUT_CTRL1_REG + ((chip->base - 1) * 2);
+ reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
reg, (u8)lower_val);
@@ -70,7 +77,7 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = abx500_mask_and_set_register_interruptible(chip->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (chip->base - 1), 1 << (chip->base - 1));
+ 1 << ab8500->hwid, 1 << ab8500->hwid);
if (ret < 0)
dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
@@ -88,6 +95,9 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
struct ab8500_pwm_chip *ab8500;
int err;
+ if (pdev->id < 1 || pdev->id > 31)
+ return dev_err_probe(&pdev->dev, EINVAL, "Invalid device id %d\n", pdev->id);
+
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
@@ -99,27 +109,13 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
ab8500->chip.npwm = 1;
+ ab8500->hwid = pdev->id - 1;
- err = pwmchip_add(&ab8500->chip);
+ err = devm_pwmchip_add(&pdev->dev, &ab8500->chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
dev_dbg(&pdev->dev, "pwm probe successful\n");
- platform_set_drvdata(pdev, ab8500);
-
- return 0;
-}
-
-static int ab8500_pwm_remove(struct platform_device *pdev)
-{
- struct ab8500_pwm_chip *ab8500 = platform_get_drvdata(pdev);
- int err;
-
- err = pwmchip_remove(&ab8500->chip);
- if (err < 0)
- return err;
-
- dev_dbg(&pdev->dev, "pwm driver removed\n");
return 0;
}
@@ -129,7 +125,6 @@ static struct platform_driver ab8500_pwm_driver = {
.name = "ab8500-pwm",
},
.probe = ab8500_pwm_probe,
- .remove = ab8500_pwm_remove,
};
module_platform_driver(ab8500_pwm_driver);
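
The hwid conversion above stops abusing chip->base (a framework-assigned number) for hardware addressing and instead derives register offsets and enable bits from a driver-owned field, reached through a container_of() accessor. A minimal userspace model of that pattern, with made-up structure names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pwm_chip { int npwm; };

struct my_pwm_chip {
	struct pwm_chip chip;
	unsigned int hwid;	/* hardware instance, independent of chip->base */
};

static struct my_pwm_chip *to_my_pwm_chip(struct pwm_chip *chip)
{
	return container_of(chip, struct my_pwm_chip, chip);
}

int main(void)
{
	struct my_pwm_chip pc = { .chip = { .npwm = 1 }, .hwid = 3 };
	struct pwm_chip *chip = &pc.chip;	/* what the callbacks receive */

	/* Recover the wrapper and derive per-device values, e.g. 1 << hwid. */
	printf("enable mask: 0x%x\n", 1u << to_my_pwm_chip(chip)->hwid);
	return 0;
}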
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 4459325d3650..a43b2babc809 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -281,11 +281,8 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
static int atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&chip->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&chip->chip);
clk_disable_unprepare(chip->hlcdc->periph_clk);
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index bf398f21484d..36f7ea381838 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -503,11 +503,8 @@ err_slow_clk:
static int atmel_tcb_pwm_remove(struct platform_device *pdev)
{
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
- int err;
- err = pwmchip_remove(&tcbpwm->chip);
- if (err < 0)
- return err;
+ pwmchip_remove(&tcbpwm->chip);
clk_disable_unprepare(tcbpwm->slow_clk);
clk_put(tcbpwm->slow_clk);
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index a8162bae3e8a..e748604403cc 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -84,9 +84,19 @@ struct atmel_pwm_chip {
void __iomem *base;
const struct atmel_pwm_data *data;
- unsigned int updated_pwms;
- /* ISR is cleared when read, ensure only one thread does that */
- struct mutex isr_lock;
+ /*
+ * The hardware supports a mechanism to update a channel's duty cycle at
+ * the end of the currently running period. When such an update is
+ * pending we delay disabling the PWM until the new configuration is
+ * active because otherwise pmw_config(duty_cycle=0); pwm_disable();
+ * might not result in an inactive output.
+ * This bitmask tracks for which channels an update is pending in
+ * hardware.
+ */
+ u32 update_pending;
+
+ /* Protects .update_pending */
+ spinlock_t lock;
};
static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
@@ -123,6 +133,64 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
atmel_pwm_writel(chip, base + offset, val);
}
+static void atmel_pwm_update_pending(struct atmel_pwm_chip *chip)
+{
+ /*
+ * Each channel that has its bit in ISR set started a new period since
+ * ISR was cleared and so there is no more update pending. Note that
+ * reading ISR clears it, so this needs to handle all channels to not
+ * loose information.
+ */
+ u32 isr = atmel_pwm_readl(chip, PWM_ISR);
+
+ chip->update_pending &= ~isr;
+}
+
+static void atmel_pwm_set_pending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ spin_lock(&chip->lock);
+
+ /*
+ * Clear pending flags in hardware because otherwise there might still
+ * be a stale flag in ISR.
+ */
+ atmel_pwm_update_pending(chip);
+
+ chip->update_pending |= (1 << ch);
+
+ spin_unlock(&chip->lock);
+}
+
+static int atmel_pwm_test_pending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ int ret = 0;
+
+ spin_lock(&chip->lock);
+
+ if (chip->update_pending & (1 << ch)) {
+ atmel_pwm_update_pending(chip);
+
+ if (chip->update_pending & (1 << ch))
+ ret = 1;
+ }
+
+ spin_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int atmel_pwm_wait_nonpending(struct atmel_pwm_chip *chip, unsigned int ch)
+{
+ unsigned long timeout = jiffies + 2 * HZ;
+ int ret;
+
+ while ((ret = atmel_pwm_test_pending(chip, ch)) &&
+ time_before(jiffies, timeout))
+ usleep_range(10, 100);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
unsigned long clkrate,
const struct pwm_state *state,
@@ -185,6 +253,7 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm,
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty_upd, cdty);
+ atmel_pwm_set_pending(atmel_pwm, pwm->hwpwm);
}
static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
@@ -205,20 +274,8 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
unsigned long timeout = jiffies + 2 * HZ;
- /*
- * Wait for at least a complete period to have passed before disabling a
- * channel to be sure that CDTY has been updated
- */
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
-
- while (!(atmel_pwm->updated_pwms & (1 << pwm->hwpwm)) &&
- time_before(jiffies, timeout)) {
- usleep_range(10, 100);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- }
+ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
/*
@@ -292,10 +349,6 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
val |= PWM_CMR_CPOL;
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty);
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
} else if (cstate.enabled) {
atmel_pwm_disable(chip, pwm, true);
@@ -326,6 +379,9 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
tmp <<= pres;
state->period = DIV64_U64_ROUND_UP(tmp, rate);
+ /* Wait for an updated duty_cycle queued in hardware */
+ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm);
+
cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
atmel_pwm->data->regs.duty);
tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
@@ -416,9 +472,10 @@ static int atmel_pwm_probe(struct platform_device *pdev)
if (!atmel_pwm)
return -ENOMEM;
- mutex_init(&atmel_pwm->isr_lock);
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
- atmel_pwm->updated_pwms = 0;
+
+ atmel_pwm->update_pending = 0;
+ spin_lock_init(&atmel_pwm->lock);
atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pwm->base))
@@ -460,7 +517,6 @@ static int atmel_pwm_remove(struct platform_device *pdev)
pwmchip_remove(&atmel_pwm->chip);
clk_unprepare(atmel_pwm->clk);
- mutex_destroy(&atmel_pwm->isr_lock);
return 0;
}
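
The rework above hinges on PWM_ISR being read-to-clear: any read must fold every channel's bit into the software mask at once, or pending state for other channels is silently dropped. A small standalone simulation of that invariant; the fake ISR variable stands in for the hardware register.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_isr;	/* channels that started a new period */
static uint32_t update_pending;	/* channels with a queued duty update */

/* Reading the hardware ISR clears it, so handle all channels in one go. */
static uint32_t read_isr(void)
{
	uint32_t isr = fake_isr;

	fake_isr = 0;
	return isr;
}

static void update_pending_sync(void)
{
	update_pending &= ~read_isr();
}

static void set_pending(unsigned int ch)
{
	update_pending_sync();	/* drop stale flags first */
	update_pending |= 1u << ch;
}

static int test_pending(unsigned int ch)
{
	if (update_pending & (1u << ch))
		update_pending_sync();
	return !!(update_pending & (1u << ch));
}

int main(void)
{
	set_pending(0);
	set_pending(1);
	fake_isr = 1u << 0;	/* channel 0 started a new period */

	/* Prints 0 and 1: channel 0 resolved, channel 1 still pending. */
	printf("ch0 pending: %d, ch1 pending: %d\n",
	       test_pending(0), test_pending(1));
	return 0;
}

In the driver the same sequence runs under the new spinlock, since set and test may race from different contexts.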
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 8c85c66ea5c9..64148f5f81d0 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -267,8 +267,6 @@ static int kona_pwmc_probe(struct platform_device *pdev)
if (kp == NULL)
return -ENOMEM;
- platform_set_drvdata(pdev, kp);
-
kp->chip.dev = &pdev->dev;
kp->chip.ops = &kona_pwm_ops;
kp->chip.npwm = 6;
@@ -298,20 +296,13 @@ static int kona_pwmc_probe(struct platform_device *pdev)
clk_disable_unprepare(kp->clk);
- ret = pwmchip_add(&kp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &kp->chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
-static int kona_pwmc_remove(struct platform_device *pdev)
-{
- struct kona_pwmc *kp = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&kp->chip);
-}
-
static const struct of_device_id bcm_kona_pwmc_dt[] = {
{ .compatible = "brcm,kona-pwm" },
{ },
@@ -324,7 +315,6 @@ static struct platform_driver kona_pwmc_driver = {
.of_match_table = bcm_kona_pwmc_dt,
},
.probe = kona_pwmc_probe,
- .remove = kona_pwmc_remove,
};
module_platform_driver(kona_pwmc_driver);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 8b1d1e7aa856..3b529f82b97c 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -282,12 +282,11 @@ out_clk:
static int brcmstb_pwm_remove(struct platform_device *pdev)
{
struct brcmstb_pwm *p = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&p->chip);
+ pwmchip_remove(&p->chip);
clk_disable_unprepare(p->clk);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 9fffb566af5f..5e29d9c682c3 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -280,7 +280,9 @@ static int cros_ec_pwm_remove(struct platform_device *dev)
struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
struct pwm_chip *chip = &ec_pwm->chip;
- return pwmchip_remove(chip);
+ pwmchip_remove(chip);
+
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index fc3cb7d669c6..c45a75e65c86 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -183,27 +183,18 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
ep93xx_pwm->chip.npwm = 1;
- ret = pwmchip_add(&ep93xx_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &ep93xx_pwm->chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, ep93xx_pwm);
return 0;
}
-static int ep93xx_pwm_remove(struct platform_device *pdev)
-{
- struct ep93xx_pwm *ep93xx_pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&ep93xx_pwm->chip);
-}
-
static struct platform_driver ep93xx_pwm_driver = {
.driver = {
.name = "ep93xx-pwm",
},
.probe = ep93xx_pwm_probe,
- .remove = ep93xx_pwm_remove,
};
module_platform_driver(ep93xx_pwm_driver);
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 96ccd772280c..0247757f9a72 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -453,7 +453,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->chip.ops = &fsl_pwm_ops;
fpc->chip.npwm = 8;
- ret = pwmchip_add(&fpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &fpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
@@ -464,13 +464,6 @@ static int fsl_pwm_probe(struct platform_device *pdev)
return fsl_pwm_init(fpc);
}
-static int fsl_pwm_remove(struct platform_device *pdev)
-{
- struct fsl_pwm_chip *fpc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&fpc->chip);
-}
-
#ifdef CONFIG_PM_SLEEP
static int fsl_pwm_suspend(struct device *dev)
{
@@ -552,7 +545,6 @@ static struct platform_driver fsl_pwm_driver = {
.pm = &fsl_pwm_pm_ops,
},
.probe = fsl_pwm_probe,
- .remove = fsl_pwm_remove,
};
module_platform_driver(fsl_pwm_driver);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 4a6e9ad3c0ff..333f1b18ff4e 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -248,13 +248,15 @@ static int hibvt_pwm_remove(struct platform_device *pdev)
pwm_chip = platform_get_drvdata(pdev);
+ pwmchip_remove(&pwm_chip->chip);
+
reset_control_assert(pwm_chip->rstc);
msleep(30);
reset_control_deassert(pwm_chip->rstc);
clk_disable_unprepare(pwm_chip->clk);
- return pwmchip_remove(&pwm_chip->chip);
+ return 0;
}
static const struct of_device_id hibvt_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 11b16ecc4f96..f97f82548293 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -326,28 +326,14 @@ err_pm_disable:
static int img_pwm_remove(struct platform_device *pdev)
{
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
- u32 val;
- unsigned int i;
- int ret;
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put(&pdev->dev);
- return ret;
- }
-
- for (i = 0; i < pwm_chip->chip.npwm; i++) {
- val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
- val &= ~BIT(i);
- img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
- }
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
img_pwm_runtime_suspend(&pdev->dev);
- return pwmchip_remove(&pwm_chip->chip);
+ pwmchip_remove(&pwm_chip->chip);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index dbb50493abdd..e5e7b7c339a8 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -382,11 +382,12 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
static int pwm_imx_tpm_remove(struct platform_device *pdev)
{
struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
- int ret = pwmchip_remove(&tpm->chip);
+
+ pwmchip_remove(&tpm->chip);
clk_disable_unprepare(tpm->clk);
- return ret;
+ return 0;
}
static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index f6588a96fbd9..ea91a2f81a9f 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -313,8 +313,6 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (imx == NULL)
return -ENOMEM;
- platform_set_drvdata(pdev, imx);
-
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
@@ -342,16 +340,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (!(pwmcr & MX3_PWMCR_EN))
pwm_imx27_clk_disable_unprepare(imx);
- return pwmchip_add(&imx->chip);
-}
-
-static int pwm_imx27_remove(struct platform_device *pdev)
-{
- struct pwm_imx27_chip *imx;
-
- imx = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&imx->chip);
+ return devm_pwmchip_add(&pdev->dev, &imx->chip);
}
static struct platform_driver imx_pwm_driver = {
@@ -360,7 +349,6 @@ static struct platform_driver imx_pwm_driver = {
.of_match_table = pwm_imx27_dt_ids,
},
.probe = pwm_imx27_probe,
- .remove = pwm_imx27_remove,
};
module_platform_driver(imx_pwm_driver);
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index 015f5eba09a1..b66c35074087 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -176,8 +176,6 @@ static int lgm_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- platform_set_drvdata(pdev, pc);
-
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -210,20 +208,13 @@ static int lgm_pwm_probe(struct platform_device *pdev)
lgm_pwm_init(pc);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(dev, &pc->chip);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to add PWM chip\n");
return 0;
}
-static int lgm_pwm_remove(struct platform_device *pdev)
-{
- struct lgm_pwm_chip *pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static const struct of_device_id lgm_pwm_of_match[] = {
{ .compatible = "intel,lgm-pwm" },
{ }
@@ -236,7 +227,6 @@ static struct platform_driver lgm_pwm_driver = {
.of_match_table = lgm_pwm_of_match,
},
.probe = lgm_pwm_probe,
- .remove = lgm_pwm_remove,
};
module_platform_driver(lgm_pwm_driver);
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 6c6e26d18329..54bd95a5cab0 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -189,7 +189,6 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (!iqs620_pwm)
return -ENOMEM;
- platform_set_drvdata(pdev, iqs620_pwm);
iqs620_pwm->iqs62x = iqs62x;
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
@@ -224,31 +223,18 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = pwmchip_add(&iqs620_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &iqs620_pwm->chip);
if (ret)
dev_err(&pdev->dev, "Failed to add device: %d\n", ret);
return ret;
}
-static int iqs620_pwm_remove(struct platform_device *pdev)
-{
- struct iqs620_pwm_private *iqs620_pwm = platform_get_drvdata(pdev);
- int ret;
-
- ret = pwmchip_remove(&iqs620_pwm->chip);
- if (ret)
- dev_err(&pdev->dev, "Failed to remove device: %d\n", ret);
-
- return ret;
-}
-
static struct platform_driver iqs620_pwm_platform_driver = {
.driver = {
.name = "iqs620a-pwm",
},
.probe = iqs620_pwm_probe,
- .remove = iqs620_pwm_remove,
};
module_platform_driver(iqs620_pwm_platform_driver);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 990e7904c7f1..23dc1fb770e2 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -245,16 +245,7 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = info->num_pwms;
- platform_set_drvdata(pdev, jz4740);
-
- return pwmchip_add(&jz4740->chip);
-}
-
-static int jz4740_pwm_remove(struct platform_device *pdev)
-{
- struct jz4740_pwm_chip *jz4740 = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&jz4740->chip);
+ return devm_pwmchip_add(dev, &jz4740->chip);
}
static const struct soc_info __maybe_unused jz4740_soc_info = {
@@ -280,7 +271,6 @@ static struct platform_driver jz4740_pwm_driver = {
.of_match_table = of_match_ptr(jz4740_pwm_dt_ids),
},
.probe = jz4740_pwm_probe,
- .remove = jz4740_pwm_remove,
};
module_platform_driver(jz4740_pwm_driver);
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index 521a825c8ba0..733811b05721 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -207,22 +207,13 @@ static int keembay_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &keembay_pwm_ops;
priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(dev, &priv->chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
- platform_set_drvdata(pdev, priv);
-
return 0;
}
-static int keembay_pwm_remove(struct platform_device *pdev)
-{
- struct keembay_pwm *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->chip);
-}
-
static const struct of_device_id keembay_pwm_of_match[] = {
{ .compatible = "intel,keembay-pwm" },
{ }
@@ -231,7 +222,6 @@ MODULE_DEVICE_TABLE(of, keembay_pwm_of_match);
static struct platform_driver keembay_pwm_driver = {
.probe = keembay_pwm_probe,
- .remove = keembay_pwm_remove,
.driver = {
.name = "pwm-keembay",
.of_match_table = keembay_pwm_of_match,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 7551253ada32..ea17d446a627 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -276,16 +276,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
- platform_set_drvdata(pdev, lp3943_pwm);
-
- return pwmchip_add(&lp3943_pwm->chip);
-}
-
-static int lp3943_pwm_remove(struct platform_device *pdev)
-{
- struct lp3943_pwm *lp3943_pwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&lp3943_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, &lp3943_pwm->chip);
}
#ifdef CONFIG_OF
@@ -298,7 +289,6 @@ MODULE_DEVICE_TABLE(of, lp3943_pwm_of_match);
static struct platform_driver lp3943_pwm_driver = {
.probe = lp3943_pwm_probe,
- .remove = lp3943_pwm_remove,
.driver = {
.name = "lp3943-pwm",
.of_match_table = of_match_ptr(lp3943_pwm_of_match),
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 2834a0f001d3..ddeab5687cb8 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -117,29 +117,20 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 1;
- ret = pwmchip_add(&lpc32xx->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
- return ret;
- }
-
- /* When PWM is disable, configure the output to the default value */
+ /* If PWM is disabled, configure the output to the default value */
val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
val &= ~PWM_PIN_LEVEL;
writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
- platform_set_drvdata(pdev, lpc32xx);
+ ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
+ return ret;
+ }
return 0;
}
-static int lpc32xx_pwm_remove(struct platform_device *pdev)
-{
- struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&lpc32xx->chip);
-}
-
static const struct of_device_id lpc32xx_pwm_dt_ids[] = {
{ .compatible = "nxp,lpc3220-pwm", },
{ /* sentinel */ }
@@ -152,7 +143,6 @@ static struct platform_driver lpc32xx_pwm_driver = {
.of_match_table = lpc32xx_pwm_dt_ids,
},
.probe = lpc32xx_pwm_probe,
- .remove = lpc32xx_pwm_remove,
};
module_platform_driver(lpc32xx_pwm_driver);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b4a31060bcd7..0d4dd80e9f07 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -253,13 +253,11 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, pc);
-
pc->chip.dev = &pdev->dev;
pc->chip.ops = &pwm_mediatek_ops;
pc->chip.npwm = pc->soc->num_pwms;
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
@@ -268,13 +266,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
return 0;
}
-static int pwm_mediatek_remove(struct platform_device *pdev)
-{
- struct pwm_mediatek_chip *pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
@@ -335,7 +326,6 @@ static struct platform_driver pwm_mediatek_driver = {
.of_match_table = pwm_mediatek_of_match,
},
.probe = pwm_mediatek_probe,
- .remove = pwm_mediatek_remove,
};
module_platform_driver(pwm_mediatek_driver);
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 9b3ba401a3db..c605013e4114 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -5,6 +5,7 @@
* Author: YH Huang <yh.huang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -47,6 +48,7 @@ struct mtk_disp_pwm {
struct clk *clk_main;
struct clk *clk_mm;
void __iomem *base;
+ bool enabled;
};
static inline struct mtk_disp_pwm *to_mtk_disp_pwm(struct pwm_chip *chip)
@@ -66,14 +68,47 @@ static void mtk_disp_pwm_update_bits(struct mtk_disp_pwm *mdp, u32 offset,
writel(value, address);
}
-static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
u32 clk_div, period, high_width, value;
u64 div, rate;
int err;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ 0x0);
+
+ if (mdp->enabled) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
+ }
+
+ mdp->enabled = false;
+ return 0;
+ }
+
+ if (!mdp->enabled) {
+ err = clk_prepare_enable(mdp->clk_main);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n",
+ ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return err;
+ }
+ }
+
/*
* Find period, high_width and clk_div to suit duty_ns and period_ns.
* Calculate proper div value to keep period value in the bound.
@@ -85,29 +120,24 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* high_width = (PWM_CLK_RATE * duty_ns) / (10^9 * (clk_div + 1))
*/
rate = clk_get_rate(mdp->clk_main);
- clk_div = div_u64(rate * period_ns, NSEC_PER_SEC) >>
+ clk_div = mul_u64_u64_div_u64(state->period, rate, NSEC_PER_SEC) >>
PWM_PERIOD_BIT_WIDTH;
- if (clk_div > PWM_CLKDIV_MAX)
+ if (clk_div > PWM_CLKDIV_MAX) {
+ if (!mdp->enabled) {
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
+ }
return -EINVAL;
+ }
div = NSEC_PER_SEC * (clk_div + 1);
- period = div64_u64(rate * period_ns, div);
+ period = mul_u64_u64_div_u64(state->period, rate, div);
if (period > 0)
period--;
- high_width = div64_u64(rate * duty_ns, div);
+ high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
- if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
- }
-
mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
PWM_CLKDIV_MASK,
clk_div << PWM_CLKDIV_SHIFT);
@@ -122,50 +152,70 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
mdp->data->commit_mask,
0x0);
+ } else {
+ /*
+ * For MT2701, disable double buffer before writing register
+ * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+ */
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
}
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ mdp->data->enable_mask);
+ mdp->enabled = true;
return 0;
}
-static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
+ u64 rate, period, high_width;
+ u32 clk_div, con0, con1;
int err;
- err = clk_enable(mdp->clk_main);
- if (err < 0)
- return err;
-
- err = clk_enable(mdp->clk_mm);
+ err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- clk_disable(mdp->clk_main);
- return err;
+ dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ return;
}
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- mdp->data->enable_mask);
-
- return 0;
-}
-
-static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
-
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
- 0x0);
+ err = clk_prepare_enable(mdp->clk_mm);
+ if (err < 0) {
+ dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ clk_disable_unprepare(mdp->clk_main);
+ return;
+ }
- clk_disable(mdp->clk_mm);
- clk_disable(mdp->clk_main);
+ rate = clk_get_rate(mdp->clk_main);
+ con0 = readl(mdp->base + mdp->data->con0);
+ con1 = readl(mdp->base + mdp->data->con1);
+ state->enabled = !!(con0 & BIT(0));
+ clk_div = FIELD_GET(PWM_CLKDIV_MASK, con0);
+ period = FIELD_GET(PWM_PERIOD_MASK, con1);
+ /*
+ * period has 12 bits, clk_div 11 and NSEC_PER_SEC has 30,
+ * so period * (clk_div + 1) * NSEC_PER_SEC doesn't overflow.
+ */
+ state->period = DIV64_U64_ROUND_UP(period * (clk_div + 1) * NSEC_PER_SEC, rate);
+ high_width = FIELD_GET(PWM_HIGH_WIDTH_MASK, con1);
+ state->duty_cycle = DIV64_U64_ROUND_UP(high_width * (clk_div + 1) * NSEC_PER_SEC,
+ rate);
+ state->polarity = PWM_POLARITY_NORMAL;
+ clk_disable_unprepare(mdp->clk_mm);
+ clk_disable_unprepare(mdp->clk_main);
}
static const struct pwm_ops mtk_disp_pwm_ops = {
- .config = mtk_disp_pwm_config,
- .enable = mtk_disp_pwm_enable,
- .disable = mtk_disp_pwm_disable,
+ .apply = mtk_disp_pwm_apply,
+ .get_state = mtk_disp_pwm_get_state,
.owner = THIS_MODULE,
};
@@ -192,58 +242,28 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mdp->clk_mm))
return PTR_ERR(mdp->clk_mm);
- ret = clk_prepare(mdp->clk_main);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare(mdp->clk_mm);
- if (ret < 0)
- goto disable_clk_main;
-
mdp->chip.dev = &pdev->dev;
mdp->chip.ops = &mtk_disp_pwm_ops;
mdp->chip.npwm = 1;
ret = pwmchip_add(&mdp->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- goto disable_clk_mm;
+ dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
+ return ret;
}
platform_set_drvdata(pdev, mdp);
- /*
- * For MT2701, disable double buffer before writing register
- * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
- */
- if (!mdp->data->has_commit) {
- mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
- mdp->data->bls_debug_mask,
- mdp->data->bls_debug_mask);
- mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
- mdp->data->con0_sel,
- mdp->data->con0_sel);
- }
-
return 0;
-
-disable_clk_mm:
- clk_unprepare(mdp->clk_mm);
-disable_clk_main:
- clk_unprepare(mdp->clk_main);
- return ret;
}
static int mtk_disp_pwm_remove(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&mdp->chip);
- clk_unprepare(mdp->clk_mm);
- clk_unprepare(mdp->clk_main);
+ pwmchip_remove(&mdp->chip);
- return ret;
+ return 0;
}
static const struct mtk_pwm_data mt2701_pwm_data = {
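
The apply() conversion keeps the arithmetic from the original comment: clk_div is chosen so the period counter fits its register, then period and high_width are scaled by rate / (NSEC_PER_SEC * (clk_div + 1)). Below is a standalone sketch of that calculation; the bit width, divider limit, and clock rate are assumed example values, and plain 64-bit multiplication suffices here because these operands cannot overflow (the driver uses mul_u64_u64_div_u64() to stay safe in general).

#include <stdint.h>
#include <stdio.h>

#define PERIOD_BIT_WIDTH	12	/* assumed; the driver defines its own */
#define CLKDIV_MAX		0x3ff	/* assumed divider limit */

/* Fine while a * b fits in 64 bits; see mul_u64_u64_div_u64() otherwise. */
static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
	return a * b / c;
}

int main(void)
{
	uint64_t rate = 26000000;	/* example 26 MHz source clock */
	uint64_t period_ns = 1000000;	/* 1 ms period */
	uint64_t duty_ns = 250000;	/* 25% duty cycle */
	uint64_t clk_div, div, period, high_width;

	clk_div = mul_div(period_ns, rate, 1000000000) >> PERIOD_BIT_WIDTH;
	if (clk_div > CLKDIV_MAX)
		return 1;	/* period too long for the counter */

	div = 1000000000ULL * (clk_div + 1);
	period = mul_div(period_ns, rate, div);
	if (period > 0)
		period--;
	high_width = mul_div(duty_ns, rate, div);

	printf("clk_div=%llu period=%llu high_width=%llu\n",
	       (unsigned long long)clk_div, (unsigned long long)period,
	       (unsigned long long)high_width);
	return 0;
}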
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index a22180803bd7..766dbc58dad8 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -145,30 +145,18 @@ static int mxs_pwm_probe(struct platform_device *pdev)
return ret;
}
- ret = pwmchip_add(&mxs->chip);
+ /* FIXME: Only do this if the PWM isn't already running */
+ ret = stmp_reset_block(mxs->base);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
+
+ ret = devm_pwmchip_add(&pdev->dev, &mxs->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, mxs);
-
- ret = stmp_reset_block(mxs->base);
- if (ret)
- goto pwm_remove;
-
return 0;
-
-pwm_remove:
- pwmchip_remove(&mxs->chip);
- return ret;
-}
-
-static int mxs_pwm_remove(struct platform_device *pdev)
-{
- struct mxs_pwm_chip *mxs = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&mxs->chip);
}
static const struct of_device_id mxs_pwm_dt_ids[] = {
@@ -183,7 +171,6 @@ static struct platform_driver mxs_pwm_driver = {
.of_match_table = mxs_pwm_dt_ids,
},
.probe = mxs_pwm_probe,
- .remove = mxs_pwm_remove,
};
module_platform_driver(mxs_pwm_driver);
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index 50c454c553c4..ab63b081df53 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -150,23 +150,12 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
priv->ec = ec;
priv->dev = &pdev->dev;
- platform_set_drvdata(pdev, priv);
-
chip = &priv->chip;
chip->dev = &pdev->dev;
chip->ops = &ntxec_pwm_ops;
- chip->base = -1;
chip->npwm = 1;
- return pwmchip_add(chip);
-}
-
-static int ntxec_pwm_remove(struct platform_device *pdev)
-{
- struct ntxec_pwm *priv = platform_get_drvdata(pdev);
- struct pwm_chip *chip = &priv->chip;
-
- return pwmchip_remove(chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver ntxec_pwm_driver = {
@@ -174,7 +163,6 @@ static struct platform_driver ntxec_pwm_driver = {
.name = "ntxec-pwm",
},
.probe = ntxec_pwm_probe,
- .remove = ntxec_pwm_remove,
};
module_platform_driver(ntxec_pwm_driver);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 507a2d945b90..fa800fcf31d4 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -444,11 +444,8 @@ err_find_timer_pdev:
static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&omap->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&omap->chip);
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 42ed770b432c..c56001a790d0 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -601,11 +601,8 @@ static int pca9685_pwm_probe(struct i2c_client *client,
static int pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
- int ret;
- ret = pwmchip_remove(&pca->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&pca->chip);
if (!pm_runtime_enabled(&client->dev)) {
/* Put chip in sleep state if runtime PM is disabled */
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index e091a528e33c..a9efdcf839ae 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -195,32 +195,21 @@ static int pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, pc);
return 0;
}
-static int pwm_remove(struct platform_device *pdev)
-{
- struct pxa_pwm_chip *pc;
-
- pc = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&pc->chip);
-}
-
static struct platform_driver pwm_driver = {
.driver = {
.name = "pxa25x-pwm",
.of_match_table = pwm_of_match,
},
.probe = pwm_probe,
- .remove = pwm_remove,
.id_table = pwm_id_table,
};
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 043fc32e8be8..579a15240e0a 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -166,8 +166,6 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
rpipwm->chip.base = -1;
rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
- platform_set_drvdata(pdev, rpipwm);
-
ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
&rpipwm->duty_cycle);
if (ret) {
@@ -175,14 +173,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return ret;
}
- return pwmchip_add(&rpipwm->chip);
-}
-
-static int raspberrypi_pwm_remove(struct platform_device *pdev)
-{
- struct raspberrypi_pwm *rpipwm = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&rpipwm->chip);
+ return devm_pwmchip_add(dev, &rpipwm->chip);
}
static const struct of_device_id raspberrypi_pwm_of_match[] = {
@@ -197,7 +188,6 @@ static struct platform_driver raspberrypi_pwm_driver = {
.of_match_table = raspberrypi_pwm_of_match,
},
.probe = raspberrypi_pwm_probe,
- .remove = raspberrypi_pwm_remove,
};
module_platform_driver(raspberrypi_pwm_driver);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 9daca0c772c7..b437192380e2 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -241,13 +241,12 @@ static int rcar_pwm_probe(struct platform_device *pdev)
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&rcar_pwm->chip);
+ pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
static const struct of_device_id rcar_pwm_of_table[] = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index b853e7942605..4381df90a527 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -425,13 +425,12 @@ static int tpu_probe(struct platform_device *pdev)
static int tpu_remove(struct platform_device *pdev)
{
struct tpu_device *tpu = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&tpu->chip);
+ pwmchip_remove(&tpu->chip);
pm_runtime_disable(&pdev->dev);
- return ret;
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index cbe900877724..f3647b317152 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -384,24 +384,12 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
{
struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
- /*
- * Disable the PWM clk before unpreparing it if the PWM device is still
- * running. This should only happen when the last PWM user left it
- * enabled, or when nobody requested a PWM that was previously enabled
- * by the bootloader.
- *
- * FIXME: Maybe the core should disable all PWM devices in
- * pwmchip_remove(). In this case we'd only have to call
- * clk_unprepare() after pwmchip_remove().
- *
- */
- if (pwm_is_enabled(pc->chip.pwms))
- clk_disable(pc->clk);
+ pwmchip_remove(&pc->chip);
clk_unprepare(pc->pclk);
clk_unprepare(pc->clk);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
static struct platform_driver rockchip_pwm_driver = {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index f6c528f02d43..dd94c4312a0c 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -580,11 +580,8 @@ static int pwm_samsung_probe(struct platform_device *pdev)
static int pwm_samsung_remove(struct platform_device *pdev)
{
struct samsung_pwm_chip *chip = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&chip->chip);
- if (ret < 0)
- return ret;
+ pwmchip_remove(&chip->chip);
clk_disable_unprepare(chip->base_clk);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 420edc4aa94a..253c4a17d255 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -291,7 +291,7 @@ static int pwm_sifive_remove(struct platform_device *dev)
struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
bool is_enabled = false;
struct pwm_device *pwm;
- int ret, ch;
+ int ch;
for (ch = 0; ch < ddata->chip.npwm; ch++) {
pwm = &ddata->chip.pwms[ch];
@@ -304,10 +304,10 @@ static int pwm_sifive_remove(struct platform_device *dev)
clk_disable(ddata->clk);
clk_disable_unprepare(ddata->clk);
- ret = pwmchip_remove(&ddata->chip);
+ pwmchip_remove(&ddata->chip);
clk_notifier_unregister(ddata->clk, &ddata->notifier);
- return ret;
+ return 0;
}
static const struct of_device_id pwm_sifive_of_match[] = {
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 7a69c1a0c060..589aeaaa6ac8 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -231,9 +231,7 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip->ops = &sl28cpld_pwm_ops;
chip->npwm = 1;
- platform_set_drvdata(pdev, priv);
-
- ret = pwmchip_add(&priv->pwm_chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->pwm_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
ERR_PTR(ret));
@@ -243,13 +241,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return 0;
}
-static int sl28cpld_pwm_remove(struct platform_device *pdev)
-{
- struct sl28cpld_pwm *priv = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&priv->pwm_chip);
-}
-
static const struct of_device_id sl28cpld_pwm_of_match[] = {
{ .compatible = "kontron,sl28cpld-pwm" },
{}
@@ -258,7 +249,6 @@ MODULE_DEVICE_TABLE(of, sl28cpld_pwm_of_match);
static struct platform_driver sl28cpld_pwm_driver = {
.probe = sl28cpld_pwm_probe,
- .remove = sl28cpld_pwm_remove,
.driver = {
.name = "sl28cpld-pwm",
.of_match_table = sl28cpld_pwm_of_match,
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 93dd03618465..3115abb3f52a 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -209,7 +209,7 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
if (ret < 0)
return ret;
@@ -218,15 +218,6 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
return 0;
}
-static int stm32_pwm_lp_remove(struct platform_device *pdev)
-{
- struct stm32_pwm_lp *priv = platform_get_drvdata(pdev);
-
- pwm_disable(&priv->chip.pwms[0]);
-
- return pwmchip_remove(&priv->chip);
-}
-
static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
{
struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
@@ -258,7 +249,6 @@ MODULE_DEVICE_TABLE(of, stm32_pwm_lp_of_match);
static struct platform_driver stm32_pwm_lp_driver = {
.probe = stm32_pwm_lp_probe,
- .remove = stm32_pwm_lp_remove,
.driver = {
.name = "stm32-pwm-lp",
.of_match_table = of_match_ptr(stm32_pwm_lp_of_match),
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index c952604e91f3..91ca67651abd 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -484,11 +484,8 @@ err_bus:
static int sun4i_pwm_remove(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev);
- int ret;
- ret = pwmchip_remove(&pwm->chip);
- if (ret)
- return ret;
+ pwmchip_remove(&pwm->chip);
clk_disable_unprepare(pwm->bus_clk);
reset_control_assert(pwm->rst);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 35eb19a5a0d1..4701f0c9b921 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -253,7 +253,7 @@ static int ecap_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = pwmchip_add(&pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
@@ -267,11 +267,9 @@ static int ecap_pwm_probe(struct platform_device *pdev)
static int ecap_pwm_remove(struct platform_device *pdev)
{
- struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
-
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 17909fa53211..5b723a48c5f1 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -485,11 +485,13 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
{
struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ pwmchip_remove(&pc->chip);
+
clk_unprepare(pc->tbclk);
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&pc->chip);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 6c8df5f4e87d..49d9f7a78012 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -276,7 +276,6 @@ static const struct pwm_ops twl6030_pwmled_ops = {
static int twl_pwmled_probe(struct platform_device *pdev)
{
struct twl_pwmled_chip *twl;
- int ret;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -294,20 +293,7 @@ static int twl_pwmled_probe(struct platform_device *pdev)
mutex_init(&twl->mutex);
- ret = pwmchip_add(&twl->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl);
-
- return 0;
-}
-
-static int twl_pwmled_remove(struct platform_device *pdev)
-{
- struct twl_pwmled_chip *twl = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl->chip);
+ return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
@@ -325,7 +311,6 @@ static struct platform_driver twl_pwmled_driver = {
.of_match_table = of_match_ptr(twl_pwmled_of_match),
},
.probe = twl_pwmled_probe,
- .remove = twl_pwmled_remove,
};
module_platform_driver(twl_pwmled_driver);
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index e83a826bf621..203194f2c92e 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -298,7 +298,6 @@ static const struct pwm_ops twl6030_pwm_ops = {
static int twl_pwm_probe(struct platform_device *pdev)
{
struct twl_pwm_chip *twl;
- int ret;
twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
if (!twl)
@@ -314,20 +313,7 @@ static int twl_pwm_probe(struct platform_device *pdev)
mutex_init(&twl->mutex);
- ret = pwmchip_add(&twl->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl);
-
- return 0;
-}
-
-static int twl_pwm_remove(struct platform_device *pdev)
-{
- struct twl_pwm_chip *twl = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl->chip);
+ return devm_pwmchip_add(&pdev->dev, &twl->chip);
}
#ifdef CONFIG_OF
@@ -345,7 +331,6 @@ static struct platform_driver twl_pwm_driver = {
.of_match_table = of_match_ptr(twl_pwm_of_match),
},
.probe = twl_pwm_probe,
- .remove = twl_pwm_remove,
};
module_platform_driver(twl_pwm_driver);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 12153d5801ce..e1bc5214494e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -75,6 +75,15 @@ config RTC_DEBUG
Say yes here to enable debugging support in the RTC framework
and individual RTC drivers.
+config RTC_LIB_KUNIT_TEST
+ tristate "KUnit test for RTC lib functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test RTC library functions.
+
+ If unsure, say N.
+
config RTC_NVMEM
bool "RTC non volatile storage support"
select NVMEM
@@ -624,6 +633,7 @@ config RTC_DRV_FM3130
config RTC_DRV_RX8010
tristate "Epson RX8010SJ"
+ select REGMAP_I2C
help
If you say yes here you get support for the Epson RX8010SJ RTC
chip.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2dd0dd956b0e..5ceeafe4d5b2 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,6 +15,8 @@ rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += sysfs.o
+obj-$(CONFIG_RTC_LIB_KUNIT_TEST) += lib_test.o
+
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index 23284580df97..fe361652727a 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -6,6 +6,8 @@
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* based on arch/arm/common/rtctime.c and other bits
+ *
+ * Author: Cassio Neri <cassio.neri@gmail.com> (rtc_time64_to_tm)
*/
#include <linux/export.h>
@@ -22,8 +24,6 @@ static const unsigned short rtc_ydays[2][13] = {
{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
};
-#define LEAPS_THRU_END_OF(y) ((y) / 4 - (y) / 100 + (y) / 400)
-
/*
* The number of days in the month.
*/
@@ -42,42 +42,95 @@ int rtc_year_days(unsigned int day, unsigned int month, unsigned int year)
}
EXPORT_SYMBOL(rtc_year_days);
-/*
- * rtc_time64_to_tm - Converts time64_t to rtc_time.
- * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
+/**
+ * rtc_time64_to_tm - converts time64_t to rtc_time.
+ *
+ * @time: The number of seconds since 01-01-1970 00:00:00.
+ * (Must be positive.)
+ * @tm: Pointer to the struct rtc_time.
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- unsigned int month, year, secs;
+ unsigned int secs;
int days;
+ u64 u64tmp;
+ u32 u32tmp, udays, century, day_of_century, year_of_century, year,
+ day_of_year, month, day;
+ bool is_Jan_or_Feb, is_leap_year;
+
/* time must be positive */
days = div_s64_rem(time, 86400, &secs);
/* day of the week, 1970-01-01 was a Thursday */
tm->tm_wday = (days + 4) % 7;
- year = 1970 + days / 365;
- days -= (year - 1970) * 365
- + LEAPS_THRU_END_OF(year - 1)
- - LEAPS_THRU_END_OF(1970 - 1);
- while (days < 0) {
- year -= 1;
- days += 365 + is_leap_year(year);
- }
- tm->tm_year = year - 1900;
- tm->tm_yday = days + 1;
-
- for (month = 0; month < 11; month++) {
- int newdays;
-
- newdays = days - rtc_month_days(month, year);
- if (newdays < 0)
- break;
- days = newdays;
- }
- tm->tm_mon = month;
- tm->tm_mday = days + 1;
+ /*
+ * The following algorithm is, basically, Proposition 6.3 of Neri
+ * and Schneider [1]. In a few words: it works on the computational
+ * (fictitious) calendar where the year starts in March, month = 2
+ * (*), and finishes in February, month = 13. This calendar is
+ * mathematically convenient because the day of the year does not
+ * depend on whether the year is leap or not. For instance:
+ *
+ * March 1st 0-th day of the year;
+ * ...
+ * April 1st 31-st day of the year;
+ * ...
+ * January 1st 306-th day of the year; (Important!)
+ * ...
+ * February 28th 364-th day of the year;
+ * February 29th 365-th day of the year (if it exists).
+ *
+ * After having worked out the date in the computational calendar
+ * (using just arithmetics) it's easy to convert it to the
+ * corresponding date in the Gregorian calendar.
+ *
+ * [1] "Euclidean Affine Functions and Applications to Calendar
+ * Algorithms". https://arxiv.org/abs/2102.06959
+ *
+ * (*) The numbering of months follows rtc_time more closely and
+ * thus, is slightly different from [1].
+ */
+
+ udays = ((u32) days) + 719468;
+
+ u32tmp = 4 * udays + 3;
+ century = u32tmp / 146097;
+ day_of_century = u32tmp % 146097 / 4;
+
+ u32tmp = 4 * day_of_century + 3;
+ u64tmp = 2939745ULL * u32tmp;
+ year_of_century = upper_32_bits(u64tmp);
+ day_of_year = lower_32_bits(u64tmp) / 2939745 / 4;
+
+ year = 100 * century + year_of_century;
+ is_leap_year = year_of_century != 0 ?
+ year_of_century % 4 == 0 : century % 4 == 0;
+
+ u32tmp = 2141 * day_of_year + 132377;
+ month = u32tmp >> 16;
+ day = ((u16) u32tmp) / 2141;
+
+ /*
+ * Recall that January 01 is the 306-th day of the year in the
+ * computational (not Gregorian) calendar.
+ */
+ is_Jan_or_Feb = day_of_year >= 306;
+
+ /* Converts to the Gregorian calendar. */
+ year = year + is_Jan_or_Feb;
+ month = is_Jan_or_Feb ? month - 12 : month;
+ day = day + 1;
+
+ day_of_year = is_Jan_or_Feb ?
+ day_of_year - 306 : day_of_year + 31 + 28 + is_leap_year;
+
+ /* Converts to rtc_time's format. */
+ tm->tm_year = (int) (year - 1900);
+ tm->tm_mon = (int) month;
+ tm->tm_mday = (int) day;
+ tm->tm_yday = (int) day_of_year + 1;
tm->tm_hour = secs / 3600;
secs -= tm->tm_hour * 3600;
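
For readers who want to poke at the conversion outside the kernel, here is the same arithmetic as a standalone C function, checked against two known dates; upper_32_bits()/lower_32_bits() become a shift and a cast. This is a sketch for experimentation, not a replacement for the driver code.

#include <stdint.h>
#include <stdio.h>

/* Days since 1970-01-01 to civil date, per Neri-Schneider. */
static void civil_from_days(uint32_t days, int *y, int *m, int *d)
{
	uint32_t udays = days + 719468;	/* shift epoch to 0000-03-01 */

	uint32_t t = 4 * udays + 3;
	uint32_t century = t / 146097;
	uint32_t day_of_century = t % 146097 / 4;

	t = 4 * day_of_century + 3;
	uint64_t big = 2939745ULL * t;
	uint32_t year_of_century = big >> 32;
	uint32_t day_of_year = (uint32_t)big / 2939745 / 4;

	uint32_t year = 100 * century + year_of_century;

	t = 2141 * day_of_year + 132377;
	uint32_t month = t >> 16;
	uint32_t day = (uint16_t)t / 2141;

	/* Convert from the March-based computational calendar. */
	int jan_or_feb = day_of_year >= 306;

	*y = year + jan_or_feb;
	*m = jan_or_feb ? month - 12 : month;	/* 0 = January */
	*d = day + 1;
}

int main(void)
{
	int y, m, d;

	civil_from_days(0, &y, &m, &d);		/* expect 1970-01-01 */
	printf("%04d-%02d-%02d\n", y, m + 1, d);

	civil_from_days(19358, &y, &m, &d);	/* expect 2023-01-01 */
	printf("%04d-%02d-%02d\n", y, m + 1, d);
	return 0;
}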
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
new file mode 100644
index 000000000000..d5caf36c56cd
--- /dev/null
+++ b/drivers/rtc/lib_test.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: LGPL-2.1+
+
+#include <kunit/test.h>
+#include <linux/rtc.h>
+
+/*
+ * Advance a date by one day.
+ */
+static void advance_date(int *year, int *month, int *mday, int *yday)
+{
+ if (*mday != rtc_month_days(*month - 1, *year)) {
+ ++*mday;
+ ++*yday;
+ return;
+ }
+
+ *mday = 1;
+ if (*month != 12) {
+ ++*month;
+ ++*yday;
+ return;
+ }
+
+ *month = 1;
+ *yday = 1;
+ ++*year;
+}
+
+/*
+ * Checks every day in a 160000-year interval starting on 1970-01-01
+ * against the expected result.
+ */
+static void rtc_time64_to_tm_test_date_range(struct kunit *test)
+{
+ /*
+ * 160000 years = (160000 / 400) * 400 years
+ * = (160000 / 400) * 146097 days
+ * = (160000 / 400) * 146097 * 86400 seconds
+ */
+ time64_t total_secs = ((time64_t) 160000) / 400 * 146097 * 86400;
+
+ int year = 1970;
+ int month = 1;
+ int mday = 1;
+ int yday = 1;
+
+ struct rtc_time result;
+ time64_t secs;
+ s64 days;
+
+ for (secs = 0; secs <= total_secs; secs += 86400) {
+
+ rtc_time64_to_tm(secs, &result);
+
+ days = div_s64(secs, 86400);
+
+ #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
+ year, month, mday, yday, days
+
+ KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, month - 1, result.tm_mon, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, mday, result.tm_mday, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, yday, result.tm_yday, FAIL_MSG);
+
+ advance_date(&year, &month, &mday, &yday);
+ }
+}
+
+static struct kunit_case rtc_lib_test_cases[] = {
+ KUNIT_CASE(rtc_time64_to_tm_test_date_range),
+ {}
+};
+
+static struct kunit_suite rtc_lib_test_suite = {
+ .name = "rtc_lib_test_cases",
+ .test_cases = rtc_lib_test_cases,
+};
+
+kunit_test_suite(rtc_lib_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 670fd8a2970e..4eb53412b808 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -229,19 +229,13 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
if (!pm_trace_rtc_valid())
return -EIO;
- /* REVISIT: if the clock has a "century" register, use
- * that instead of the heuristic in mc146818_get_time().
- * That'll make Y3K compatility (year > 2070) easy!
- */
mc146818_get_time(t);
return 0;
}
static int cmos_set_time(struct device *dev, struct rtc_time *t)
{
- /* REVISIT: set the "century" register if available
- *
- * NOTE: this ignores the issue whereby updating the seconds
+ /* NOTE: this ignores the issue whereby updating the seconds
* takes effect exactly 500ms after we write the register.
* (Also queueing and other delays before we get this far.)
*/
@@ -1053,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev)
* ACK the rtc irq here
*/
if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
+ local_irq_disable();
cmos_interrupt(0, (void *)cmos->rtc);
+ local_irq_enable();
return;
}
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index c914091819ba..d38aaf08108c 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -60,14 +60,23 @@
#define RX8025_ADJ_DATA_MAX 62
#define RX8025_ADJ_DATA_MIN -62
+enum rx_model {
+ model_rx_unknown,
+ model_rx_8025,
+ model_rx_8035,
+ model_last
+};
+
static const struct i2c_device_id rx8025_id[] = {
- { "rx8025", 0 },
+ { "rx8025", model_rx_8025 },
+ { "rx8035", model_rx_8035 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
struct rx8025_data {
struct rtc_device *rtc;
+ enum rx_model model;
u8 ctrl1;
};
@@ -100,10 +109,26 @@ static s32 rx8025_write_regs(const struct i2c_client *client,
length, values);
}
+static int rx8025_is_osc_stopped(enum rx_model model, int ctrl2)
+{
+ int xstp = ctrl2 & RX8025_BIT_CTRL2_XST;
+ /* XSTP bit has different polarity on RX-8025 vs RX-8035.
+ * RX-8025: 0 == oscillator stopped
+ * RX-8035: 1 == oscillator stopped
+ */
+
+ if (model == model_rx_8025)
+ xstp = !xstp;
+
+ return xstp;
+}
+
static int rx8025_check_validity(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct rx8025_data *drvdata = dev_get_drvdata(dev);
int ctrl2;
+ int xstp;
ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
@@ -117,7 +142,8 @@ static int rx8025_check_validity(struct device *dev)
return -EINVAL;
}
- if (!(ctrl2 & RX8025_BIT_CTRL2_XST)) {
+ xstp = rx8025_is_osc_stopped(drvdata->model, ctrl2);
+ if (xstp) {
dev_warn(dev, "crystal stopped, date is invalid\n");
return -EINVAL;
}
@@ -127,6 +153,7 @@ static int rx8025_check_validity(struct device *dev)
static int rx8025_reset_validity(struct i2c_client *client)
{
+ struct rx8025_data *drvdata = i2c_get_clientdata(client);
int ctrl2 = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (ctrl2 < 0)
@@ -134,22 +161,28 @@ static int rx8025_reset_validity(struct i2c_client *client)
ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET);
+ if (drvdata->model == model_rx_8025)
+ ctrl2 |= RX8025_BIT_CTRL2_XST;
+ else
+ ctrl2 &= ~(RX8025_BIT_CTRL2_XST);
+
return rx8025_write_reg(client, RX8025_REG_CTRL2,
- ctrl2 | RX8025_BIT_CTRL2_XST);
+ ctrl2);
}
static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
- int status;
+ int status, xstp;
rtc_lock(rx8025->rtc);
status = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (status < 0)
goto out;
- if (!(status & RX8025_BIT_CTRL2_XST))
+ xstp = rx8025_is_osc_stopped(rx8025->model, status);
+ if (xstp)
dev_warn(&client->dev, "Oscillation stop was detected,"
"you may have to readjust the clock\n");
@@ -519,6 +552,9 @@ static int rx8025_probe(struct i2c_client *client,
i2c_set_clientdata(client, rx8025);
+ if (id)
+ rx8025->model = id->driver_data;
+
err = rx8025_init_client(client);
if (err)
return err;
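A minimal sketch of the polarity handling introduced above, with the
expected results spelled out (illustration only):

	/* RX-8025: XSTP == 0 means the oscillator stopped */
	rx8025_is_osc_stopped(model_rx_8025, 0);                    /* true  */
	rx8025_is_osc_stopped(model_rx_8025, RX8025_BIT_CTRL2_XST); /* false */
	/* RX-8035: XSTP == 1 means the oscillator stopped */
	rx8025_is_osc_stopped(model_rx_8035, RX8025_BIT_CTRL2_XST); /* true  */
	rx8025_is_osc_stopped(model_rx_8035, 0);                    /* false */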
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 6b56f8eacba6..fb9c6b709e13 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -204,15 +204,9 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_WEEKDAY] = 1 << tm->tm_wday;
data[RTC_DATE] = tm->tm_mday;
data[RTC_MONTH] = tm->tm_mon + 1;
- data[RTC_YEAR1] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
+ data[RTC_YEAR1] = tm->tm_year - 100;
- if (tm->tm_year < 100) {
- pr_err("RTC cannot handle the year %d\n",
- 1900 + tm->tm_year);
- return -EINVAL;
- } else {
- return 0;
- }
+ return 0;
}
/*
@@ -786,29 +780,35 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- device_init_wakeup(&pdev->dev, 1);
-
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "s5m-rtc",
- &s5m_rtc_ops, THIS_MODULE);
-
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc_dev))
return PTR_ERR(info->rtc_dev);
- if (!info->irq) {
- dev_info(&pdev->dev, "Alarm IRQ not available\n");
- return 0;
+ info->rtc_dev->ops = &s5m_rtc_ops;
+
+ if (info->device_type == S5M8763X) {
+ info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ info->rtc_dev->range_max = RTC_TIMESTAMP_END_9999;
+ } else {
+ info->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ info->rtc_dev->range_max = RTC_TIMESTAMP_END_2099;
}
- ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
- s5m_rtc_alarm_irq, 0, "rtc-alarm0",
- info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
- info->irq, ret);
- return ret;
+ if (!info->irq) {
+ clear_bit(RTC_FEATURE_ALARM, info->rtc_dev->features);
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ s5m_rtc_alarm_irq, 0, "rtc-alarm0",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ info->irq, ret);
+ return ret;
+ }
+ device_init_wakeup(&pdev->dev, 1);
}
- return 0;
+ return devm_rtc_register_device(info->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
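With range_min/range_max populated, the year check removed from
s5m8767_tm_to_data() is enforced centrally by the RTC core instead; a
simplified sketch of that core-side check (condensed from
drivers/rtc/interface.c, shown only to illustrate the mechanism):

	static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
	{
		if (rtc->range_min != rtc->range_max) {
			time64_t time = rtc_tm_to_time64(tm);

			if (time < rtc->range_min || time > rtc->range_max)
				return -ERANGE;
		}

		return 0;
	}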
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index bc89c62ccb9b..75e4c2d777b9 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -467,6 +467,6 @@ static struct platform_driver tps65910_rtc_driver = {
};
module_platform_driver(tps65910_rtc_driver);
-MODULE_ALIAS("platform:rtc-tps65910");
+MODULE_ALIAS("platform:tps65910-rtc");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 376f1efbbb86..d0416dbd0cd8 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,17 +2,6 @@
comment "S/390 block device drivers"
depends on S390 && BLOCK
-config BLK_DEV_XPRAM
- def_tristate m
- prompt "XPRAM disk support"
- depends on S390 && BLOCK
- help
- Select this option if you want to use your expanded storage on S/390
- or zSeries as a disk. This is useful as a _fast_ swap device if you
- want to access more than 2G of memory when running in 31 bit mode.
- This option is also available as a module which will be called
- xpram. If unsure, say "N".
-
config DCSSBLK
def_tristate m
select FS_DAX_LIMITED
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 60c85cff556f..a0a54d2f063f 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
-obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
deleted file mode 100644
index ce98fab4d43c..000000000000
--- a/drivers/s390/block/xpram.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xpram.c -- the S/390 expanded memory RAM-disk
- *
- * significant parts of this code are based on
- * the sbull device driver presented in
- * A. Rubini: Linux Device Drivers
- *
- * Author of XPRAM specific coding: Reinhard Buendgen
- * buendgen@de.ibm.com
- * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * External interfaces:
- * Interfaces to linux kernel
- * xpram_setup: read kernel parameters
- * Device specific file operations
- * xpram_iotcl
- * xpram_open
- *
- * "ad-hoc" partitioning:
- * the expanded memory can be partitioned among several devices
- * (with different minors). The partitioning set up can be
- * set by kernel or module parameters (int devs & int sizes[])
- *
- * Potential future improvements:
- * generic hard disk support to replace ad-hoc partitioning
- */
-
-#define KMSG_COMPONENT "xpram"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/ctype.h> /* isdigit, isxdigit */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/hdreg.h> /* HDIO_GETGEO */
-#include <linux/device.h>
-#include <linux/bio.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-
-#define XPRAM_NAME "xpram"
-#define XPRAM_DEVS 1 /* one partition */
-#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
-
-typedef struct {
- unsigned int size; /* size of xpram segment in pages */
- unsigned int offset; /* start page of xpram segment */
-} xpram_device_t;
-
-static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
-static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
-static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
-static unsigned int xpram_pages;
-static int xpram_devs;
-
-/*
- * Parameter parsing functions.
- */
-static int devs = XPRAM_DEVS;
-static char *sizes[XPRAM_MAX_DEVS];
-
-module_param(devs, int, 0);
-module_param_array(sizes, charp, NULL, 0);
-
-MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
- "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
-MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
- "the defaults are 0s \n" \
- "All devices with size 0 equally partition the "
- "remaining space on the expanded strorage not "
- "claimed by explicit sizes\n");
-MODULE_LICENSE("GPL");
-
-/*
- * Copy expanded memory page (4kB) into main memory
- * Arguments
- * page_addr: address of target page
- * xpage_index: index of expandeded memory page
- * Return value
- * 0: if operation succeeds
- * -EIO: if pgin failed
- * -ENXIO: if xpram has vanished
- */
-static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
-{
- int cc = 2; /* return unused cc 2 if pgin traps */
-
- asm volatile(
- " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
- if (cc == 3)
- return -ENXIO;
- if (cc == 2)
- return -ENXIO;
- if (cc == 1)
- return -EIO;
- return 0;
-}
-
-/*
- * Copy a 4kB page of main memory to an expanded memory page
- * Arguments
- * page_addr: address of source page
- * xpage_index: index of expandeded memory page
- * Return value
- * 0: if operation succeeds
- * -EIO: if pgout failed
- * -ENXIO: if xpram has vanished
- */
-static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
-{
- int cc = 2; /* return unused cc 2 if pgin traps */
-
- asm volatile(
- " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
- if (cc == 3)
- return -ENXIO;
- if (cc == 2)
- return -ENXIO;
- if (cc == 1)
- return -EIO;
- return 0;
-}
-
-/*
- * Check if xpram is available.
- */
-static int __init xpram_present(void)
-{
- unsigned long mem_page;
- int rc;
-
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- rc = xpram_page_in(mem_page, 0);
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
- * Return index of the last available xpram page.
- */
-static unsigned long __init xpram_highest_page_index(void)
-{
- unsigned int page_index, add_bit;
- unsigned long mem_page;
-
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return 0;
-
- page_index = 0;
- add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
- while (add_bit > 0) {
- if (xpram_page_in(mem_page, page_index | add_bit) == 0)
- page_index |= add_bit;
- add_bit >>= 1;
- }
-
- free_page (mem_page);
-
- return page_index;
-}
-
-/*
- * Block device make request function.
- */
-static blk_qc_t xpram_submit_bio(struct bio *bio)
-{
- xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec bvec;
- struct bvec_iter iter;
- unsigned int index;
- unsigned long page_addr;
- unsigned long bytes;
-
- blk_queue_split(&bio);
-
- if ((bio->bi_iter.bi_sector & 7) != 0 ||
- (bio->bi_iter.bi_size & 4095) != 0)
- /* Request is not page-aligned. */
- goto fail;
- if ((bio->bi_iter.bi_size >> 12) > xdev->size)
- /* Request size is no page-aligned. */
- goto fail;
- if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
- goto fail;
- index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, iter) {
- page_addr = (unsigned long)
- kmap(bvec.bv_page) + bvec.bv_offset;
- bytes = bvec.bv_len;
- if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
- /* More paranoia. */
- goto fail;
- while (bytes > 0) {
- if (bio_data_dir(bio) == READ) {
- if (xpram_page_in(page_addr, index) != 0)
- goto fail;
- } else {
- if (xpram_page_out(page_addr, index) != 0)
- goto fail;
- }
- page_addr += 4096;
- bytes -= 4096;
- index++;
- }
- }
- bio_endio(bio);
- return BLK_QC_T_NONE;
-fail:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
-}
-
-static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- unsigned long size;
-
- /*
- * get geometry: we have to fake one... trim the size to a
- * multiple of 64 (32k): tell we have 16 sectors, 4 heads,
- * whatever cylinders. Tell also that data starts at sector. 4.
- */
- size = (xpram_pages * 8) & ~0x3f;
- geo->cylinders = size >> 6;
- geo->heads = 4;
- geo->sectors = 16;
- geo->start = 4;
- return 0;
-}
-
-static const struct block_device_operations xpram_devops =
-{
- .owner = THIS_MODULE,
- .submit_bio = xpram_submit_bio,
- .getgeo = xpram_getgeo,
-};
-
-/*
- * Setup xpram_sizes array.
- */
-static int __init xpram_setup_sizes(unsigned long pages)
-{
- unsigned long mem_needed;
- unsigned long mem_auto;
- unsigned long long size;
- char *sizes_end;
- int mem_auto_no;
- int i;
-
- /* Check number of devices. */
- if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
- pr_err("%d is not a valid number of XPRAM devices\n",devs);
- return -EINVAL;
- }
- xpram_devs = devs;
-
- /*
- * Copy sizes array to xpram_sizes and align partition
- * sizes to page boundary.
- */
- mem_needed = 0;
- mem_auto_no = 0;
- for (i = 0; i < xpram_devs; i++) {
- if (sizes[i]) {
- size = simple_strtoull(sizes[i], &sizes_end, 0);
- switch (*sizes_end) {
- case 'g':
- case 'G':
- size <<= 20;
- break;
- case 'm':
- case 'M':
- size <<= 10;
- }
- xpram_sizes[i] = (size + 3) & -4UL;
- }
- if (xpram_sizes[i])
- mem_needed += xpram_sizes[i];
- else
- mem_auto_no++;
- }
-
- pr_info(" number of devices (partitions): %d \n", xpram_devs);
- for (i = 0; i < xpram_devs; i++) {
- if (xpram_sizes[i])
- pr_info(" size of partition %d: %u kB\n",
- i, xpram_sizes[i]);
- else
- pr_info(" size of partition %d to be set "
- "automatically\n",i);
- }
- pr_info(" memory needed (for sized partitions): %lu kB\n",
- mem_needed);
- pr_info(" partitions to be sized automatically: %d\n",
- mem_auto_no);
-
- if (mem_needed > pages * 4) {
- pr_err("Not enough expanded memory available\n");
- return -EINVAL;
- }
-
- /*
- * partitioning:
- * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
- * else: ; all partitions with zero xpram_sizes[i]
- * partition equally the remaining space
- */
- if (mem_auto_no) {
- mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
- pr_info(" automatically determined "
- "partition size: %lu kB\n", mem_auto);
- for (i = 0; i < xpram_devs; i++)
- if (xpram_sizes[i] == 0)
- xpram_sizes[i] = mem_auto;
- }
- return 0;
-}
-
-static int __init xpram_setup_blkdev(void)
-{
- unsigned long offset;
- int i, rc = -ENOMEM;
-
- for (i = 0; i < xpram_devs; i++) {
- xpram_disks[i] = blk_alloc_disk(NUMA_NO_NODE);
- if (!xpram_disks[i])
- goto out;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_disks[i]->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM,
- xpram_disks[i]->queue);
- blk_queue_logical_block_size(xpram_disks[i]->queue, 4096);
- }
-
- /*
- * Register xpram major.
- */
- rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
- if (rc < 0)
- goto out;
-
- /*
- * Setup device structures.
- */
- offset = 0;
- for (i = 0; i < xpram_devs; i++) {
- struct gendisk *disk = xpram_disks[i];
-
- xpram_devices[i].size = xpram_sizes[i] / 4;
- xpram_devices[i].offset = offset;
- offset += xpram_devices[i].size;
- disk->major = XPRAM_MAJOR;
- disk->first_minor = i;
- disk->minors = 1;
- disk->fops = &xpram_devops;
- disk->private_data = &xpram_devices[i];
- sprintf(disk->disk_name, "slram%d", i);
- set_capacity(disk, xpram_sizes[i] << 1);
- add_disk(disk);
- }
-
- return 0;
-out:
- while (i--)
- blk_cleanup_disk(xpram_disks[i]);
- return rc;
-}
-
-/*
- * Finally, the init/exit functions.
- */
-static void __exit xpram_exit(void)
-{
- int i;
- for (i = 0; i < xpram_devs; i++) {
- del_gendisk(xpram_disks[i]);
- blk_cleanup_disk(xpram_disks[i]);
- }
- unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
-}
-
-static int __init xpram_init(void)
-{
- int rc;
-
- /* Find out size of expanded memory. */
- if (xpram_present() != 0) {
- pr_err("No expanded memory available\n");
- return -ENODEV;
- }
- xpram_pages = xpram_highest_page_index() + 1;
- pr_info(" %u pages expanded memory found (%lu KB).\n",
- xpram_pages, (unsigned long) xpram_pages*4);
- rc = xpram_setup_sizes(xpram_pages);
- if (rc)
- return rc;
- return xpram_setup_blkdev();
-}
-
-module_init(xpram_init);
-module_exit(xpram_exit);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 87cdbace1453..e4592890f20a 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -292,13 +292,15 @@ con3270_update(struct timer_list *t)
* Read tasklet.
*/
static void
-con3270_read_tasklet(struct raw3270_request *rrq)
+con3270_read_tasklet(unsigned long data)
{
static char kreset_data = TW_KR;
+ struct raw3270_request *rrq;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
+ rrq = (struct raw3270_request *)data;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
@@ -625,8 +627,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
timer_setup(&condev->timer, con3270_update, 0);
- tasklet_init(&condev->readlet,
- (void (*)(unsigned long)) con3270_read_tasklet,
+ tasklet_init(&condev->readlet, con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index e1686a69a68e..6f2b64040078 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -34,12 +34,13 @@ void schedule_sysrq_work(struct sysrq_work *sw)
/**
- * Check for special chars at start of input.
+ * ctrlchar_handle - check for special chars at start of input
*
- * @param buf Console input buffer.
- * @param len Length of valid data in buffer.
- * @param tty The tty struct for this console.
- * @return CTRLCHAR_NONE, if nothing matched,
+ * @buf: console input buffer
+ * @len: length of valid data in buffer
+ * @tty: the tty struct for this console
+ *
+ * Return: CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered
* otherwise char to be inserted logically or'ed
* with CTRLCHAR_CTRL
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 37ee8f698c3b..02b6f394aec2 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -26,7 +26,7 @@
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
- * @cmd: FTP transfer function
+ * @transfer: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b4b84e3e0949..2cf7fe131ece 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -28,7 +28,7 @@
#define SCLP_HEADER "sclp: "
struct sclp_trace_entry {
- char id[4];
+ char id[4] __nonstring;
u32 a;
u64 b;
};
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 4dd2eb634856..f3c656975e05 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -262,7 +262,10 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- css_schedule_eval_all_unreg(0);
+ /* There could be subchannels without proper devices connected;
+ * evaluate all the entries.
+ */
+ css_schedule_eval_all();
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index adf33b653d87..8d14569823d7 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -867,19 +867,6 @@ out_err:
wake_up(&ccw_device_init_wq);
}
-static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
-{
- struct subchannel *sch;
-
- /* Get subchannel reference for local processing. */
- if (!get_device(cdev->dev.parent))
- return;
- sch = to_subchannel(cdev->dev.parent);
- css_sch_device_unregister(sch);
- /* Release subchannel reference for local processing. */
- put_device(&sch->dev);
-}
-
/*
* subchannel recognition done. Called from the state machine.
*/
@@ -1857,10 +1844,10 @@ static void ccw_device_todo(struct work_struct *work)
css_schedule_eval(sch->schid);
fallthrough;
case CDEV_TODO_UNREG:
- if (sch_is_pseudo_sch(sch))
- ccw_device_unregister(cdev);
- else
- ccw_device_call_sch_unregister(cdev);
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ ccw_device_unregister(cdev);
break;
default:
break;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 740996d0dc8c..7835a87a60b5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -91,7 +91,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
}
/**
- * diag_get_dev_info - retrieve device information via diag 0x210
+ * diag210_get_dev_info - retrieve device information via diag 0x210
* @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index fa0cb8633040..356318746dd1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -71,7 +71,7 @@ static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
-/**
+/*
* Process a rescan of the transport layer.
*
* Returns 1, if the rescan has been processed, otherwise 0.
@@ -462,7 +462,7 @@ static void zcdn_destroy_all(void)
#endif
-/**
+/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
* This function is not supported beyond zcrypt 1.3.1.
@@ -473,7 +473,7 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_write(): Not allowed.
*
* Write is is not allowed
@@ -484,7 +484,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
@@ -512,7 +512,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
return stream_open(inode, filp);
}
-/**
+/*
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
@@ -2153,7 +2153,7 @@ static void zcdn_exit(void)
#endif
-/**
+/*
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
@@ -2191,7 +2191,7 @@ out:
return rc;
}
-/**
+/*
* zcrypt_api_exit(): Module termination.
*
* The module termination code.
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index fa8293d37006..2bd49950ba81 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -65,7 +65,7 @@ static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
-/**
+/*
* Probe function for CEX2A card devices. It always accepts the AP device
* since the bus_match already checked the card type.
* @ap_dev: pointer to the AP device.
@@ -124,7 +124,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2A card driver information
* if an AP card device is removed.
*/
@@ -142,7 +142,7 @@ static struct ap_driver zcrypt_cex2a_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX2A queue devices. It always accepts the AP device
* since the bus_match already checked the queue type.
* @ap_dev: pointer to the AP device.
@@ -183,7 +183,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2A queue driver information
* if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index a0b9f1153e12..6360fdd06160 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -171,7 +171,7 @@ static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
-/**
+/*
* Large random number detection function. Its sends a message to a CEX2C/CEX3C
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
@@ -237,7 +237,7 @@ out_free:
return rc;
}
-/**
+/*
* Probe function for CEX2C/CEX3C card devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@@ -303,7 +303,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2C/CEX3C card driver information
* if an AP card device is removed.
*/
@@ -325,7 +325,7 @@ static struct ap_driver zcrypt_cex2c_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX2C/CEX3C queue devices. It always accepts the
* AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
@@ -376,7 +376,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX2C/CEX3C queue driver information
* if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 1f7ec54142e1..06024bbe9a58 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -394,7 +394,7 @@ static const struct attribute_group ep11_queue_attr_grp = {
.attrs = ep11_queue_attrs,
};
-/**
+/*
* Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@@ -562,7 +562,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
* information if an AP card device is removed.
*/
@@ -586,7 +586,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
+/*
* Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
@@ -652,7 +652,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
return rc;
}
-/**
+/*
* This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
* information if an AP queue device is removed.
*/
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 99405472824d..99937f3e1d49 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/**
+/*
* The type 50 message family is associated with a CEXxA cards.
*
* The four members of the family are described below.
@@ -136,7 +136,7 @@ struct type50_crb3_msg {
unsigned char message[512];
} __packed;
-/**
+/*
* The type 80 response family is associated with a CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
@@ -188,7 +188,7 @@ unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
return 0;
}
-/**
+/*
* Convert a ICAMEX message to a type50 MEX message.
*
* @zq: crypto queue pointer
@@ -255,7 +255,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert a ICACRT message to a type50 CRT message.
*
* @zq: crypto queue pointer
@@ -346,7 +346,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 80 reply message back to user space.
*
* @zq: crypto device pointer
@@ -418,7 +418,7 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -457,7 +457,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -502,7 +502,7 @@ out:
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -547,7 +547,7 @@ out:
return rc;
}
-/**
+/*
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 752c6398fcd6..bc5a8c31ba73 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
-/**
+/*
* CPRB
* Note that all shorts, ints and longs are little-endian.
* All pointer fields are 32-bits long, and mean nothing
@@ -107,7 +107,7 @@ struct function_and_rules_block {
unsigned char only_rule[8];
} __packed;
-/**
+/*
* The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
@@ -236,7 +236,7 @@ int speed_idx_ep11(int req_type)
}
-/**
+/*
* Convert a ICAMEX message to a type6 MEX message.
*
* @zq: crypto device pointer
@@ -305,7 +305,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert a ICACRT message to a type6 CRT message.
*
* @zq: crypto device pointer
@@ -374,7 +374,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Convert a XCRB message to a type6 CPRB message.
*
* @zq: crypto device pointer
@@ -571,7 +571,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap
return 0;
}
-/**
+/*
* Copy results from a type 86 ICA reply message back to user space.
*
* @zq: crypto device pointer
@@ -697,7 +697,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@@ -728,7 +728,7 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
* @zq: crypto device pointer
@@ -911,7 +911,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -966,7 +966,7 @@ out:
complete(&(resp_type->work));
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -1015,7 +1015,7 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1063,7 +1063,7 @@ out_free:
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1112,7 +1112,7 @@ out_free:
return rc;
}
-/**
+/*
* Fetch function code from cprb.
* Extracting the fc requires to copy the cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@@ -1140,7 +1140,7 @@ unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1170,7 +1170,7 @@ out:
return rc;
}
-/**
+/*
* Fetch function code from ep11 cprb.
* Extracting the fc requires to copy the ep11 cprb from userspace.
* So this function allocates memory and needs an ap_msg prepared
@@ -1198,7 +1198,7 @@ unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
}
-/**
+/*
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1228,7 +1228,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
} __packed * payload_hdr = NULL;
- /**
+ /*
* The target domain field within the cprb body/payload block will be
* replaced by the usage domain for non-management commands only.
* Therefore we check the first bit of the 'flags' parameter for
@@ -1299,7 +1299,7 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
return 0;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1339,7 +1339,7 @@ out:
return rc;
}
-/**
+/*
* The crypto operations for a CEXxC card.
*/
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index ca473b368905..cbc3b62cd9e5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -766,7 +766,7 @@ static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
}
/**
- * zfcp_adapter_debug_register - registers debug feature for an adapter
+ * zfcp_dbf_adapter_register - registers debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be registered
* return: -ENOMEM on error, 0 otherwise
*/
@@ -824,7 +824,7 @@ err_out:
}
/**
- * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
+ * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
* @adapter: pointer to adapter for which debug features should be unregistered
*/
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6da8f6d05d39..c1f979296c1a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2275,7 +2275,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
}
/**
- * zfcp_fsf_close_LUN - close LUN
+ * zfcp_fsf_close_lun - close LUN
* @erp_action: pointer to erp_action triggering the "close LUN"
* Returns: 0 on success, error otherwise
*/
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 8f19bed6384e..6a2720105138 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -384,7 +384,7 @@ free_req_q:
}
/**
- * zfcp_close_qdio - close qdio queues for an adapter
+ * zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to structure zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 59333f0257a8..60f2a04f0869 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -111,9 +111,9 @@ static void zfcp_unit_release(struct device *dev)
}
/**
- * zfcp_unit_enqueue - enqueue unit to unit list of a port.
+ * zfcp_unit_add - add unit to unit list of a port.
* @port: pointer to port where unit is added
- * @fcp_lun: FCP LUN of unit to be enqueued
+ * @fcp_lun: FCP LUN of unit to be added
* Returns: 0 success
*
* Sets up some unit internal structures and creates sysfs entry.
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 2f1894588e0b..b2730e859df8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1629,8 +1629,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
struct device *dev = &cfg->dev->dev;
struct pci_dev *pdev = cfg->dev;
- int rc = 0;
- int ro_start, ro_size, i, j, k;
+ int i, k, rc = 0;
+ unsigned int kw_size;
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
@@ -1648,24 +1648,6 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
goto out;
}
- /* Get the read only section offset */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (unlikely(ro_start < 0)) {
- dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
- rc = -ENODEV;
- goto out;
- }
-
- /* Get the read only section size, cap when extends beyond read VPD */
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (unlikely((i + j) > vpd_size)) {
- dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
- __func__, (i + j), vpd_size);
- ro_size = vpd_size - i;
- }
-
/*
* Find the offset of the WWPN tag within the read only
* VPD data and validate the found field (partials are
@@ -1681,11 +1663,9 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* ports programmed and operate in an undefined state.
*/
for (k = 0; k < cfg->num_fc_ports; k++) {
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
-
- i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
- if (i < 0) {
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ wwpn_vpd_tags[k], &kw_size);
+ if (i == -ENOENT) {
if (wwpn_vpd_required)
dev_err(dev, "%s: Port %d WWPN not found\n",
__func__, k);
@@ -1693,9 +1673,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
continue;
}
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
+ if (i < 0 || kw_size != WWPN_LEN) {
dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
__func__, k);
rc = -ENODEV;
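The cxlflash conversion above relies on the newer VPD helper that folds
the find-tag/find-keyword/size-check sequence into one call; a minimal
usage sketch (buffer, length, and keyword are placeholders):

	unsigned int kw_size;
	int off;

	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len, "SN", &kw_size);
	if (off >= 0) {
		/* kw_size bytes of keyword data start at vpd[off] */
	}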
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 5a86cffd78f6..4310cb342a9f 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -18,10 +18,10 @@
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/thermal.h>
+#include <linux/units.h>
#include <trace/events/thermal.h>
-#define HZ_PER_KHZ 1000
#define SCALE_ERROR_MITIGATION 100
/**
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 823354a1a91a..19926beeb3b7 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -108,9 +108,12 @@ static struct attribute *imok_attr[] = {
NULL
};
+static const struct attribute_group imok_attribute_group = {
+ .attrs = imok_attr,
+};
+
static const struct attribute_group data_attribute_group = {
.bin_attrs = data_attributes,
- .attrs = imok_attr,
};
static ssize_t available_uuids_show(struct device *dev,
@@ -522,6 +525,12 @@ static int int3400_thermal_probe(struct platform_device *pdev)
if (result)
goto free_rel_misc;
+ if (acpi_has_method(priv->adev->handle, "IMOK")) {
+ result = sysfs_create_group(&pdev->dev.kobj, &imok_attribute_group);
+ if (result)
+ goto free_imok;
+ }
+
if (priv->data_vault) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
@@ -545,6 +554,8 @@ free_sysfs:
}
free_uuid:
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+free_imok:
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
free_rel_misc:
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);
@@ -573,6 +584,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (priv->data_vault)
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
+ sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
thermal_zone_device_unregister(priv->thermal);
kfree(priv->data_vault);
kfree(priv->trts);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index b0eb5ece9243..a5b58ea89cc6 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -528,7 +528,7 @@ static int start_power_clamp(void)
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
- get_online_cpus();
+ cpus_read_lock();
/* prefer BSP */
control_cpu = 0;
@@ -542,7 +542,7 @@ static int start_power_clamp(void)
for_each_online_cpu(cpu) {
start_power_clamp_worker(cpu);
}
- put_online_cpus();
+ cpus_read_unlock();
return 0;
}
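cpus_read_lock()/cpus_read_unlock() are the current names for the CPU
hotplug read lock formerly spelled get_online_cpus()/put_online_cpus();
the surrounding pattern is unchanged:

	cpus_read_lock();
	for_each_online_cpu(cpu)
		start_work_on(cpu);	/* hypothetical per-CPU setup */
	cpus_read_unlock();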
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index 8ec10d55d421..cd80c7db4073 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -79,6 +79,8 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
{}
};
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 8d5ac2df26dc..7d942f71e532 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -31,3 +31,13 @@ config QCOM_SPMI_TEMP_ALARM
trip points. The temperature reported by the thermal sensor reflects the
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+
+config QCOM_LMH
+ tristate "Qualcomm Limits Management Hardware"
+ depends on ARCH_QCOM
+ help
+ This enables initialization of the Qualcomm limits management
+ hardware (LMh). LMh allows for hardware-enforced mitigation for CPUs based on
+ input from temperature and current sensors. On many newer Qualcomm SoCs
+ LMh is configured in the firmware and this feature need not be enabled.
+ However, on certain SoCs like sdm845, LMh has to be configured from the kernel.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 252ea7d9da0b..0fa2512042e7 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -5,3 +5,4 @@ qcom_tsens-y += tsens.o tsens-v2.o tsens-v1.o tsens-v0_1.o \
tsens-8960.o
obj-$(CONFIG_QCOM_SPMI_ADC_TM5) += qcom-spmi-adc-tm5.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
+obj-$(CONFIG_QCOM_LMH) += lmh.o
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
new file mode 100644
index 000000000000..eafa7526eb8b
--- /dev/null
+++ b/drivers/thermal/qcom/lmh.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2021, Linaro Limited. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/qcom_scm.h>
+
+#define LMH_NODE_DCVS 0x44435653
+#define LMH_CLUSTER0_NODE_ID 0x6370302D
+#define LMH_CLUSTER1_NODE_ID 0x6370312D
+
+#define LMH_SUB_FN_THERMAL 0x54484D4C
+#define LMH_SUB_FN_CRNT 0x43524E54
+#define LMH_SUB_FN_REL 0x52454C00
+#define LMH_SUB_FN_BCL 0x42434C00
+
+#define LMH_ALGO_MODE_ENABLE 0x454E424C
+#define LMH_TH_HI_THRESHOLD 0x48494748
+#define LMH_TH_LOW_THRESHOLD 0x4C4F5700
+#define LMH_TH_ARM_THRESHOLD 0x41524D00
+
+#define LMH_REG_DCVS_INTR_CLR 0x8
+
+struct lmh_hw_data {
+ void __iomem *base;
+ struct irq_domain *domain;
+ int irq;
+};
+
+static irqreturn_t lmh_handle_irq(int hw_irq, void *data)
+{
+ struct lmh_hw_data *lmh_data = data;
+ int irq = irq_find_mapping(lmh_data->domain, 0);
+
+ /* Call the cpufreq driver to handle the interrupt */
+ if (irq)
+ generic_handle_irq(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void lmh_enable_interrupt(struct irq_data *d)
+{
+ struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d);
+
+ /* Clear the existing interrupt */
+ writel(0xff, lmh_data->base + LMH_REG_DCVS_INTR_CLR);
+ enable_irq(lmh_data->irq);
+}
+
+static void lmh_disable_interrupt(struct irq_data *d)
+{
+ struct lmh_hw_data *lmh_data = irq_data_get_irq_chip_data(d);
+
+ disable_irq_nosync(lmh_data->irq);
+}
+
+static struct irq_chip lmh_irq_chip = {
+ .name = "lmh",
+ .irq_enable = lmh_enable_interrupt,
+ .irq_disable = lmh_disable_interrupt
+};
+
+static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct lmh_hw_data *lmh_data = d->host_data;
+
+ irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, lmh_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lmh_irq_ops = {
+ .map = lmh_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int lmh_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *cpu_node;
+ struct lmh_hw_data *lmh_data;
+ int temp_low, temp_high, temp_arm, cpu_id, ret;
+ u32 node_id;
+
+ lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
+ if (!lmh_data)
+ return -ENOMEM;
+
+ lmh_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lmh_data->base))
+ return PTR_ERR(lmh_data->base);
+
+ cpu_node = of_parse_phandle(np, "cpus", 0);
+ if (!cpu_node)
+ return -EINVAL;
+ cpu_id = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-high-millicelsius", &temp_high);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-high-millicelsius property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-low-millicelsius", &temp_low);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-low-millicelsius property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,lmh-temp-arm-millicelsius", &temp_arm);
+ if (ret) {
+ dev_err(dev, "missing qcom,lmh-temp-arm-millicelsius property\n");
+ return ret;
+ }
+
+ /*
+ * Only sdm845 has LMh hardware currently enabled from the HLOS. If this is
+ * needed for other platforms, revisit whether the <cpu-id, node-id> pair
+ * should be part of a DT match table.
+ */
+ if (cpu_id == 0) {
+ node_id = LMH_CLUSTER0_NODE_ID;
+ } else if (cpu_id == 4) {
+ node_id = LMH_CLUSTER1_NODE_ID;
+ } else {
+ dev_err(dev, "Wrong CPU id associated with LMh node\n");
+ return -EINVAL;
+ }
+
+ if (!qcom_scm_lmh_dcvsh_available())
+ return -EINVAL;
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_CRNT, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling current subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_REL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling reliability subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_BCL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret)
+ dev_err(dev, "Error %d enabling BCL subfunction\n", ret);
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error %d enabling thermal subfunction\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_profile_change(0x1);
+ if (ret) {
+ dev_err(dev, "Error %d changing profile\n", ret);
+ return ret;
+ }
+
+ /* Set default thermal trips */
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_ARM_THRESHOLD, temp_arm,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error setting thermal ARM threshold%d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_HI_THRESHOLD, temp_high,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error setting thermal HI threshold%d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_TH_LOW_THRESHOLD, temp_low,
+ LMH_NODE_DCVS, node_id, 0);
+ if (ret) {
+ dev_err(dev, "Error setting thermal ARM threshold%d\n", ret);
+ return ret;
+ }
+
+ lmh_data->irq = platform_get_irq(pdev, 0);
+ lmh_data->domain = irq_domain_add_linear(np, 1, &lmh_irq_ops, lmh_data);
+ if (!lmh_data->domain) {
+ dev_err(dev, "Error adding irq_domain\n");
+ return -EINVAL;
+ }
+
+ /* Disable the irq and let cpufreq enable it when ready to handle the interrupt */
+ irq_set_status_flags(lmh_data->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(dev, lmh_data->irq, lmh_handle_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "lmh-irq", lmh_data);
+ if (ret) {
+ dev_err(dev, "Error %d registering irq %x\n", ret, lmh_data->irq);
+ irq_domain_remove(lmh_data->domain);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id lmh_table[] = {
+ { .compatible = "qcom,sdm845-lmh", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lmh_table);
+
+static struct platform_driver lmh_driver = {
+ .probe = lmh_probe,
+ .driver = {
+ .name = "qcom-lmh",
+ .of_match_table = lmh_table,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(lmh_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM LMh driver");
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 232fd0b33325..8494cc04aa21 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -359,6 +359,12 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
&adc_tm->channels[i],
&adc_tm5_ops);
if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) == -ENODEV) {
+ dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n",
+ adc_tm->channels[i].channel);
+ continue;
+ }
+
dev_err(adc_tm->dev, "Error registering TZ zone for channel %d: %ld\n",
adc_tm->channels[i].channel, PTR_ERR(tzd));
return PTR_ERR(tzd);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index fdf16aa34eb4..85228d308dd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -84,7 +84,7 @@ struct rcar_gen3_thermal_tsc {
struct thermal_zone_device *zone;
struct equation_coefs coef;
int tj_t;
- int id; /* thermal channel id */
+ unsigned int id; /* thermal channel id */
};
struct rcar_gen3_thermal_priv {
@@ -190,10 +190,64 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
+static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
+ int mcelsius)
+{
+ int celsius, val;
+
+ celsius = DIV_ROUND_CLOSEST(mcelsius, 1000);
+ if (celsius <= INT_FIXPT(tsc->tj_t))
+ val = celsius * tsc->coef.a1 + tsc->coef.b1;
+ else
+ val = celsius * tsc->coef.a2 + tsc->coef.b2;
+
+ return INT_FIXPT(val);
+}
+
+static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+{
+ struct rcar_gen3_thermal_tsc *tsc = devdata;
+ u32 irqmsk = 0;
+
+ if (low != -INT_MAX) {
+ irqmsk |= IRQ_TEMPD1;
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1,
+ rcar_gen3_thermal_mcelsius_to_temp(tsc, low));
+ }
+
+ if (high != INT_MAX) {
+ irqmsk |= IRQ_TEMP2;
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2,
+ rcar_gen3_thermal_mcelsius_to_temp(tsc, high));
+ }
+
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, irqmsk);
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
+ .set_trips = rcar_gen3_thermal_set_trips,
};
+static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
+{
+ struct rcar_gen3_thermal_priv *priv = data;
+ unsigned int i;
+ u32 status;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
+ rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
+ if (status)
+ thermal_zone_device_update(priv->tscs[i]->zone,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ return IRQ_HANDLED;
+}
+
static const struct soc_device_attribute r8a7795es1[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
@@ -210,6 +264,9 @@ static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
+ if (tsc->zone->ops->set_trips)
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
+ IRQ_TEMPD1 | IRQ_TEMP2);
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR,
CTSR_PONM | CTSR_AOUT | CTSR_THBGR | CTSR_VMEN);
@@ -235,6 +292,9 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
+ if (tsc->zone->ops->set_trips)
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
+ IRQ_TEMPD1 | IRQ_TEMP2);
reg_val = rcar_gen3_thermal_read(tsc, REG_GEN3_THCTR);
reg_val |= THCTR_THSST;
@@ -303,6 +363,34 @@ static void rcar_gen3_hwmon_action(void *data)
thermal_remove_hwmon_sysfs(zone);
}
+static int rcar_gen3_thermal_request_irqs(struct rcar_gen3_thermal_priv *priv,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int i;
+ char *irqname;
+ int ret, irq;
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s:ch%d",
+ dev_name(dev), i);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rcar_gen3_thermal_irq,
+ IRQF_ONESHOT, irqname, priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int rcar_gen3_thermal_probe(struct platform_device *pdev)
{
struct rcar_gen3_thermal_priv *priv;
@@ -310,7 +398,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
const int *ths_tj_1 = of_device_get_match_data(dev);
struct resource *res;
struct thermal_zone_device *zone;
- int ret, i;
+ unsigned int i;
+ int ret;
/* default values if FUSEs are missing */
/* TODO: Read values from hardware on supported platforms */
@@ -326,6 +415,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ if (rcar_gen3_thermal_request_irqs(priv, pdev))
+ rcar_gen3_tz_of_ops.set_trips = NULL;
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -351,9 +443,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->tscs[i] = tsc;
- priv->thermal_init(tsc);
- rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
-
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
&rcar_gen3_tz_of_ops);
if (IS_ERR(zone)) {
@@ -363,6 +452,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
+ priv->thermal_init(tsc);
+ rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
+
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -376,7 +468,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret < 0)
goto error_unregister;
- dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
+ dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret);
}
priv->num_tscs = i;
@@ -401,8 +493,12 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+ struct thermal_zone_device *zone = tsc->zone;
priv->thermal_init(tsc);
+ if (zone->ops->set_trips)
+ rcar_gen3_thermal_set_trips(tsc, zone->prev_low_trip,
+ zone->prev_high_trip);
}
return 0;
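A worked pass through rcar_gen3_thermal_mcelsius_to_temp(), assuming
the driver's fixed-point helpers use a 7-bit fraction (an assumption
inferred from the INT_FIXPT() usage; the macro definitions sit outside
this diff): for a 95000 mdegC trip, celsius = DIV_ROUND_CLOSEST(95000,
1000) = 95; with a2 and b2 already scaled by 2^7, val = 95 * a2 + b2
stays in fixed point, and INT_FIXPT(val) shifts the fraction away to
produce the raw value written to REG_GEN3_IRQTEMP2.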
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index e9a90bc23b11..f4ab4c5b4b62 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1073,6 +1073,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
if (IS_ERR(data->sclk)) {
dev_err(&pdev->dev, "Failed to get sclk\n");
+ ret = PTR_ERR(data->sclk);
goto err_clk;
} else {
ret = clk_prepare_enable(data->sclk);
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index 46c2215867cd..cfa41d87a794 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVIDIA Tegra thermal drivers"
-depends on ARCH_TEGRA
+depends on ARCH_TEGRA || COMPILE_TEST
config TEGRA_SOCTHERM
tristate "Tegra SOCTHERM thermal management"
@@ -18,4 +18,11 @@ config TEGRA_BPMP_THERMAL
Enable this option for support for sensing system temperature of NVIDIA
Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+config TEGRA30_TSENSOR
+ tristate "Tegra30 Thermal Sensor"
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ help
+ Enable this option to support thermal management of NVIDIA Tegra30
+ system-on-chip.
+
endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
index 0f2b66edf0d2..eb27d194c583 100644
--- a/drivers/thermal/tegra/Makefile
+++ b/drivers/thermal/tegra/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o
+obj-$(CONFIG_TEGRA30_TSENSOR) += tegra30-tsensor.o
tegra-soctherm-y := soctherm.o soctherm-fuse.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 8e303e9d1dc0..210325f92559 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -450,8 +450,8 @@ static int enforce_temp_range(struct device *dev, int trip_temp)
temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
if (temp != trip_temp)
- dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
- trip_temp, temp);
+ dev_dbg(dev, "soctherm: trip temperature %d forced to %d\n",
+ trip_temp, temp);
return temp;
}
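
For context, enforce_temp_range() clamps the requested trip temperature into the range the calibration supports, and after this patch it only reports the adjustment at debug level, since out-of-range device-tree trips are routine rather than alarming. The clamp itself, as a runnable snippet with illustrative (not real soctherm) limits:

#include <stdio.h>

/* same semantics as the kernel's clamp_val() for this use */
static int clamp_val(int v, int lo, int hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
        int min_low = -127000, max_high = 127000;       /* millicelsius */
        int req[] = { -150000, 85000, 200000 };

        for (int i = 0; i < 3; i++)
                printf("%7d -> %7d\n", req[i],
                       clamp_val(req[i], min_low, max_high));
        return 0;
}
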
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
new file mode 100644
index 000000000000..9b6b693cbcf8
--- /dev/null
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tegra30 SoC Thermal Sensor driver
+ *
+ * Based on downstream HWMON driver from NVIDIA.
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2021 GRATE-DRIVER project
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
+
+#define TSENSOR_SENSOR0_CONFIG0 0x0
+#define TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP BIT(0)
+#define TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN BIT(1)
+#define TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN BIT(2)
+#define TSENSOR_SENSOR0_CONFIG0_DVFS_EN BIT(3)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN BIT(4)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN BIT(5)
+#define TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN BIT(6)
+#define TSENSOR_SENSOR0_CONFIG0_M GENMASK(23, 8)
+#define TSENSOR_SENSOR0_CONFIG0_N GENMASK(31, 24)
+
+#define TSENSOR_SENSOR0_CONFIG1 0x8
+#define TSENSOR_SENSOR0_CONFIG1_TH1 GENMASK(15, 0)
+#define TSENSOR_SENSOR0_CONFIG1_TH2 GENMASK(31, 16)
+
+#define TSENSOR_SENSOR0_CONFIG2 0xc
+#define TSENSOR_SENSOR0_CONFIG2_TH3 GENMASK(15, 0)
+
+#define TSENSOR_SENSOR0_STATUS0 0x18
+#define TSENSOR_SENSOR0_STATUS0_STATE GENMASK(2, 0)
+#define TSENSOR_SENSOR0_STATUS0_INTR BIT(8)
+#define TSENSOR_SENSOR0_STATUS0_CURRENT_VALID BIT(9)
+
+#define TSENSOR_SENSOR0_TS_STATUS1 0x1c
+#define TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT GENMASK(31, 16)
+
+#define TEGRA30_FUSE_TEST_PROG_VER 0x28
+
+#define TEGRA30_FUSE_TSENSOR_CALIB 0x98
+#define TEGRA30_FUSE_TSENSOR_CALIB_LOW GENMASK(15, 0)
+#define TEGRA30_FUSE_TSENSOR_CALIB_HIGH GENMASK(31, 16)
+
+#define TEGRA30_FUSE_SPARE_BIT 0x144
+
+struct tegra_tsensor;
+
+struct tegra_tsensor_calibration_data {
+ int a, b, m, n, p, r;
+};
+
+struct tegra_tsensor_channel {
+ void __iomem *regs;
+ unsigned int id;
+ struct tegra_tsensor *ts;
+ struct thermal_zone_device *tzd;
+};
+
+struct tegra_tsensor {
+ void __iomem *regs;
+ bool swap_channels;
+ struct clk *clk;
+ struct device *dev;
+ struct reset_control *rst;
+ struct tegra_tsensor_channel ch[2];
+ struct tegra_tsensor_calibration_data calib;
+};
+
+static int tegra_tsensor_hw_enable(const struct tegra_tsensor *ts)
+{
+ u32 val;
+ int err;
+
+ err = reset_control_assert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(ts->clk);
+ if (err) {
+ dev_err(ts->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ fsleep(1000);
+
+ err = reset_control_deassert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to deassert hardware reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ /*
+ * Sensors are enabled after reset by default, but they don't start
+ * gauging until the clock counter is programmed.
+ *
+ * M: number of reference clock pulses after which every
+ * temperature / voltage measurement is made
+ *
+ * N: number of reference clock counts for which the counter runs
+ */
+ val = FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_M, 12500);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_N, 255);
+
+ /* apply the same configuration to both channels */
+ writel_relaxed(val, ts->regs + 0x40 + TSENSOR_SENSOR0_CONFIG0);
+ writel_relaxed(val, ts->regs + 0x80 + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(ts->clk);
+
+ return err;
+}
+
+static int tegra_tsensor_hw_disable(const struct tegra_tsensor *ts)
+{
+ int err;
+
+ err = reset_control_assert(ts->rst);
+ if (err) {
+ dev_err(ts->dev, "failed to assert hardware reset: %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(ts->clk);
+
+ return 0;
+}
+
+static void devm_tegra_tsensor_hw_disable(void *data)
+{
+ const struct tegra_tsensor *ts = data;
+
+ tegra_tsensor_hw_disable(ts);
+}
+
+static int tegra_tsensor_get_temp(void *data, int *temp)
+{
+ const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor *ts = tsc->ts;
+ int err, c1, c2, c3, c4, counter;
+ u32 val;
+
+ /*
+ * The counter will be invalid if the hardware is misprogrammed or if
+ * not enough time has passed since the sensor was enabled.
+ */
+ err = readl_relaxed_poll_timeout(tsc->regs + TSENSOR_SENSOR0_STATUS0, val,
+ val & TSENSOR_SENSOR0_STATUS0_CURRENT_VALID,
+ 21 * USEC_PER_MSEC,
+ 21 * USEC_PER_MSEC * 50);
+ if (err) {
+ dev_err_once(ts->dev, "ch%u: counter invalid\n", tsc->id);
+ return err;
+ }
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_TS_STATUS1);
+ counter = FIELD_GET(TSENSOR_SENSOR0_TS_STATUS1_CURRENT_COUNT, val);
+
+ /*
+ * This shouldn't happen with a valid counter status; nevertheless,
+ * let's verify the value since it lives in a register separate from
+ * the status.
+ */
+ if (counter == 0xffff) {
+ dev_err_once(ts->dev, "ch%u: counter overflow\n", tsc->id);
+ return -EINVAL;
+ }
+
+ /*
+ * temperature = a * counter + b
+ * temperature = m * (temperature ^ 2) + n * temperature + p
+ */
+ c1 = DIV_ROUND_CLOSEST(ts->calib.a * counter + ts->calib.b, 1000000);
+ c1 = c1 ?: 1;
+ c2 = DIV_ROUND_CLOSEST(ts->calib.p, c1);
+ c3 = c1 * ts->calib.m;
+ c4 = ts->calib.n;
+
+ *temp = DIV_ROUND_CLOSEST(c1 * (c2 + c3 + c4), 1000);
+
+ return 0;
+}
+
+static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int temp)
+{
+ int c1, c2;
+
+ c1 = DIV_ROUND_CLOSEST(ts->calib.p - temp * 1000, ts->calib.m);
+ c2 = -ts->calib.r - int_sqrt(ts->calib.r * ts->calib.r - c1);
+
+ return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a);
+}
+
+static int tegra_tsensor_set_trips(void *data, int low, int high)
+{
+ const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor *ts = tsc->ts;
+ u32 val;
+
+ /*
+ * TSENSOR doesn't trigger an interrupt on the "low" temperature
+ * breach, hence bail out if the high temperature is unspecified.
+ */
+ if (high == INT_MAX)
+ return 0;
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+ val &= ~TSENSOR_SENSOR0_CONFIG1_TH1;
+
+ high = tegra_tsensor_temp_to_counter(ts, high);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH1, high);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ops = {
+ .get_temp = tegra_tsensor_get_temp,
+ .set_trips = tegra_tsensor_set_trips,
+};
+
+static bool
+tegra_tsensor_handle_channel_interrupt(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ u32 val;
+
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_STATUS0);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_STATUS0);
+
+ if (FIELD_GET(TSENSOR_SENSOR0_STATUS0_STATE, val) == 5)
+ dev_err_ratelimited(ts->dev, "ch%u: counter overflowed\n", id);
+
+ if (!FIELD_GET(TSENSOR_SENSOR0_STATUS0_INTR, val))
+ return false;
+
+ thermal_zone_device_update(tsc->tzd, THERMAL_EVENT_UNSPECIFIED);
+
+ return true;
+}
+
+static irqreturn_t tegra_tsensor_isr(int irq, void *data)
+{
+ const struct tegra_tsensor *ts = data;
+ bool handled = false;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++)
+ handled |= tegra_tsensor_handle_channel_interrupt(ts, i);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int tegra_tsensor_disable_hw_channel(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ struct thermal_zone_device *tzd = tsc->tzd;
+ u32 val;
+ int err;
+
+ if (!tzd)
+ goto stop_channel;
+
+ err = thermal_zone_device_disable(tzd);
+ if (err) {
+ dev_err(ts->dev, "ch%u: failed to disable zone: %d\n", id, err);
+ return err;
+ }
+
+stop_channel:
+ /* stop channel gracefully */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP, 1);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+}
+
+static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
+ int *hot_trip, int *crit_trip)
+{
+ unsigned int i;
+
+ /*
+ * 90C is the maximum critical temperature across all Tegra30 SoC
+ * variants; use it for the default trip if the device-tree omits one.
+ */
+ *hot_trip = 85000;
+ *crit_trip = 90000;
+
+ for (i = 0; i < tzd->trips; i++) {
+ enum thermal_trip_type type;
+ int trip_temp;
+
+ tzd->ops->get_trip_temp(tzd, i, &trip_temp);
+ tzd->ops->get_trip_type(tzd, i, &type);
+
+ if (type == THERMAL_TRIP_HOT)
+ *hot_trip = trip_temp;
+
+ if (type == THERMAL_TRIP_CRITICAL)
+ *crit_trip = trip_temp;
+ }
+
+ /* clamp hardware trips to the calibration limits */
+ *hot_trip = clamp(*hot_trip, 25000, 90000);
+
+ /*
+ * The kernel performs a normal system shutdown when it sees
+ * that the critical temperature is breached, hence set the
+ * hardware limit 5C higher in order to allow the system to
+ * shut down gracefully before the signal is sent to the Power
+ * Management controller.
+ */
+ *crit_trip = clamp(*crit_trip + 5000, 25000, 90000);
+}
+
+static int tegra_tsensor_enable_hw_channel(const struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ const struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ struct thermal_zone_device *tzd = tsc->tzd;
+ int err, hot_trip = 0, crit_trip = 0;
+ u32 val;
+
+ if (!tzd) {
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ return 0;
+ }
+
+ tegra_tsensor_get_hw_channel_trips(tzd, &hot_trip, &crit_trip);
+
+ /* prevent potential racing with tegra_tsensor_set_trips() */
+ mutex_lock(&tzd->lock);
+
+ dev_info_once(ts->dev, "ch%u: PMC emergency shutdown trip set to %dC\n",
+ id, DIV_ROUND_CLOSEST(crit_trip, 1000));
+
+ hot_trip = tegra_tsensor_temp_to_counter(ts, hot_trip);
+ crit_trip = tegra_tsensor_temp_to_counter(ts, crit_trip);
+
+ /* program LEVEL2 counter threshold */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+ val &= ~TSENSOR_SENSOR0_CONFIG1_TH2;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG1_TH2, hot_trip);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG1);
+
+ /* program LEVEL3 counter threshold */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG2);
+ val &= ~TSENSOR_SENSOR0_CONFIG2_TH3;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG2_TH3, crit_trip);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG2);
+
+ /*
+ * Enable sensor, emergency shutdown, interrupts for level 1/2/3
+ * breaches and counter overflow condition.
+ *
+ * Disable DIV2 throttle for now since we need to figure out how
+ * to integrate it properly with the thermal framework.
+ *
+ * Thermal levels supported by hardware:
+ *
+ * Level 0 = cold
+ * Level 1 = passive cooling (cpufreq DVFS)
+ * Level 2 = passive cooling assisted by hardware (DIV2)
+ * Level 3 = emergency shutdown assisted by hardware (PMC)
+ */
+ val = readl_relaxed(tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+ val &= ~TSENSOR_SENSOR0_CONFIG0_SENSOR_STOP;
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_DVFS_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_HW_FREQ_DIV_EN, 0);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_THERMAL_RST_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_OVERFLOW_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_HW_FREQ_DIV_EN, 1);
+ val |= FIELD_PREP(TSENSOR_SENSOR0_CONFIG0_INTR_THERMAL_RST_EN, 1);
+ writel_relaxed(val, tsc->regs + TSENSOR_SENSOR0_CONFIG0);
+
+ mutex_unlock(&tzd->lock);
+
+ err = thermal_zone_device_enable(tzd);
+ if (err) {
+ dev_err(ts->dev, "ch%u: failed to enable zone: %d\n", id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool tegra_tsensor_fuse_read_spare(unsigned int spare)
+{
+ u32 val = 0;
+
+ tegra_fuse_readl(TEGRA30_FUSE_SPARE_BIT + spare * 4, &val);
+
+ return !!val;
+}
+
+static int tegra_tsensor_nvmem_setup(struct tegra_tsensor *ts)
+{
+ u32 i, ate_ver = 0, cal = 0, t1_25C = 0, t2_90C = 0;
+ int err, c1_25C, c2_90C;
+
+ err = tegra_fuse_readl(TEGRA30_FUSE_TEST_PROG_VER, &ate_ver);
+ if (err) {
+ dev_err_probe(ts->dev, err, "failed to get ATE version\n");
+ return err;
+ }
+
+ if (ate_ver < 8) {
+ dev_info(ts->dev, "unsupported ATE version: %u\n", ate_ver);
+ return -ENODEV;
+ }
+
+ /*
+ * We have two TSENSOR channels in two different spots on the SoC.
+ * The second channel provides more accurate data on older SoC
+ * versions, so use it as the primary channel.
+ */
+ if (ate_ver <= 21) {
+ dev_info_once(ts->dev,
+ "older ATE version detected, channels remapped\n");
+ ts->swap_channels = true;
+ }
+
+ err = tegra_fuse_readl(TEGRA30_FUSE_TSENSOR_CALIB, &cal);
+ if (err) {
+ dev_err(ts->dev, "failed to get calibration data: %d\n", err);
+ return err;
+ }
+
+ /* get calibrated counter values for 25C/90C thresholds */
+ c1_25C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_LOW, cal);
+ c2_90C = FIELD_GET(TEGRA30_FUSE_TSENSOR_CALIB_HIGH, cal);
+
+ /* and calibrated temperatures corresponding to the counter values */
+ for (i = 0; i < 7; i++) {
+ t1_25C |= tegra_tsensor_fuse_read_spare(14 + i) << i;
+ t1_25C |= tegra_tsensor_fuse_read_spare(21 + i) << i;
+
+ t2_90C |= tegra_tsensor_fuse_read_spare(0 + i) << i;
+ t2_90C |= tegra_tsensor_fuse_read_spare(7 + i) << i;
+ }
+
+ if (c2_90C - c1_25C <= t2_90C - t1_25C) {
+ dev_err(ts->dev, "invalid calibration data: %d %d %u %u\n",
+ c2_90C, c1_25C, t2_90C, t1_25C);
+ return -EINVAL;
+ }
+
+ /* all calibration coefficients are premultiplied by 1000000 */
+
+ ts->calib.a = DIV_ROUND_CLOSEST((t2_90C - t1_25C) * 1000000,
+ (c2_90C - c1_25C));
+
+ ts->calib.b = t1_25C * 1000000 - ts->calib.a * c1_25C;
+
+ if (tegra_sku_info.revision == TEGRA_REVISION_A01) {
+ ts->calib.m = -2775;
+ ts->calib.n = 1338811;
+ ts->calib.p = -7300000;
+ } else {
+ ts->calib.m = -3512;
+ ts->calib.n = 1528943;
+ ts->calib.p = -11100000;
+ }
+
+ /* precompute the coefficient of the reduced quadratic equation */
+ ts->calib.r = DIV_ROUND_CLOSEST(ts->calib.n, ts->calib.m * 2);
+
+ dev_info_once(ts->dev,
+ "calibration: %d %d %u %u ATE ver: %u SoC rev: %u\n",
+ c2_90C, c1_25C, t2_90C, t1_25C, ate_ver,
+ tegra_sku_info.revision);
+
+ return 0;
+}
+
+static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
+ unsigned int id)
+{
+ struct tegra_tsensor_channel *tsc = &ts->ch[id];
+ unsigned int hw_id = ts->swap_channels ? !id : id;
+
+ tsc->ts = ts;
+ tsc->id = id;
+ tsc->regs = ts->regs + 0x40 * (hw_id + 1);
+
+ tsc->tzd = devm_thermal_zone_of_sensor_register(ts->dev, id, tsc, &ops);
+ if (IS_ERR(tsc->tzd)) {
+ if (PTR_ERR(tsc->tzd) != -ENODEV)
+ return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd),
+ "failed to register thermal zone\n");
+
+ /*
+ * It's okay if the sensor isn't assigned to any thermal zone
+ * in the device-tree.
+ */
+ tsc->tzd = NULL;
+ return 0;
+ }
+
+ if (devm_thermal_add_hwmon_sysfs(tsc->tzd))
+ dev_warn(ts->dev, "failed to add hwmon sysfs attributes\n");
+
+ return 0;
+}
+
+static int tegra_tsensor_probe(struct platform_device *pdev)
+{
+ struct tegra_tsensor *ts;
+ unsigned int i;
+ int err, irq;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ts->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ts);
+
+ ts->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ts->regs))
+ return PTR_ERR(ts->regs);
+
+ ts->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ts->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts->clk),
+ "failed to get clock\n");
+
+ ts->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(ts->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts->rst),
+ "failed to get reset control\n");
+
+ err = tegra_tsensor_nvmem_setup(ts);
+ if (err)
+ return err;
+
+ err = tegra_tsensor_hw_enable(ts);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ devm_tegra_tsensor_hw_disable,
+ ts);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_register_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ tegra_tsensor_isr, IRQF_ONESHOT,
+ "tegra_tsensor", ts);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request interrupt\n");
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_enable_hw_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_tsensor_suspend(struct device *dev)
+{
+ struct tegra_tsensor *ts = dev_get_drvdata(dev);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_disable_hw_channel(ts, i);
+ if (err)
+ goto enable_channel;
+ }
+
+ err = tegra_tsensor_hw_disable(ts);
+ if (err)
+ goto enable_channel;
+
+ return 0;
+
+enable_channel:
+ while (i--)
+ tegra_tsensor_enable_hw_channel(ts, i);
+
+ return err;
+}
+
+static int __maybe_unused tegra_tsensor_resume(struct device *dev)
+{
+ struct tegra_tsensor *ts = dev_get_drvdata(dev);
+ unsigned int i;
+ int err;
+
+ err = tegra_tsensor_hw_enable(ts);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ts->ch); i++) {
+ err = tegra_tsensor_enable_hw_channel(ts, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_tsensor_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_tsensor_suspend,
+ tegra_tsensor_resume)
+};
+
+static const struct of_device_id tegra_tsensor_of_match[] = {
+ { .compatible = "nvidia,tegra30-tsensor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_tsensor_of_match);
+
+static struct platform_driver tegra_tsensor_driver = {
+ .probe = tegra_tsensor_probe,
+ .driver = {
+ .name = "tegra30-tsensor",
+ .of_match_table = tegra_tsensor_of_match,
+ .pm = &tegra_tsensor_pm_ops,
+ },
+};
+module_platform_driver(tegra_tsensor_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra30 Thermal Sensor driver");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_LICENSE("GPL");
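
To make the fixed-point math in tegra_tsensor_get_temp() and tegra_tsensor_temp_to_counter() concrete, here is a standalone rendering of the same arithmetic. The m/n/p constants are the post-A01 values from the driver; the fuse points and the sample counter are made up, and int_sqrt() is approximated by a truncated floating-point sqrt():

#include <math.h>
#include <stdio.h>

/* signed DIV_ROUND_CLOSEST, matching the kernel's behavior */
static long long div_rc(long long x, long long d)
{
        return (x < 0) == (d < 0) ? (x + d / 2) / d : (x - d / 2) / d;
}

/* post-A01 quadratic coefficients; a and b are derived from the
 * made-up fuse points below. All premultiplied by 1000000. */
static const long long m = -3512, n = 1528943, p = -11100000;
static long long a, b;

static long long counter_to_temp(long long counter)    /* millicelsius */
{
        long long c1 = div_rc(a * counter + b, 1000000);
        long long c2, c3, c4;

        c1 = c1 ? c1 : 1;
        c2 = div_rc(p, c1);             /* p / T                 */
        c3 = c1 * m;                    /* m * T                 */
        c4 = n;                         /* n                     */

        return div_rc(c1 * (c2 + c3 + c4), 1000);   /* m*T^2 + n*T + p */
}

static long long temp_to_counter(long long temp)       /* temp in mC */
{
        long long r = div_rc(n, m * 2);             /* reduced-quadratic coeff */
        long long c1 = div_rc(p - temp * 1000, m);
        long long c2 = -r - (long long)sqrt((double)(r * r - c1));

        return div_rc(c2 * 1000000 - b, a);
}

int main(void)
{
        /* pretend the fuses said: counter 380 at 25C, counter 880 at 90C */
        long long c1_25C = 380, c2_90C = 880, t1_25C = 25, t2_90C = 90;

        a = div_rc((t2_90C - t1_25C) * 1000000, c2_90C - c1_25C);
        b = t1_25C * 1000000 - a * c1_25C;

        printf("counter 600 -> %lld mC\n", counter_to_temp(600)); /* ~61.2C */
        printf("85000 mC    -> counter %lld\n", temp_to_counter(85000));
        return 0;
}
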
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index a503c1b2bfd9..3d91982d8371 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -33,6 +33,16 @@ config VDPA_SIM_BLOCK
vDPA block device simulator which terminates IO requests in a
memory buffer.
+config VDPA_USER
+ tristate "VDUSE (vDPA Device in Userspace) support"
+ depends on EVENTFD && MMU && HAS_DMA
+ select DMA_OPS
+ select VHOST_IOTLB
+ select IOMMU_IOVA
+ help
+ With VDUSE it is possible to emulate a vDPA Device
+ in a userspace program.
+
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
@@ -53,6 +63,7 @@ config MLX5_VDPA
config MLX5_VDPA_NET
tristate "vDPA driver for ConnectX devices"
select MLX5_VDPA
+ select VHOST_RING
depends on MLX5_CORE
help
VDPA network driver for ConnectX6 and newer. Provides offloading
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index 67fe7f3d6943..f02ebed33f19 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA) += vdpa.o
obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
+obj-$(CONFIG_VDPA_USER) += vdpa_user/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
obj-$(CONFIG_VP_VDPA) += virtio_pci/
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 6e197fe0fcf9..2808f1ba9f7b 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -158,7 +158,9 @@ next:
return -EIO;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ hw->nr_vring = ifc_ioread16(&hw->common_cfg->num_queues);
+
+ for (i = 0; i < hw->nr_vring; i++) {
ifc_iowrite16(i, &hw->common_cfg->queue_select);
notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
hw->vring[i].notify_addr = hw->notify_base +
@@ -304,7 +306,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ q_pair_id = qid / hw->nr_vring;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = ifc_ioread16(avail_idx_addr);
@@ -318,7 +320,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ q_pair_id = qid / hw->nr_vring;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
ifc_iowrite16(num, avail_idx_addr);
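
The ifcvf hunks above replace the compile-time IFCVF_MAX_QUEUE_PAIRS * 2 bound with the nr_vring value read from the device's common config space, so loops are sized by what the VF actually advertises. The shape of that pattern, reduced to standalone C with a faked register read:

#include <stdio.h>

#define IFCVF_MAX_QUEUES 17     /* static upper bound for the arrays */

struct hw {
        unsigned short nr_vring;                   /* device-advertised */
        unsigned short notify_off[IFCVF_MAX_QUEUES];
};

/* stand-in for ifc_ioread16(&common_cfg->queue_notify_off) */
static unsigned short fake_notify_off(int q)
{
        return 4 * q;
}

int main(void)
{
        struct hw hw = { .nr_vring = 5 };          /* pretend num_queues == 5 */

        for (int i = 0; i < hw.nr_vring; i++)      /* loop to the real count */
                hw.notify_off[i] = fake_notify_off(i);

        printf("configured %u of max %u queues\n",
               hw.nr_vring, IFCVF_MAX_QUEUES);
        return 0;
}
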
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 2996db0da490..09918af3ecf8 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -22,17 +22,8 @@
#define N3000_DEVICE_ID 0x1041
#define N3000_SUBSYS_DEVICE_ID 0x001A
-#define IFCVF_NET_SUPPORTED_FEATURES \
- ((1ULL << VIRTIO_NET_F_MAC) | \
- (1ULL << VIRTIO_F_ANY_LAYOUT) | \
- (1ULL << VIRTIO_F_VERSION_1) | \
- (1ULL << VIRTIO_NET_F_STATUS) | \
- (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
- (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
- (1ULL << VIRTIO_NET_F_MRG_RXBUF))
-
-/* Only one queue pair for now. */
-#define IFCVF_MAX_QUEUE_PAIRS 1
+/* Max 8 data queue pairs (16 queues) and one control vq for now. */
+#define IFCVF_MAX_QUEUES 17
#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
#define IFCVF_QUEUE_MAX 32768
@@ -51,8 +42,6 @@
#define ifcvf_private_to_vf(adapter) \
(&((struct ifcvf_adapter *)adapter)->vf)
-#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
-
struct vring_info {
u64 desc;
u64 avail;
@@ -83,7 +72,7 @@ struct ifcvf_hw {
u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *net_cfg;
- struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
+ struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
@@ -103,7 +92,13 @@ struct ifcvf_vring_lm_cfg {
struct ifcvf_lm_cfg {
u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
- struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
+ struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUES];
+};
+
+struct ifcvf_vdpa_mgmt_dev {
+ struct vdpa_mgmt_dev mdev;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
};
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
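
The new ifcvf_vdpa_mgmt_dev embeds the generic vdpa_mgmt_dev so that management-ops callbacks handed only the generic pointer can recover the driver-private structure via container_of(). A minimal standalone illustration of the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct vdpa_mgmt_dev_like { int id; };

struct mgmt_dev {
        struct vdpa_mgmt_dev_like mdev;  /* embedded generic part */
        const char *pci_name;            /* driver-private part */
};

static void dev_add(struct vdpa_mgmt_dev_like *mdev)
{
        struct mgmt_dev *m = container_of(mdev, struct mgmt_dev, mdev);

        printf("adding vDPA device on %s\n", m->pci_name);
}

int main(void)
{
        struct mgmt_dev m = { .mdev = { .id = 1 },
                              .pci_name = "0000:01:00.2" };

        dev_add(&m.mdev);       /* callback sees only the embedded member */
        return 0;
}
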
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 351c6cfb24c3..dcd648e1f7e7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -63,9 +63,13 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct ifcvf_hw *vf = &adapter->vf;
int vector, i, ret, irq;
+ u16 max_intr;
- ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
- IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+ /* all queues and config interrupt */
+ max_intr = vf->nr_vring + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, max_intr,
+ max_intr, PCI_IRQ_MSIX);
if (ret < 0) {
IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
return ret;
@@ -83,7 +87,7 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
return ret;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ for (i = 0; i < vf->nr_vring; i++) {
snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
pci_name(pdev), i);
vector = i + IFCVF_MSI_QUEUE_OFF;
@@ -112,7 +116,6 @@ static int ifcvf_start_datapath(void *private)
u8 status;
int ret;
- vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
ret = ifcvf_start_hw(vf);
if (ret < 0) {
status = ifcvf_get_status(vf);
@@ -128,7 +131,7 @@ static int ifcvf_stop_datapath(void *private)
struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
int i;
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].cb.callback = NULL;
ifcvf_stop_hw(vf);
@@ -141,7 +144,7 @@ static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
int i;
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ for (i = 0; i < vf->nr_vring; i++) {
vf->vring[i].last_avail_idx = 0;
vf->vring[i].desc = 0;
vf->vring[i].avail = 0;
@@ -171,17 +174,12 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
struct pci_dev *pdev = adapter->pdev;
-
+ u32 type = vf->dev_type;
u64 features;
- switch (vf->dev_type) {
- case VIRTIO_ID_NET:
- features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
- break;
- case VIRTIO_ID_BLOCK:
+ if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
features = ifcvf_get_features(vf);
- break;
- default:
+ else {
features = 0;
IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
}
@@ -218,23 +216,12 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
int ret;
vf = vdpa_to_vf(vdpa_dev);
- adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+ adapter = vdpa_to_adapter(vdpa_dev);
status_old = ifcvf_get_status(vf);
if (status_old == status)
return;
- if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
- !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
- ifcvf_stop_datapath(adapter);
- ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
- }
-
- if (status == 0) {
- ifcvf_reset_vring(adapter);
- return;
- }
-
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
!(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
ret = ifcvf_request_irq(adapter);
@@ -254,6 +241,29 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
ifcvf_set_status(vf, status);
}
+static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+ u8 status_old;
+
+ vf = vdpa_to_vf(vdpa_dev);
+ adapter = vdpa_to_adapter(vdpa_dev);
+ status_old = ifcvf_get_status(vf);
+
+ if (status_old == 0)
+ return 0;
+
+ if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
+ ifcvf_stop_datapath(adapter);
+ ifcvf_free_irq(adapter, vf->nr_vring);
+ }
+
+ ifcvf_reset_vring(adapter);
+
+ return 0;
+}
+
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
return IFCVF_QUEUE_MAX;
@@ -437,6 +447,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_features = ifcvf_vdpa_set_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
+ .reset = ifcvf_vdpa_reset,
.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
.get_vq_state = ifcvf_vdpa_get_vq_state,
.set_vq_state = ifcvf_vdpa_set_vq_state,
@@ -458,63 +469,63 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_vq_notification = ifcvf_get_vq_notification,
};
-static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct virtio_device_id id_table_net[] = {
+ {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static struct virtio_device_id id_table_blk[] = {
+ {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
+ {0},
+};
+
+static u32 get_dev_type(struct pci_dev *pdev)
{
- struct device *dev = &pdev->dev;
- struct ifcvf_adapter *adapter;
- struct ifcvf_hw *vf;
- int ret, i;
+ u32 dev_type;
- ret = pcim_enable_device(pdev);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to enable device\n");
- return ret;
- }
+ /* This driver drives both modern virtio devices and transitional
+ * devices in modern mode.
+ * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM,
+ * so legacy devices and transitional devices in legacy
+ * mode will not work for vDPA; this driver will not
+ * drive devices with a legacy interface.
+ */
- ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
- IFCVF_DRIVER_NAME);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to request MMIO region\n");
- return ret;
- }
+ if (pdev->device < 0x1040)
+ dev_type = pdev->subsystem_device;
+ else
+ dev_type = pdev->device - 0x1040;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (ret) {
- IFCVF_ERR(pdev, "No usable DMA configuration\n");
- return ret;
- }
+ return dev_type;
+}
- ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
- if (ret) {
- IFCVF_ERR(pdev,
- "Failed for adding devres for freeing irq vectors\n");
- return ret;
- }
+static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct ifcvf_adapter *adapter;
+ struct pci_dev *pdev;
+ struct ifcvf_hw *vf;
+ struct device *dev;
+ int ret, i;
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ if (ifcvf_mgmt_dev->adapter)
+ return -EOPNOTSUPP;
+
+ pdev = ifcvf_mgmt_dev->pdev;
+ dev = &pdev->dev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, NULL);
+ dev, &ifc_vdpa_ops, name, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
}
- pci_set_master(pdev);
- pci_set_drvdata(pdev, adapter);
+ ifcvf_mgmt_dev->adapter = adapter;
+ pci_set_drvdata(pdev, ifcvf_mgmt_dev);
vf = &adapter->vf;
-
- /* This drirver drives both modern virtio devices and transitional
- * devices in modern mode.
- * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
- * so legacy devices and transitional devices in legacy
- * mode will not work for vDPA, this driver will not
- * drive devices with legacy interface.
- */
- if (pdev->device < 0x1040)
- vf->dev_type = pdev->subsystem_device;
- else
- vf->dev_type = pdev->device - 0x1040;
-
+ vf->dev_type = get_dev_type(pdev);
vf->base = pcim_iomap_table(pdev);
adapter->pdev = pdev;
@@ -526,14 +537,15 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ for (i = 0; i < vf->nr_vring; i++)
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
- ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
+ adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+ ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
if (ret) {
- IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
+ IFCVF_ERR(pdev, "Failed to register to vDPA bus");
goto err;
}
@@ -544,11 +556,100 @@ err:
return ret;
}
+static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+
+ ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+ _vdpa_unregister_device(dev);
+ ifcvf_mgmt_dev->adapter = NULL;
+}
+
+static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
+ .dev_add = ifcvf_vdpa_dev_add,
+ .dev_del = ifcvf_vdpa_dev_del
+};
+
+static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ struct device *dev = &pdev->dev;
+ u32 dev_type;
+ int ret;
+
+ ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+ if (!ifcvf_mgmt_dev) {
+ IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+ return -ENOMEM;
+ }
+
+ dev_type = get_dev_type(pdev);
+ switch (dev_type) {
+ case VIRTIO_ID_NET:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_net;
+ break;
+ case VIRTIO_ID_BLOCK:
+ ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
+ break;
+ default:
+ IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+ ifcvf_mgmt_dev->mdev.device = dev;
+ ifcvf_mgmt_dev->pdev = pdev;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to enable device\n");
+ goto err;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
+ IFCVF_DRIVER_NAME);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MMIO region\n");
+ goto err;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev, "No usable DMA configuration\n");
+ goto err;
+ }
+
+ ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed for adding devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+
+ ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to initialize the management interfaces\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(ifcvf_mgmt_dev);
+ return ret;
+}
+
static void ifcvf_remove(struct pci_dev *pdev)
{
- struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);
+ struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
- vdpa_unregister_device(&adapter->vdpa);
+ ifcvf_mgmt_dev = pci_get_drvdata(pdev);
+ vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
+ kfree(ifcvf_mgmt_dev);
}
static struct pci_device_id ifcvf_pci_ids[] = {
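
get_dev_type() above encodes the virtio PCI ID convention: modern devices use PCI device ID 0x1040 plus the virtio device ID, while transitional devices (IDs below 0x1040) carry the virtio device ID in the PCI subsystem device ID. Standalone rendering:

#include <stdio.h>

static unsigned int get_dev_type(unsigned int device, unsigned int subsystem)
{
        /* modern: 0x1040 + virtio ID; transitional: ID in subsystem */
        return device < 0x1040 ? subsystem : device - 0x1040;
}

int main(void)
{
        printf("modern net (0x1041): %u\n", get_dev_type(0x1041, 0x0));
        printf("transitional net (0x1000): %u\n", get_dev_type(0x1000, 0x1));
        return 0;
}
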
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 0002b2136b48..01a848adf590 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -5,7 +5,7 @@
#define __MLX5_VDPA_H__
#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
+#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
@@ -48,6 +48,26 @@ struct mlx5_vdpa_resources {
bool valid;
};
+struct mlx5_control_vq {
+ struct vhost_iotlb *iotlb;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
+ struct vringh vring;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ struct vdpa_callback event_cb;
+ struct vringh_kiov riov;
+ struct vringh_kiov wiov;
+ unsigned short head;
+};
+
+struct mlx5_ctrl_wq_ent {
+ struct work_struct work;
+ struct mlx5_vdpa_dev *mvdev;
+};
+
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -57,9 +77,12 @@ struct mlx5_vdpa_dev {
u64 actual_features;
u8 status;
u32 max_vqs;
+ u16 max_idx;
u32 generation;
struct mlx5_vdpa_mr mr;
+ struct mlx5_control_vq cvq;
+ struct workqueue_struct *wq;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
@@ -68,6 +91,7 @@ int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
+int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn);
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index e59135fa867e..ff010c6d0cd3 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
@@ -451,33 +452,30 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
}
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
{
- struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ struct vhost_iotlb_map *map;
+ u64 start = 0, last = ULLONG_MAX;
int err;
- if (mr->initialized)
- return 0;
-
- if (iotlb)
- err = create_user_mr(mvdev, iotlb);
- else
- err = create_dma_mr(mvdev, mr);
-
- if (!err)
- mr->initialized = true;
+ if (!src) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
+ return err;
+ }
- return err;
+ for (map = vhost_iotlb_itree_first(src, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
+ map->addr, map->perm);
+ if (err)
+ return err;
+ }
+ return 0;
}
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
{
- int err;
-
- mutex_lock(&mvdev->mr.mkey_mtx);
- err = _mlx5_vdpa_create_mr(mvdev, iotlb);
- mutex_unlock(&mvdev->mr.mkey_mtx);
- return err;
+ vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
@@ -501,6 +499,7 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
if (!mr->initialized)
goto out;
+ prune_iotlb(mvdev);
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
@@ -512,6 +511,48 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+ if (mr->initialized)
+ return 0;
+
+ if (iotlb)
+ err = create_user_mr(mvdev, iotlb);
+ else
+ err = create_dma_mr(mvdev, mr);
+
+ if (err)
+ return err;
+
+ err = dup_iotlb(mvdev, iotlb);
+ if (err)
+ goto out_err;
+
+ mr->initialized = true;
+ return 0;
+
+out_err:
+ if (iotlb)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+ int err;
+
+ mutex_lock(&mvdev->mr.mkey_mtx);
+ err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+ mutex_unlock(&mvdev->mr.mkey_mtx);
+ return err;
+}
+
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
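
dup_iotlb() above either mirrors every mapping from the source table into the control virtqueue's private IOTLB, or, when there is no source table (the DMA-MR case), installs a single identity mapping covering the whole address space. A hedged sketch with a toy array standing in for the interval tree:

#include <stdio.h>

struct map { unsigned long long start, last, addr; };

static struct map dst[8];
static int ndst;

static int dup_iotlb(const struct map *src, int nsrc)
{
        if (!src) {     /* no translation table: one identity mapping */
                dst[ndst++] = (struct map){ 0, ~0ULL, 0 };
                return 0;
        }

        for (int i = 0; i < nsrc; i++)  /* mirror every source range */
                dst[ndst++] = src[i];

        return 0;
}

int main(void)
{
        struct map src[] = { { 0x1000, 0x1fff, 0x80000 } };

        dup_iotlb(src, 1);
        printf("copied %d range(s): [0x%llx..0x%llx] -> 0x%llx\n",
               ndst, dst[0].start, dst[0].last, dst[0].addr);
        return 0;
}
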
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index d4606213f88a..15e266d0e27a 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
@@ -128,6 +129,16 @@ int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *
return err;
}
+int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
+
+ MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+ return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+}
+
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
@@ -221,6 +232,22 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *m
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
+static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
+{
+ mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
+ if (!mvdev->cvq.iotlb)
+ return -ENOMEM;
+
+ vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
+
+ return 0;
+}
+
+static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
+{
+ vhost_iotlb_free(mvdev->cvq.iotlb);
+}
+
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
@@ -260,10 +287,17 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
err = -ENOMEM;
goto err_key;
}
+
+ err = init_ctrl_vq(mvdev);
+ if (err)
+ goto err_ctrl;
+
res->valid = true;
return 0;
+err_ctrl:
+ iounmap(res->kick_addr);
err_key:
dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
@@ -282,6 +316,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
if (!res->valid)
return;
+ cleanup_ctrl_vq(mvdev);
iounmap(res->kick_addr);
res->kick_addr = NULL;
dealloc_pd(mvdev, res->pdn, res->uid);
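
Note how inserting init_ctrl_vq() into the middle of mlx5_vdpa_alloc_resources() adds a matching rung (err_ctrl) to the goto ladder, so every step that succeeded before the failure is unwound in reverse order. The skeleton of the idiom:

#include <stdio.h>

static int step(int n, int fail_at)
{
        if (n == fail_at)
                return -1;
        printf("step %d ok\n", n);
        return 0;
}

static void undo(int n)
{
        printf("undo step %d\n", n);
}

static int alloc_resources(int fail_at)
{
        if (step(1, fail_at))
                return -1;
        if (step(2, fail_at))
                goto err1;
        if (step(3, fail_at))           /* the newly inserted middle step */
                goto err2;
        return 0;

err2:
        undo(2);
err1:
        undo(1);
        return -1;
}

int main(void)
{
        alloc_resources(3);     /* step 1 ok, step 2 ok, undo 2, undo 1 */
        return 0;
}
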
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5906cada2293..294ba05e6fc9 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -45,6 +45,8 @@ MODULE_LICENSE("Dual BSD/GPL");
(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \
VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
+#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
+
struct mlx5_vdpa_net_resources {
u32 tisn;
u32 tdn;
@@ -90,7 +92,6 @@ struct mlx5_vq_restore_info {
u16 avail_index;
u16 used_index;
bool ready;
- struct vdpa_callback cb;
bool restore;
};
@@ -100,7 +101,6 @@ struct mlx5_vdpa_virtqueue {
u64 device_addr;
u64 driver_addr;
u32 num_ent;
- struct vdpa_callback event_cb;
/* Resources for implementing the notification channel from the device
* to the driver. fwqp is the firmware end of an RC connection; the
@@ -135,11 +135,20 @@ struct mlx5_vdpa_virtqueue {
*/
#define MLX5_MAX_SUPPORTED_VQS 16
+static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+ if (unlikely(idx > mvdev->max_idx))
+ return false;
+
+ return true;
+}
+
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
struct virtio_net_config config;
struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+ struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
/* Serialize vq resources creation and destruction. This is required
* since memory map might change and we need to destroy and create
@@ -151,15 +160,18 @@ struct mlx5_vdpa_net {
struct mlx5_flow_handle *rx_rule;
bool setup;
u16 mtu;
+ u32 cur_num_vqs;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
static void init_mvqs(struct mlx5_vdpa_net *ndev);
-static int setup_driver(struct mlx5_vdpa_net *ndev);
+static int setup_driver(struct mlx5_vdpa_dev *mvdev);
static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
+#define MLX5_CVQ_MAX_ENT 16
+
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
@@ -172,11 +184,41 @@ static bool mlx5_vdpa_debug;
mlx5_vdpa_info(mvdev, "%s\n", #_status); \
} while (0)
+/* TODO: cross-endian support */
+static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+{
+ return virtio_legacy_is_little_endian() ||
+ (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
+}
+
+static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
+static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+{
+ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
static inline u32 mlx5_vdpa_max_qps(int max_vqs)
{
return max_vqs / 2;
}
+static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
+{
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ return 2;
+
+ return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
+}
+
+static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
+{
+ return idx == ctrl_vq_idx(mvdev);
+}
+
static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
{
if (status & ~VALID_STATUS_MASK)
@@ -481,6 +523,10 @@ static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
+ struct mlx5_vdpa_net *ndev = mvq->ndev;
+ struct vdpa_callback *event_cb;
+
+ event_cb = &ndev->event_cbs[mvq->index];
mlx5_cq_set_ci(&mvq->cq.mcq);
/* make sure CQ consumer update is visible to the hardware before updating
@@ -488,8 +534,8 @@ static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int nu
*/
dma_wmb();
rx_post(&mvq->vqqp, num);
- if (mvq->event_cb.callback)
- mvq->event_cb.callback(mvq->event_cb.private);
+ if (event_cb->callback)
+ event_cb->callback(event_cb->private);
}
static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
@@ -1100,10 +1146,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
if (!mvq->num_ent)
return 0;
- if (mvq->initialized) {
- mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
- return -EINVAL;
- }
+ if (mvq->initialized)
+ return 0;
err = cq_create(ndev, idx, mvq->num_ent);
if (err)
@@ -1190,19 +1234,20 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
- int log_max_rqt;
__be32 *list;
+ int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (log_max_rqt < 1)
+ max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+ 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (max_rqt < 1)
return -EOPNOTSUPP;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1211,10 +1256,9 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
+ for (i = 0, j = 0; j < max_rqt; j++) {
if (!ndev->vqs[j].initialized)
continue;
@@ -1223,6 +1267,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
i++;
}
}
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
@@ -1232,6 +1277,52 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
return 0;
}
+#define MLX5_MODIFY_RQT_NUM_RQS ((u64)1)
+
+static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+{
+ __be32 *list;
+ int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+ max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+ 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+ if (max_rqt < 1)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid);
+ MLX5_SET64(modify_rqt_in, in, bitmask, MLX5_MODIFY_RQT_NUM_RQS);
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+ for (i = 0, j = 0; j < num; j++) {
+ if (!ndev->vqs[j].initialized)
+ continue;
+
+ if (!vq_is_tx(ndev->vqs[j].index)) {
+ list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
+ i++;
+ }
+ }
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
+ kfree(in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static void destroy_rqt(struct mlx5_vdpa_net *ndev)
{
mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
@@ -1345,12 +1436,206 @@ static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
ndev->rx_rule = NULL;
}
+static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_core_dev *pfmdev;
+ size_t read;
+ u8 mac[ETH_ALEN];
+
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MAC_ADDR_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN);
+ if (read != ETH_ALEN)
+ break;
+
+ if (!memcmp(ndev->config.mac, mac, 6)) {
+ status = VIRTIO_NET_OK;
+ break;
+ }
+
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) {
+ mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
+ ndev->config.mac);
+ break;
+ }
+ }
+
+ if (mlx5_mpfs_add_mac(pfmdev, mac)) {
+ mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n",
+ mac);
+ break;
+ }
+
+ memcpy(ndev->config.mac, mac, ETH_ALEN);
+ status = VIRTIO_NET_OK;
+ break;
+
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ int cur_qps = ndev->cur_num_vqs / 2;
+ int err;
+ int i;
+
+ if (cur_qps > newqps) {
+ err = modify_rqt(ndev, 2 * newqps);
+ if (err)
+ return err;
+
+ for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ ndev->cur_num_vqs = 2 * newqps;
+ } else {
+ ndev->cur_num_vqs = 2 * newqps;
+ for (i = cur_qps * 2; i < 2 * newqps; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto clean_added;
+ }
+ err = modify_rqt(ndev, 2 * newqps);
+ if (err)
+ goto clean_added;
+ }
+ return 0;
+
+clean_added:
+ for (--i; i >= cur_qps; --i)
+ teardown_vq(ndev, &ndev->vqs[i]);
+
+ return err;
+}
+
+static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ struct virtio_net_ctrl_mq mq;
+ size_t read;
+ u16 newqps;
+
+ switch (cmd) {
+ case VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET:
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq));
+ if (read != sizeof(mq))
+ break;
+
+ newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (ndev->cur_num_vqs == 2 * newqps) {
+ status = VIRTIO_NET_OK;
+ break;
+ }
+
+ if (newqps & (newqps - 1))
+ break;
+
+ if (!change_num_qps(mvdev, newqps))
+ status = VIRTIO_NET_OK;
+
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static void mlx5_cvq_kick_handler(struct work_struct *work)
+{
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+ struct virtio_net_ctrl_hdr ctrl;
+ struct mlx5_ctrl_wq_ent *wqent;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_control_vq *cvq;
+ struct mlx5_vdpa_net *ndev;
+ size_t read, write;
+ int err;
+
+ wqent = container_of(work, struct mlx5_ctrl_wq_ent, work);
+ mvdev = wqent->mvdev;
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ cvq = &mvdev->cvq;
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ goto out;
+
+ if (!cvq->ready)
+ goto out;
+
+ while (true) {
+ err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
+ GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl));
+ if (read != sizeof(ctrl))
+ break;
+
+ switch (ctrl.class) {
+ case VIRTIO_NET_CTRL_MAC:
+ status = handle_ctrl_mac(mvdev, ctrl.cmd);
+ break;
+ case VIRTIO_NET_CTRL_MQ:
+ status = handle_ctrl_mq(mvdev, ctrl.cmd);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status));
+ vringh_complete_iotlb(&cvq->vring, cvq->head, write);
+ vringh_kiov_cleanup(&cvq->riov);
+ vringh_kiov_cleanup(&cvq->wiov);
+
+ if (vringh_need_notify_iotlb(&cvq->vring))
+ vringh_notify(&cvq->vring);
+ }
+out:
+ kfree(wqent);
+}
+
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+ struct mlx5_ctrl_wq_ent *wqent;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+
+ if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
+ if (!mvdev->cvq.ready)
+ return;
+
+ wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+ if (!wqent)
+ return;
+ wqent->mvdev = mvdev;
+ INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
+ queue_work(mvdev->wq, &wqent->work);
+ return;
+ }
+
+ mvq = &ndev->vqs[idx];
if (unlikely(!mvq->ready))
return;
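
The reworked kick path may be called from atomic context, so control-virtqueue processing is not done inline: a small work entry is allocated with GFP_ATOMIC and queued to the device workqueue, whose handler is free to sleep. A kernel-idiom sketch of that shape (my_dev and process_ctrl_vq() are placeholders, not the driver's symbols):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
        struct workqueue_struct *wq;
        /* ... */
};

struct kick_ent {
        struct work_struct work;
        struct my_dev *dev;
};

static void process_ctrl_vq(struct my_dev *dev)
{
        /* may sleep: pull descriptors, handle commands, push status */
}

static void kick_handler(struct work_struct *work)
{
        struct kick_ent *ent = container_of(work, struct kick_ent, work);

        process_ctrl_vq(ent->dev);
        kfree(ent);                     /* one-shot entry, freed by handler */
}

static void kick(struct my_dev *dev)
{
        struct kick_ent *ent = kzalloc(sizeof(*ent), GFP_ATOMIC);

        if (!ent)
                return;                 /* drop: the guest will kick again */
        ent->dev = dev;
        INIT_WORK(&ent->work, kick_handler);
        queue_work(dev->wq, &ent->work);
}
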
@@ -1362,8 +1647,19 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ mvdev->cvq.desc_addr = desc_area;
+ mvdev->cvq.device_addr = device_area;
+ mvdev->cvq.driver_addr = driver_area;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
mvq->desc_addr = desc_area;
mvq->device_addr = device_area;
mvq->driver_addr = driver_area;
@@ -1376,6 +1672,9 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ return;
+
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
@@ -1384,17 +1683,46 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
- vq->event_cb = *cb;
+ ndev->event_cbs[idx] = *cb;
+}
+
+static void mlx5_cvq_notify(struct vringh *vring)
+{
+ struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring);
+
+ if (!cvq->event_cb.callback)
+ return;
+
+ cvq->event_cb.callback(cvq->event_cb.private);
+}
+
+static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready)
+{
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+
+ cvq->ready = ready;
+ if (!ready)
+ return;
+
+ cvq->vring.notify = mlx5_cvq_notify;
}
static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+
+ if (!is_index_valid(mvdev, idx))
+ return;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ set_cvq_ready(mvdev, ready);
+ return;
+ }
+ mvq = &ndev->vqs[idx];
if (!ready)
suspend_vq(ndev, mvq);
@@ -1405,9 +1733,14 @@ static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
- return mvq->ready;
+ if (!is_index_valid(mvdev, idx))
+ return false;
+
+ if (is_ctrl_vq_idx(mvdev, idx))
+ return mvdev->cvq.ready;
+
+ return ndev->vqs[idx].ready;
}
static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
@@ -1415,8 +1748,17 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ mvdev->cvq.vring.last_avail_idx = state->split.avail_index;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
mlx5_vdpa_warn(mvdev, "can't modify available index\n");
return -EINVAL;
@@ -1431,10 +1773,19 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+ struct mlx5_vdpa_virtqueue *mvq;
struct mlx5_virtq_attr attr;
int err;
+ if (!is_index_valid(mvdev, idx))
+ return -EINVAL;
+
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ state->split.avail_index = mvdev->cvq.vring.last_avail_idx;
+ return 0;
+ }
+
+ mvq = &ndev->vqs[idx];
/* If the virtq object was destroyed, use the value saved at
* the last minute of suspend_vq. This caters for userspace
* that cares about emulating the index after vq is stopped.
@@ -1491,10 +1842,14 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
u16 dev_features;
dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
- ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features);
+ ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
}
@@ -1507,17 +1862,29 @@ static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
return 0;
}
-static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
+static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
int err;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
+ for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
}
+ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
+ err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+ MLX5_CVQ_MAX_ENT, false,
+ (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ (struct vring_used *)(uintptr_t)cvq->device_addr);
+ if (err)
+ goto err_vq;
+ }
+
return 0;
err_vq:
@@ -1541,16 +1908,22 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
-/* TODO: cross-endian support */
-static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
-{
- return virtio_legacy_is_little_endian() ||
- (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
-}
-
-static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
{
- return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+ if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
+ if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
+ /* MQ supported. CVQ index is right above the last data virtqueue's */
+ mvdev->max_idx = mvdev->max_vqs;
+ } else {
+ /* Only CVQ supported. Data virtqueues occupy indices 0 and 1;
+ * the CVQ gets index 2.
+ */
+ mvdev->max_idx = 2;
+ }
+ } else {
+ /* Two data virtqueues only: one for rx and one for tx */
+ mvdev->max_idx = 1;
+ }
}
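
Concretely: with max_vqs = 4 and both CTRL_VQ and MQ negotiated, indices 0-3 are data virtqueues and index 4 is the CVQ, so max_idx = 4. With CTRL_VQ but no MQ, only indices 0-2 are valid (two data queues plus the CVQ at 2). With neither feature, max_idx = 1 and only the rx/tx pair at indices 0 and 1 exists.
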
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
@@ -1568,6 +1941,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+ update_cvq_info(mvdev);
return err;
}
@@ -1605,15 +1979,14 @@ static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
struct mlx5_vq_restore_info *ri = &mvq->ri;
- struct mlx5_virtq_attr attr;
+ struct mlx5_virtq_attr attr = {};
int err;
- if (!mvq->initialized)
- return 0;
-
- err = query_virtqueue(ndev, mvq, &attr);
- if (err)
- return err;
+ if (mvq->initialized) {
+ err = query_virtqueue(ndev, mvq, &attr);
+ if (err)
+ return err;
+ }
ri->avail_index = attr.available_index;
ri->used_index = attr.used_index;
@@ -1622,7 +1995,6 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
ri->desc_addr = mvq->desc_addr;
ri->device_addr = mvq->device_addr;
ri->driver_addr = mvq->driver_addr;
- ri->cb = mvq->event_cb;
ri->restore = true;
return 0;
}
@@ -1667,12 +2039,12 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
mvq->desc_addr = ri->desc_addr;
mvq->device_addr = ri->device_addr;
mvq->driver_addr = ri->driver_addr;
- mvq->event_cb = ri->cb;
}
}
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
suspend_vqs(ndev);
@@ -1681,58 +2053,59 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
goto err_mr;
teardown_driver(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
- err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
+ mlx5_vdpa_destroy_mr(mvdev);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb);
if (err)
goto err_mr;
- if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
+ if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
return 0;
restore_channels_info(ndev);
- err = setup_driver(ndev);
+ err = setup_driver(mvdev);
if (err)
goto err_setup;
return 0;
err_setup:
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ mlx5_vdpa_destroy_mr(mvdev);
err_mr:
return err;
}
-static int setup_driver(struct mlx5_vdpa_net *ndev)
+static int setup_driver(struct mlx5_vdpa_dev *mvdev)
{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
int err;
mutex_lock(&ndev->reslock);
if (ndev->setup) {
- mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
+ mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
err = 0;
goto out;
}
- err = setup_virtqueues(ndev);
+ err = setup_virtqueues(mvdev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
+ mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
goto out;
}
err = create_rqt(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
+ mlx5_vdpa_warn(mvdev, "create_rqt\n");
goto err_rqt;
}
err = create_tir(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
+ mlx5_vdpa_warn(mvdev, "create_tir\n");
goto err_tir;
}
err = add_fwd_to_tir(ndev);
if (err) {
- mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
+ mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
goto err_fwd;
}
ndev->setup = true;
@@ -1781,24 +2154,10 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
int err;
print_status(mvdev, status, true);
- if (!status) {
- mlx5_vdpa_info(mvdev, "performing device reset\n");
- teardown_driver(ndev);
- clear_vqs_ready(ndev);
- mlx5_vdpa_destroy_mr(&ndev->mvdev);
- ndev->mvdev.status = 0;
- ndev->mvdev.mlx_features = 0;
- ++mvdev->generation;
- if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
- if (mlx5_vdpa_create_mr(mvdev, NULL))
- mlx5_vdpa_warn(mvdev, "create MR failed\n");
- }
- return;
- }
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
- err = setup_driver(ndev);
+ err = setup_driver(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
goto err_setup;
@@ -1817,6 +2176,29 @@ err_setup:
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
}
+static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ print_status(mvdev, 0, true);
+ mlx5_vdpa_info(mvdev, "performing device reset\n");
+ teardown_driver(ndev);
+ clear_vqs_ready(ndev);
+ mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ ndev->mvdev.status = 0;
+ ndev->mvdev.mlx_features = 0;
+ memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+ ndev->mvdev.actual_features = 0;
+ ++mvdev->generation;
+ if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+ if (mlx5_vdpa_create_mr(mvdev, NULL))
+ mlx5_vdpa_warn(mvdev, "create MR failed\n");
+ }
+
+ return 0;
+}
+
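The new .reset op is reached through the vdpa core rather than called directly; a sketch of the core-side wrapper this implies (assumed shape, consistent with the features_valid field visible in vdpa.c below):

    static inline int vdpa_reset(struct vdpa_device *vdev)
    {
            const struct vdpa_config_ops *ops = vdev->config;

            /* negotiated features become stale across a reset */
            vdev->features_valid = false;
            return ops->reset(vdev);
    }
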
static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
{
return sizeof(struct virtio_net_config);
@@ -1848,7 +2230,6 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
bool change_map;
int err;
@@ -1859,7 +2240,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
}
if (change_map)
- return mlx5_vdpa_change_map(ndev, iotlb);
+ return mlx5_vdpa_change_map(mvdev, iotlb);
return 0;
}
@@ -1889,6 +2270,9 @@ static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device
struct mlx5_vdpa_net *ndev;
phys_addr_t addr;
+ if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ return ret;
+
/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
* notification to avoid the risk of mapping pages that contain BAR of more
* than one SF
@@ -1928,6 +2312,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vendor_id = mlx5_vdpa_get_vendor_id,
.get_status = mlx5_vdpa_get_status,
.set_status = mlx5_vdpa_set_status,
+ .reset = mlx5_vdpa_reset,
.get_config_size = mlx5_vdpa_get_config_size,
.get_config = mlx5_vdpa_get_config,
.set_config = mlx5_vdpa_set_config,
@@ -2040,7 +2425,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- name);
+ name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
@@ -2063,8 +2448,11 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
err = mlx5_mpfs_add_mac(pfmdev, config->mac);
if (err)
goto err_mtu;
+
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
+ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
@@ -2080,8 +2468,15 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mr;
+ mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq");
+ if (!mvdev->wq) {
+ err = -ENOMEM;
+ goto err_res2;
+ }
+
+ ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
+ err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
if (err)
goto err_reg;
@@ -2089,6 +2484,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return 0;
err_reg:
+ destroy_workqueue(mvdev->wq);
+err_res2:
free_resources(ndev);
err_mr:
mlx5_vdpa_destroy_mr(mvdev);
@@ -2106,7 +2503,9 @@ err_mtu:
static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
+ destroy_workqueue(mvdev->wq);
_vdpa_unregister_device(dev);
mgtdev->ndev = NULL;
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 3fc4525fc05c..1dc121a07a93 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -69,6 +69,7 @@ static void vdpa_release_dev(struct device *d)
* @config: the bus operations that is supported by this device
* @size: size of the parent structure that contains private data
* @name: name of the vdpa device; optional.
+ * @use_va: indicate whether virtual addresses must be used by this device
*
* Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
@@ -78,7 +79,8 @@ static void vdpa_release_dev(struct device *d)
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- size_t size, const char *name)
+ size_t size, const char *name,
+ bool use_va)
{
struct vdpa_device *vdev;
int err = -EINVAL;
@@ -89,6 +91,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (!!config->dma_map != !!config->dma_unmap)
goto err;
+ /* It should only work for devices that use an on-chip IOMMU */
+ if (use_va && !(config->dma_map || config->set_map))
+ goto err;
+
err = -ENOMEM;
vdev = kzalloc(size, GFP_KERNEL);
if (!vdev)
@@ -104,6 +110,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
vdev->index = err;
vdev->config = config;
vdev->features_valid = false;
+ vdev->use_va = use_va;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
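
A driver backed by an on-chip IOMMU that wants to consume userspace virtual addresses opts in at allocation time. A hedged sketch, where my_vdpa and my_ops are placeholders:

    struct my_vdpa *mv;

    /* the trailing argument is the new use_va flag */
    mv = vdpa_alloc_device(struct my_vdpa, vdpa, parent, &my_ops,
                           "my-vdpa", true);
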
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c621cf7feec0..5f484fff8dbe 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -92,7 +92,7 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
vq->vring.notify = NULL;
}
-static void vdpasim_reset(struct vdpasim *vdpasim)
+static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
int i;
@@ -137,7 +137,8 @@ static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
int ret;
/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
- iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+ iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
+ ULONG_MAX - 1, true);
if (!iova)
return DMA_MAPPING_ERROR;
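
The bug fixed here was a unit mismatch: alloc_iova() takes a size in IOVA pages, not bytes, so the byte count must be shifted down by the domain's granule first. For example, with a 4 KiB granule:

    /* a 16 KiB mapping must request 4 page frames, not 16384 */
    unsigned long pfns = size >> iova_shift(&vdpasim->iova); /* 16384 >> 12 == 4 */
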
@@ -250,7 +251,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
ops = &vdpasim_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
- dev_attr->name);
+ dev_attr->name, false);
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
goto err_alloc;
@@ -459,11 +460,21 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
spin_lock(&vdpasim->lock);
vdpasim->status = status;
- if (status == 0)
- vdpasim_reset(vdpasim);
spin_unlock(&vdpasim->lock);
}
+static int vdpasim_reset(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->status = 0;
+ vdpasim_do_reset(vdpasim);
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -544,14 +555,14 @@ err:
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
- u64 pa, u32 perm)
+ u64 pa, u32 perm, void *opaque)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
spin_lock(&vdpasim->iommu_lock);
- ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
- perm);
+ ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
+ pa, perm, opaque);
spin_unlock(&vdpasim->iommu_lock);
return ret;
@@ -607,6 +618,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .reset = vdpasim_reset,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
@@ -635,6 +647,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
.set_status = vdpasim_set_status,
+ .reset = vdpasim_reset,
.get_config_size = vdpasim_get_config_size,
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
diff --git a/drivers/vdpa/vdpa_user/Makefile b/drivers/vdpa/vdpa_user/Makefile
new file mode 100644
index 000000000000..260e0b26af99
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+vduse-y := vduse_dev.o iova_domain.o
+
+obj-$(CONFIG_VDPA_USER) += vduse.o
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
new file mode 100644
index 000000000000..1daae2608860
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MMU-based software IOTLB.
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/vdpa.h>
+
+#include "iova_domain.h"
+
+static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ struct file *file, u64 offset)
+{
+ struct vdpa_map_file *map_file;
+ int ret;
+
+ map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
+ if (!map_file)
+ return -ENOMEM;
+
+ map_file->file = get_file(file);
+ map_file->offset = offset;
+
+ ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
+ addr, perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ return ret;
+ }
+ return 0;
+}
+
+static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
+ u64 start, u64 last)
+{
+ struct vdpa_map_file *map_file;
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_iotlb_map_free(domain->iotlb, map);
+ }
+}
+
+int vduse_domain_set_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb)
+{
+ struct vdpa_map_file *map_file;
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = ULLONG_MAX;
+ int ret;
+
+ spin_lock(&domain->iotlb_lock);
+ vduse_iotlb_del_range(domain, start, last);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ ret = vduse_iotlb_add_range(domain, map->start, map->last,
+ map->addr, map->perm,
+ map_file->file,
+ map_file->offset);
+ if (ret)
+ goto err;
+ }
+ spin_unlock(&domain->iotlb_lock);
+
+ return 0;
+err:
+ vduse_iotlb_del_range(domain, start, last);
+ spin_unlock(&domain->iotlb_lock);
+ return ret;
+}
+
+void vduse_domain_clear_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb)
+{
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = ULLONG_MAX;
+
+ spin_lock(&domain->iotlb_lock);
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ vduse_iotlb_del_range(domain, map->start, map->last);
+ }
+ spin_unlock(&domain->iotlb_lock);
+}
+
+static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
+ u64 iova, u64 size, u64 paddr)
+{
+ struct vduse_bounce_map *map;
+ u64 last = iova + size - 1;
+
+ while (iova <= last) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ if (!map->bounce_page) {
+ map->bounce_page = alloc_page(GFP_ATOMIC);
+ if (!map->bounce_page)
+ return -ENOMEM;
+ }
+ map->orig_phys = paddr;
+ paddr += PAGE_SIZE;
+ iova += PAGE_SIZE;
+ }
+ return 0;
+}
+
+static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
+ u64 iova, u64 size)
+{
+ struct vduse_bounce_map *map;
+ u64 last = iova + size - 1;
+
+ while (iova <= last) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ map->orig_phys = INVALID_PHYS_ADDR;
+ iova += PAGE_SIZE;
+ }
+}
+
+static void do_bounce(phys_addr_t orig, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(orig);
+ unsigned int offset = offset_in_page(orig);
+ char *buffer;
+ unsigned int sz = 0;
+
+ while (size) {
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+ buffer = kmap_atomic(pfn_to_page(pfn));
+ if (dir == DMA_TO_DEVICE)
+ memcpy(addr, buffer + offset, sz);
+ else
+ memcpy(buffer + offset, addr, sz);
+ kunmap_atomic(buffer);
+
+ size -= sz;
+ pfn++;
+ addr += sz;
+ offset = 0;
+ }
+}
+
+static void vduse_domain_bounce(struct vduse_iova_domain *domain,
+ dma_addr_t iova, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_bounce_map *map;
+ unsigned int offset;
+ void *addr;
+ size_t sz;
+
+ if (iova >= domain->bounce_size)
+ return;
+
+ while (size) {
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ offset = offset_in_page(iova);
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+ if (WARN_ON(!map->bounce_page ||
+ map->orig_phys == INVALID_PHYS_ADDR))
+ return;
+
+ addr = page_address(map->bounce_page) + offset;
+ do_bounce(map->orig_phys + offset, addr, sz, dir);
+ size -= sz;
+ iova += sz;
+ }
+}
+
+static struct page *
+vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
+{
+ u64 start = iova & PAGE_MASK;
+ u64 last = start + PAGE_SIZE - 1;
+ struct vhost_iotlb_map *map;
+ struct page *page = NULL;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb, start, last);
+ if (!map)
+ goto out;
+
+ page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
+ get_page(page);
+out:
+ spin_unlock(&domain->iotlb_lock);
+
+ return page;
+}
+
+static struct page *
+vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
+{
+ struct vduse_bounce_map *map;
+ struct page *page = NULL;
+
+ spin_lock(&domain->iotlb_lock);
+ map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+ if (!map->bounce_page)
+ goto out;
+
+ page = map->bounce_page;
+ get_page(page);
+out:
+ spin_unlock(&domain->iotlb_lock);
+
+ return page;
+}
+
+static void
+vduse_domain_free_bounce_pages(struct vduse_iova_domain *domain)
+{
+ struct vduse_bounce_map *map;
+ unsigned long pfn, bounce_pfns;
+
+ bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
+
+ for (pfn = 0; pfn < bounce_pfns; pfn++) {
+ map = &domain->bounce_maps[pfn];
+ if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
+ continue;
+
+ if (!map->bounce_page)
+ continue;
+
+ __free_page(map->bounce_page);
+ map->bounce_page = NULL;
+ }
+}
+
+void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
+{
+ if (!domain->bounce_map)
+ return;
+
+ spin_lock(&domain->iotlb_lock);
+ if (!domain->bounce_map)
+ goto unlock;
+
+ vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
+ domain->bounce_map = 0;
+unlock:
+ spin_unlock(&domain->iotlb_lock);
+}
+
+static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
+{
+ int ret = 0;
+
+ if (domain->bounce_map)
+ return 0;
+
+ spin_lock(&domain->iotlb_lock);
+ if (domain->bounce_map)
+ goto unlock;
+
+ ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
+ 0, VHOST_MAP_RW, domain->file, 0);
+ if (ret)
+ goto unlock;
+
+ domain->bounce_map = 1;
+unlock:
+ spin_unlock(&domain->iotlb_lock);
+ return ret;
+}
+
+static dma_addr_t
+vduse_domain_alloc_iova(struct iova_domain *iovad,
+ unsigned long size, unsigned long limit)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long iova_len = iova_align(iovad, size) >> shift;
+ unsigned long iova_pfn;
+
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ iova_len = roundup_pow_of_two(iova_len);
+ iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
+
+ return iova_pfn << shift;
+}
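
To make the rounding concrete, assuming the in-tree IOVA_RANGE_CACHE_MAX_SIZE of 6 (so the cacheable limit is 1 << 5 = 32 pages): a 3-page request is padded to 4 pages before allocation, wasting one page of IOVA space but keeping the rcache free lists power-of-two sized, while a request of 32 pages or more bypasses the rounding entirely.
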
+
+static void vduse_domain_free_iova(struct iova_domain *iovad,
+ dma_addr_t iova, size_t size)
+{
+ unsigned long shift = iova_shift(iovad);
+ unsigned long iova_len = iova_align(iovad, size) >> shift;
+
+ free_iova_fast(iovad, iova >> shift, iova_len);
+}
+
+dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->stream_iovad;
+ unsigned long limit = domain->bounce_size - 1;
+ phys_addr_t pa = page_to_phys(page) + offset;
+ dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
+
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ if (vduse_domain_init_bounce_map(domain))
+ goto err;
+
+ if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
+ goto err;
+
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
+
+ return iova;
+err:
+ vduse_domain_free_iova(iovad, iova, size);
+ return DMA_MAPPING_ERROR;
+}
+
+void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->stream_iovad;
+
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
+
+ vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
+ vduse_domain_free_iova(iovad, dma_addr, size);
+}
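
Putting the two together, a hedged sketch of the streaming path as the DMA ops added later in this patch drive it (directions follow the bounce logic above):

    /* map: the original data is copied into the domain's bounce pages,
     * which the userspace daemon reads through its mmap() of the domain */
    dma_addr_t iova = vduse_domain_map_page(domain, page, 0, len,
                                            DMA_TO_DEVICE, 0);

    /* ... userspace services the request ... */

    /* unmap: with DMA_FROM_DEVICE this would bounce data back first */
    vduse_domain_unmap_page(domain, iova, len, DMA_TO_DEVICE, 0);
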
+
+void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->consistent_iovad;
+ unsigned long limit = domain->iova_limit;
+ dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
+ void *orig = alloc_pages_exact(size, flag);
+
+ if (!iova || !orig)
+ goto err;
+
+ spin_lock(&domain->iotlb_lock);
+ if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
+ virt_to_phys(orig), VHOST_MAP_RW,
+ domain->file, (u64)iova)) {
+ spin_unlock(&domain->iotlb_lock);
+ goto err;
+ }
+ spin_unlock(&domain->iotlb_lock);
+
+ *dma_addr = iova;
+
+ return orig;
+err:
+ *dma_addr = DMA_MAPPING_ERROR;
+ if (orig)
+ free_pages_exact(orig, size);
+ if (iova)
+ vduse_domain_free_iova(iovad, iova, size);
+
+ return NULL;
+}
+
+void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct iova_domain *iovad = &domain->consistent_iovad;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+ phys_addr_t pa;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ if (WARN_ON(!map)) {
+ spin_unlock(&domain->iotlb_lock);
+ return;
+ }
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ pa = map->addr;
+ vhost_iotlb_map_free(domain->iotlb, map);
+ spin_unlock(&domain->iotlb_lock);
+
+ vduse_domain_free_iova(iovad, dma_addr, size);
+ free_pages_exact(phys_to_virt(pa), size);
+}
+
+static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
+{
+ struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
+ unsigned long iova = vmf->pgoff << PAGE_SHIFT;
+ struct page *page;
+
+ if (!domain)
+ return VM_FAULT_SIGBUS;
+
+ if (iova < domain->bounce_size)
+ page = vduse_domain_get_bounce_page(domain, iova);
+ else
+ page = vduse_domain_get_coherent_page(domain, iova);
+
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct vduse_domain_mmap_ops = {
+ .fault = vduse_domain_mmap_fault,
+};
+
+static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vduse_iova_domain *domain = file->private_data;
+
+ vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+ vma->vm_private_data = domain;
+ vma->vm_ops = &vduse_domain_mmap_ops;
+
+ return 0;
+}
+
+static int vduse_domain_release(struct inode *inode, struct file *file)
+{
+ struct vduse_iova_domain *domain = file->private_data;
+
+ spin_lock(&domain->iotlb_lock);
+ vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
+ vduse_domain_free_bounce_pages(domain);
+ spin_unlock(&domain->iotlb_lock);
+ put_iova_domain(&domain->stream_iovad);
+ put_iova_domain(&domain->consistent_iovad);
+ vhost_iotlb_free(domain->iotlb);
+ vfree(domain->bounce_maps);
+ kfree(domain);
+
+ return 0;
+}
+
+static const struct file_operations vduse_domain_fops = {
+ .owner = THIS_MODULE,
+ .mmap = vduse_domain_mmap,
+ .release = vduse_domain_release,
+};
+
+void vduse_domain_destroy(struct vduse_iova_domain *domain)
+{
+ fput(domain->file);
+}
+
+struct vduse_iova_domain *
+vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
+{
+ struct vduse_iova_domain *domain;
+ struct file *file;
+ struct vduse_bounce_map *map;
+ unsigned long pfn, bounce_pfns;
+
+ bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
+ if (iova_limit <= bounce_size)
+ return NULL;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!domain->iotlb)
+ goto err_iotlb;
+
+ domain->iova_limit = iova_limit;
+ domain->bounce_size = PAGE_ALIGN(bounce_size);
+ domain->bounce_maps = vzalloc(bounce_pfns *
+ sizeof(struct vduse_bounce_map));
+ if (!domain->bounce_maps)
+ goto err_map;
+
+ for (pfn = 0; pfn < bounce_pfns; pfn++) {
+ map = &domain->bounce_maps[pfn];
+ map->orig_phys = INVALID_PHYS_ADDR;
+ }
+ file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
+ domain, O_RDWR);
+ if (IS_ERR(file))
+ goto err_file;
+
+ domain->file = file;
+ spin_lock_init(&domain->iotlb_lock);
+ init_iova_domain(&domain->stream_iovad,
+ PAGE_SIZE, IOVA_START_PFN);
+ init_iova_domain(&domain->consistent_iovad,
+ PAGE_SIZE, bounce_pfns);
+
+ return domain;
+err_file:
+ vfree(domain->bounce_maps);
+err_map:
+ vhost_iotlb_free(domain->iotlb);
+err_iotlb:
+ kfree(domain);
+ return NULL;
+}
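
Lifecycle sketch, mirroring how vduse_dev.c uses the domain later in this patch:

    domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1, VDUSE_BOUNCE_SIZE);
    if (!domain)
            return -ENOMEM;
    /* ... map pages, service faults through the anon-inode file ... */
    vduse_domain_destroy(domain);   /* drops the file; release() frees it */
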
+
+int vduse_domain_init(void)
+{
+ return iova_cache_get();
+}
+
+void vduse_domain_exit(void)
+{
+ iova_cache_put();
+}
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
new file mode 100644
index 000000000000..2722d9b8e21a
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MMU-based software IOTLB.
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#ifndef _VDUSE_IOVA_DOMAIN_H
+#define _VDUSE_IOVA_DOMAIN_H
+
+#include <linux/iova.h>
+#include <linux/dma-mapping.h>
+#include <linux/vhost_iotlb.h>
+
+#define IOVA_START_PFN 1
+
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+
+struct vduse_bounce_map {
+ struct page *bounce_page;
+ u64 orig_phys;
+};
+
+struct vduse_iova_domain {
+ struct iova_domain stream_iovad;
+ struct iova_domain consistent_iovad;
+ struct vduse_bounce_map *bounce_maps;
+ size_t bounce_size;
+ unsigned long iova_limit;
+ int bounce_map;
+ struct vhost_iotlb *iotlb;
+ spinlock_t iotlb_lock;
+ struct file *file;
+};
+
+int vduse_domain_set_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb);
+
+void vduse_domain_clear_map(struct vduse_iova_domain *domain,
+ struct vhost_iotlb *iotlb);
+
+dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
+ struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+
+void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+
+void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
+ size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs);
+
+void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs);
+
+void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
+
+void vduse_domain_destroy(struct vduse_iova_domain *domain);
+
+struct vduse_iova_domain *vduse_domain_create(unsigned long iova_limit,
+ size_t bounce_size);
+
+int vduse_domain_init(void);
+
+void vduse_domain_exit(void);
+
+#endif /* _VDUSE_IOVA_DOMAIN_H */
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
new file mode 100644
index 000000000000..29a38ecba19e
--- /dev/null
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDUSE: vDPA Device in Userspace
+ *
+ * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
+ *
+ * Author: Xie Yongji <xieyongji@bytedance.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/dma-map-ops.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <uapi/linux/vduse.h>
+#include <uapi/linux/vdpa.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_blk.h>
+#include <linux/mod_devicetable.h>
+
+#include "iova_domain.h"
+
+#define DRV_AUTHOR "Yongji Xie <xieyongji@bytedance.com>"
+#define DRV_DESC "vDPA Device in Userspace"
+#define DRV_LICENSE "GPL v2"
+
+#define VDUSE_DEV_MAX (1U << MINORBITS)
+#define VDUSE_BOUNCE_SIZE (64 * 1024 * 1024)
+#define VDUSE_IOVA_SIZE (128 * 1024 * 1024)
+#define VDUSE_MSG_DEFAULT_TIMEOUT 30
+
+struct vduse_virtqueue {
+ u16 index;
+ u16 num_max;
+ u32 num;
+ u64 desc_addr;
+ u64 driver_addr;
+ u64 device_addr;
+ struct vdpa_vq_state state;
+ bool ready;
+ bool kicked;
+ spinlock_t kick_lock;
+ spinlock_t irq_lock;
+ struct eventfd_ctx *kickfd;
+ struct vdpa_callback cb;
+ struct work_struct inject;
+ struct work_struct kick;
+};
+
+struct vduse_dev;
+
+struct vduse_vdpa {
+ struct vdpa_device vdpa;
+ struct vduse_dev *dev;
+};
+
+struct vduse_dev {
+ struct vduse_vdpa *vdev;
+ struct device *dev;
+ struct vduse_virtqueue *vqs;
+ struct vduse_iova_domain *domain;
+ char *name;
+ struct mutex lock;
+ spinlock_t msg_lock;
+ u64 msg_unique;
+ u32 msg_timeout;
+ wait_queue_head_t waitq;
+ struct list_head send_list;
+ struct list_head recv_list;
+ struct vdpa_callback config_cb;
+ struct work_struct inject;
+ spinlock_t irq_lock;
+ int minor;
+ bool broken;
+ bool connected;
+ u64 api_version;
+ u64 device_features;
+ u64 driver_features;
+ u32 device_id;
+ u32 vendor_id;
+ u32 generation;
+ u32 config_size;
+ void *config;
+ u8 status;
+ u32 vq_num;
+ u32 vq_align;
+};
+
+struct vduse_dev_msg {
+ struct vduse_dev_request req;
+ struct vduse_dev_response resp;
+ struct list_head list;
+ wait_queue_head_t waitq;
+ bool completed;
+};
+
+struct vduse_control {
+ u64 api_version;
+};
+
+static DEFINE_MUTEX(vduse_lock);
+static DEFINE_IDR(vduse_idr);
+
+static dev_t vduse_major;
+static struct class *vduse_class;
+static struct cdev vduse_ctrl_cdev;
+static struct cdev vduse_cdev;
+static struct workqueue_struct *vduse_irq_wq;
+
+static u32 allowed_device_id[] = {
+ VIRTIO_ID_BLOCK,
+};
+
+static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
+{
+ struct vduse_vdpa *vdev = container_of(vdpa, struct vduse_vdpa, vdpa);
+
+ return vdev->dev;
+}
+
+static inline struct vduse_dev *dev_to_vduse(struct device *dev)
+{
+ struct vdpa_device *vdpa = dev_to_vdpa(dev);
+
+ return vdpa_to_vduse(vdpa);
+}
+
+static struct vduse_dev_msg *vduse_find_msg(struct list_head *head,
+ uint32_t request_id)
+{
+ struct vduse_dev_msg *msg;
+
+ list_for_each_entry(msg, head, list) {
+ if (msg->req.request_id == request_id) {
+ list_del(&msg->list);
+ return msg;
+ }
+ }
+
+ return NULL;
+}
+
+static struct vduse_dev_msg *vduse_dequeue_msg(struct list_head *head)
+{
+ struct vduse_dev_msg *msg = NULL;
+
+ if (!list_empty(head)) {
+ msg = list_first_entry(head, struct vduse_dev_msg, list);
+ list_del(&msg->list);
+ }
+
+ return msg;
+}
+
+static void vduse_enqueue_msg(struct list_head *head,
+ struct vduse_dev_msg *msg)
+{
+ list_add_tail(&msg->list, head);
+}
+
+static void vduse_dev_broken(struct vduse_dev *dev)
+{
+ struct vduse_dev_msg *msg, *tmp;
+
+ if (unlikely(dev->broken))
+ return;
+
+ list_splice_init(&dev->recv_list, &dev->send_list);
+ list_for_each_entry_safe(msg, tmp, &dev->send_list, list) {
+ list_del(&msg->list);
+ msg->completed = 1;
+ msg->resp.result = VDUSE_REQ_RESULT_FAILED;
+ wake_up(&msg->waitq);
+ }
+ dev->broken = true;
+ wake_up(&dev->waitq);
+}
+
+static int vduse_dev_msg_sync(struct vduse_dev *dev,
+ struct vduse_dev_msg *msg)
+{
+ int ret;
+
+ if (unlikely(dev->broken))
+ return -EIO;
+
+ init_waitqueue_head(&msg->waitq);
+ spin_lock(&dev->msg_lock);
+ if (unlikely(dev->broken)) {
+ spin_unlock(&dev->msg_lock);
+ return -EIO;
+ }
+ msg->req.request_id = dev->msg_unique++;
+ vduse_enqueue_msg(&dev->send_list, msg);
+ wake_up(&dev->waitq);
+ spin_unlock(&dev->msg_lock);
+ if (dev->msg_timeout)
+ ret = wait_event_killable_timeout(msg->waitq, msg->completed,
+ (long)dev->msg_timeout * HZ);
+ else
+ ret = wait_event_killable(msg->waitq, msg->completed);
+
+ spin_lock(&dev->msg_lock);
+ if (!msg->completed) {
+ list_del(&msg->list);
+ msg->resp.result = VDUSE_REQ_RESULT_FAILED;
+ /* Mark the device as malfunctioning when there is a timeout */
+ if (!ret)
+ vduse_dev_broken(dev);
+ }
+ ret = (msg->resp.result == VDUSE_REQ_RESULT_OK) ? 0 : -EIO;
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
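
The other end of this handshake is the userspace daemon; a minimal sketch of its service loop over the VDUSE char device (types and constants from <uapi/linux/vduse.h>, error handling omitted):

    struct vduse_dev_request req;
    struct vduse_dev_response resp = { 0 };

    /* blocks until the kernel queues a request (vduse_dev_read_iter) */
    read(dev_fd, &req, sizeof(req));

    /* handle req.type, e.g. VDUSE_GET_VQ_STATE, then answer it */
    resp.request_id = req.request_id;
    resp.result = VDUSE_REQ_RESULT_OK;
    write(dev_fd, &resp, sizeof(resp));
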
+
+static int vduse_dev_get_vq_state_packed(struct vduse_dev *dev,
+ struct vduse_virtqueue *vq,
+ struct vdpa_vq_state_packed *packed)
+{
+ struct vduse_dev_msg msg = { 0 };
+ int ret;
+
+ msg.req.type = VDUSE_GET_VQ_STATE;
+ msg.req.vq_state.index = vq->index;
+
+ ret = vduse_dev_msg_sync(dev, &msg);
+ if (ret)
+ return ret;
+
+ packed->last_avail_counter =
+ msg.resp.vq_state.packed.last_avail_counter & 0x0001;
+ packed->last_avail_idx =
+ msg.resp.vq_state.packed.last_avail_idx & 0x7FFF;
+ packed->last_used_counter =
+ msg.resp.vq_state.packed.last_used_counter & 0x0001;
+ packed->last_used_idx =
+ msg.resp.vq_state.packed.last_used_idx & 0x7FFF;
+
+ return 0;
+}
+
+static int vduse_dev_get_vq_state_split(struct vduse_dev *dev,
+ struct vduse_virtqueue *vq,
+ struct vdpa_vq_state_split *split)
+{
+ struct vduse_dev_msg msg = { 0 };
+ int ret;
+
+ msg.req.type = VDUSE_GET_VQ_STATE;
+ msg.req.vq_state.index = vq->index;
+
+ ret = vduse_dev_msg_sync(dev, &msg);
+ if (ret)
+ return ret;
+
+ split->avail_index = msg.resp.vq_state.split.avail_index;
+
+ return 0;
+}
+
+static int vduse_dev_set_status(struct vduse_dev *dev, u8 status)
+{
+ struct vduse_dev_msg msg = { 0 };
+
+ msg.req.type = VDUSE_SET_STATUS;
+ msg.req.s.status = status;
+
+ return vduse_dev_msg_sync(dev, &msg);
+}
+
+static int vduse_dev_update_iotlb(struct vduse_dev *dev,
+ u64 start, u64 last)
+{
+ struct vduse_dev_msg msg = { 0 };
+
+ if (last < start)
+ return -EINVAL;
+
+ msg.req.type = VDUSE_UPDATE_IOTLB;
+ msg.req.iova.start = start;
+ msg.req.iova.last = last;
+
+ return vduse_dev_msg_sync(dev, &msg);
+}
+
+static ssize_t vduse_dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vduse_dev *dev = file->private_data;
+ struct vduse_dev_msg *msg;
+ int size = sizeof(struct vduse_dev_request);
+ ssize_t ret;
+
+ if (iov_iter_count(to) < size)
+ return -EINVAL;
+
+ spin_lock(&dev->msg_lock);
+ while (1) {
+ msg = vduse_dequeue_msg(&dev->send_list);
+ if (msg)
+ break;
+
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ goto unlock;
+
+ spin_unlock(&dev->msg_lock);
+ ret = wait_event_interruptible_exclusive(dev->waitq,
+ !list_empty(&dev->send_list));
+ if (ret)
+ return ret;
+
+ spin_lock(&dev->msg_lock);
+ }
+ spin_unlock(&dev->msg_lock);
+ ret = copy_to_iter(&msg->req, size, to);
+ spin_lock(&dev->msg_lock);
+ if (ret != size) {
+ ret = -EFAULT;
+ vduse_enqueue_msg(&dev->send_list, msg);
+ goto unlock;
+ }
+ vduse_enqueue_msg(&dev->recv_list, msg);
+unlock:
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static bool is_mem_zero(const char *ptr, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i])
+ return false;
+ }
+ return true;
+}
+
+static ssize_t vduse_dev_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vduse_dev *dev = file->private_data;
+ struct vduse_dev_response resp;
+ struct vduse_dev_msg *msg;
+ size_t ret;
+
+ ret = copy_from_iter(&resp, sizeof(resp), from);
+ if (ret != sizeof(resp))
+ return -EINVAL;
+
+ if (!is_mem_zero((const char *)resp.reserved, sizeof(resp.reserved)))
+ return -EINVAL;
+
+ spin_lock(&dev->msg_lock);
+ msg = vduse_find_msg(&dev->recv_list, resp.request_id);
+ if (!msg) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ memcpy(&msg->resp, &resp, sizeof(resp));
+ msg->completed = 1;
+ wake_up(&msg->waitq);
+unlock:
+ spin_unlock(&dev->msg_lock);
+
+ return ret;
+}
+
+static __poll_t vduse_dev_poll(struct file *file, poll_table *wait)
+{
+ struct vduse_dev *dev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &dev->waitq, wait);
+
+ spin_lock(&dev->msg_lock);
+
+ if (unlikely(dev->broken))
+ mask |= EPOLLERR;
+ if (!list_empty(&dev->send_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!list_empty(&dev->recv_list))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ spin_unlock(&dev->msg_lock);
+
+ return mask;
+}
+
+static void vduse_dev_reset(struct vduse_dev *dev)
+{
+ int i;
+ struct vduse_iova_domain *domain = dev->domain;
+
+ /* The coherent mappings are handled in vduse_dev_free_coherent() */
+ if (domain->bounce_map)
+ vduse_domain_reset_bounce_map(domain);
+
+ dev->status = 0;
+ dev->driver_features = 0;
+ dev->generation++;
+ spin_lock(&dev->irq_lock);
+ dev->config_cb.callback = NULL;
+ dev->config_cb.private = NULL;
+ spin_unlock(&dev->irq_lock);
+ flush_work(&dev->inject);
+
+ for (i = 0; i < dev->vq_num; i++) {
+ struct vduse_virtqueue *vq = &dev->vqs[i];
+
+ vq->ready = false;
+ vq->desc_addr = 0;
+ vq->driver_addr = 0;
+ vq->device_addr = 0;
+ vq->num = 0;
+ memset(&vq->state, 0, sizeof(vq->state));
+
+ spin_lock(&vq->kick_lock);
+ vq->kicked = false;
+ if (vq->kickfd)
+ eventfd_ctx_put(vq->kickfd);
+ vq->kickfd = NULL;
+ spin_unlock(&vq->kick_lock);
+
+ spin_lock(&vq->irq_lock);
+ vq->cb.callback = NULL;
+ vq->cb.private = NULL;
+ spin_unlock(&vq->irq_lock);
+ flush_work(&vq->inject);
+ flush_work(&vq->kick);
+ }
+}
+
+static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->desc_addr = desc_area;
+ vq->driver_addr = driver_area;
+ vq->device_addr = device_area;
+
+ return 0;
+}
+
+static void vduse_vq_kick(struct vduse_virtqueue *vq)
+{
+ spin_lock(&vq->kick_lock);
+ if (!vq->ready)
+ goto unlock;
+
+ if (vq->kickfd)
+ eventfd_signal(vq->kickfd, 1);
+ else
+ vq->kicked = true;
+unlock:
+ spin_unlock(&vq->kick_lock);
+}
+
+static void vduse_vq_kick_work(struct work_struct *work)
+{
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, kick);
+
+ vduse_vq_kick(vq);
+}
+
+static void vduse_vdpa_kick_vq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (!eventfd_signal_allowed()) {
+ schedule_work(&vq->kick);
+ return;
+ }
+ vduse_vq_kick(vq);
+}
+
+static void vduse_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_callback *cb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ spin_lock(&vq->irq_lock);
+ vq->cb.callback = cb->callback;
+ vq->cb.private = cb->private;
+ spin_unlock(&vq->irq_lock);
+}
+
+static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->num = num;
+}
+
+static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
+ u16 idx, bool ready)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ vq->ready = ready;
+}
+
+static bool vduse_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ return vq->ready;
+}
+
+static int vduse_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 idx,
+ const struct vdpa_vq_state *state)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ vq->state.packed.last_avail_counter =
+ state->packed.last_avail_counter;
+ vq->state.packed.last_avail_idx = state->packed.last_avail_idx;
+ vq->state.packed.last_used_counter =
+ state->packed.last_used_counter;
+ vq->state.packed.last_used_idx = state->packed.last_used_idx;
+ } else
+ vq->state.split.avail_index = state->split.avail_index;
+
+ return 0;
+}
+
+static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_vq_state *state)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = &dev->vqs[idx];
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
+ return vduse_dev_get_vq_state_packed(dev, vq, &state->packed);
+
+ return vduse_dev_get_vq_state_split(dev, vq, &state->split);
+}
+
+static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->vq_align;
+}
+
+static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->device_features;
+}
+
+static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ dev->driver_features = features;
+ return 0;
+}
+
+static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ spin_lock(&dev->irq_lock);
+ dev->config_cb.callback = cb->callback;
+ dev->config_cb.private = cb->private;
+ spin_unlock(&dev->irq_lock);
+}
+
+static u16 vduse_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ u16 num_max = 0;
+ int i;
+
+ for (i = 0; i < dev->vq_num; i++)
+ if (num_max < dev->vqs[i].num_max)
+ num_max = dev->vqs[i].num_max;
+
+ return num_max;
+}
+
+static u32 vduse_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->device_id;
+}
+
+static u32 vduse_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->vendor_id;
+}
+
+static u8 vduse_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->status;
+}
+
+static void vduse_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (vduse_dev_set_status(dev, status))
+ return;
+
+ dev->status = status;
+}
+
+static size_t vduse_vdpa_get_config_size(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->config_size;
+}
+
+static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (len > dev->config_size - offset)
+ return;
+
+ memcpy(buf, dev->config + offset, len);
+}
+
+static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ /* Now we only support read-only configuration space */
+}
+
+static int vduse_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ if (vduse_dev_set_status(dev, 0))
+ return -EIO;
+
+ vduse_dev_reset(dev);
+
+ return 0;
+}
+
+static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->generation;
+}
+
+static int vduse_vdpa_set_map(struct vdpa_device *vdpa,
+ struct vhost_iotlb *iotlb)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ int ret;
+
+ ret = vduse_domain_set_map(dev->domain, iotlb);
+ if (ret)
+ return ret;
+
+ ret = vduse_dev_update_iotlb(dev, 0ULL, ULLONG_MAX);
+ if (ret) {
+ vduse_domain_clear_map(dev->domain, iotlb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vduse_vdpa_free(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ dev->vdev = NULL;
+}
+
+static const struct vdpa_config_ops vduse_vdpa_config_ops = {
+ .set_vq_address = vduse_vdpa_set_vq_address,
+ .kick_vq = vduse_vdpa_kick_vq,
+ .set_vq_cb = vduse_vdpa_set_vq_cb,
+ .set_vq_num = vduse_vdpa_set_vq_num,
+ .set_vq_ready = vduse_vdpa_set_vq_ready,
+ .get_vq_ready = vduse_vdpa_get_vq_ready,
+ .set_vq_state = vduse_vdpa_set_vq_state,
+ .get_vq_state = vduse_vdpa_get_vq_state,
+ .get_vq_align = vduse_vdpa_get_vq_align,
+ .get_features = vduse_vdpa_get_features,
+ .set_features = vduse_vdpa_set_features,
+ .set_config_cb = vduse_vdpa_set_config_cb,
+ .get_vq_num_max = vduse_vdpa_get_vq_num_max,
+ .get_device_id = vduse_vdpa_get_device_id,
+ .get_vendor_id = vduse_vdpa_get_vendor_id,
+ .get_status = vduse_vdpa_get_status,
+ .set_status = vduse_vdpa_set_status,
+ .get_config_size = vduse_vdpa_get_config_size,
+ .get_config = vduse_vdpa_get_config,
+ .set_config = vduse_vdpa_set_config,
+ .get_generation = vduse_vdpa_get_generation,
+ .reset = vduse_vdpa_reset,
+ .set_map = vduse_vdpa_set_map,
+ .free = vduse_vdpa_free,
+};
+
+static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
+}
+
+static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
+}
+
+static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+ unsigned long iova;
+ void *addr;
+
+ *dma_addr = DMA_MAPPING_ERROR;
+ addr = vduse_domain_alloc_coherent(domain, size,
+ (dma_addr_t *)&iova, flag, attrs);
+ if (!addr)
+ return NULL;
+
+ *dma_addr = (dma_addr_t)iova;
+
+ return addr;
+}
+
+static void vduse_dev_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
+}
+
+static size_t vduse_dev_max_mapping_size(struct device *dev)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ return domain->bounce_size;
+}
+
+static const struct dma_map_ops vduse_dev_dma_ops = {
+ .map_page = vduse_dev_map_page,
+ .unmap_page = vduse_dev_unmap_page,
+ .alloc = vduse_dev_alloc_coherent,
+ .free = vduse_dev_free_coherent,
+ .max_mapping_size = vduse_dev_max_mapping_size,
+};
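
These ops are presumably installed on the vDPA device during VDUSE device initialization (the hookup is outside this excerpt), along the lines of:

    /* assumption: done in the init path so that ring and buffer
     * allocations from the virtio side go through the IOVA domain */
    set_dma_ops(&dev->vdev->vdpa.dev, &vduse_dev_dma_ops);
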
+
+static unsigned int perm_to_file_flags(u8 perm)
+{
+ unsigned int flags = 0;
+
+ switch (perm) {
+ case VDUSE_ACCESS_WO:
+ flags |= O_WRONLY;
+ break;
+ case VDUSE_ACCESS_RO:
+ flags |= O_RDONLY;
+ break;
+ case VDUSE_ACCESS_RW:
+ flags |= O_RDWR;
+ break;
+ default:
+ WARN(1, "invalidate vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags;
+}
+
+static int vduse_kickfd_setup(struct vduse_dev *dev,
+ struct vduse_vq_eventfd *eventfd)
+{
+ struct eventfd_ctx *ctx = NULL;
+ struct vduse_virtqueue *vq;
+ u32 index;
+
+ if (eventfd->index >= dev->vq_num)
+ return -EINVAL;
+
+ index = array_index_nospec(eventfd->index, dev->vq_num);
+ vq = &dev->vqs[index];
+ if (eventfd->fd >= 0) {
+ ctx = eventfd_ctx_fdget(eventfd->fd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ } else if (eventfd->fd != VDUSE_EVENTFD_DEASSIGN)
+ return 0;
+
+ spin_lock(&vq->kick_lock);
+ if (vq->kickfd)
+ eventfd_ctx_put(vq->kickfd);
+ vq->kickfd = ctx;
+ if (vq->ready && vq->kicked && vq->kickfd) {
+ eventfd_signal(vq->kickfd, 1);
+ vq->kicked = false;
+ }
+ spin_unlock(&vq->kick_lock);
+
+ return 0;
+}
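
From userspace, wiring up a kick eventfd is a single ioctl; a sketch, where dev_fd is the open descriptor for the VDUSE device node (the node naming is an assumption):

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    struct vduse_vq_eventfd efd = {
            .index = 0,                    /* virtqueue to wire up */
            .fd = eventfd(0, EFD_CLOEXEC), /* or VDUSE_EVENTFD_DEASSIGN */
    };
    ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &efd);
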
+
+static bool vduse_dev_is_ready(struct vduse_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->vq_num; i++)
+ if (!dev->vqs[i].num_max)
+ return false;
+
+ return true;
+}
+
+static void vduse_dev_irq_inject(struct work_struct *work)
+{
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+ spin_lock_irq(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+ spin_unlock_irq(&dev->irq_lock);
+}
+
+static void vduse_vq_irq_inject(struct work_struct *work)
+{
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+ spin_lock_irq(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+ spin_unlock_irq(&vq->irq_lock);
+}
+
+static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct vduse_dev *dev = file->private_data;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ if (unlikely(dev->broken))
+ return -EPERM;
+
+ switch (cmd) {
+ case VDUSE_IOTLB_GET_FD: {
+ struct vduse_iotlb_entry entry;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+ struct vduse_iova_domain *domain = dev->domain;
+ struct file *f = NULL;
+
+ ret = -EFAULT;
+ if (copy_from_user(&entry, argp, sizeof(entry)))
+ break;
+
+ ret = -EINVAL;
+ if (entry.start > entry.last)
+ break;
+
+ spin_lock(&domain->iotlb_lock);
+ map = vhost_iotlb_itree_first(domain->iotlb,
+ entry.start, entry.last);
+ if (map) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ f = get_file(map_file->file);
+ entry.offset = map_file->offset;
+ entry.start = map->start;
+ entry.last = map->last;
+ entry.perm = map->perm;
+ }
+ spin_unlock(&domain->iotlb_lock);
+ ret = -EINVAL;
+ if (!f)
+ break;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &entry, sizeof(entry))) {
+ fput(f);
+ break;
+ }
+ ret = receive_fd(f, perm_to_file_flags(entry.perm));
+ fput(f);
+ break;
+ }
+ case VDUSE_DEV_GET_FEATURES:
+ /*
+ * Just mirror what the driver wrote here.
+ * The driver is expected to check FEATURE_OK later.
+ */
+ ret = put_user(dev->driver_features, (u64 __user *)argp);
+ break;
+ case VDUSE_DEV_SET_CONFIG: {
+ struct vduse_config_data config;
+ unsigned long size = offsetof(struct vduse_config_data,
+ buffer);
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, size))
+ break;
+
+ ret = -EINVAL;
+ if (config.length == 0 ||
+ config.length > dev->config_size - config.offset)
+ break;
+
+ ret = -EFAULT;
+ if (copy_from_user(dev->config + config.offset, argp + size,
+ config.length))
+ break;
+
+ ret = 0;
+ break;
+ }
+ case VDUSE_DEV_INJECT_CONFIG_IRQ:
+ ret = 0;
+ queue_work(vduse_irq_wq, &dev->inject);
+ break;
+ case VDUSE_VQ_SETUP: {
+ struct vduse_vq_config config;
+ u32 index;
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, sizeof(config)))
+ break;
+
+ ret = -EINVAL;
+ if (config.index >= dev->vq_num)
+ break;
+
+ if (!is_mem_zero((const char *)config.reserved,
+ sizeof(config.reserved)))
+ break;
+
+ index = array_index_nospec(config.index, dev->vq_num);
+ dev->vqs[index].num_max = config.max_size;
+ ret = 0;
+ break;
+ }
+ case VDUSE_VQ_GET_INFO: {
+ struct vduse_vq_info vq_info;
+ struct vduse_virtqueue *vq;
+ u32 index;
+
+ ret = -EFAULT;
+ if (copy_from_user(&vq_info, argp, sizeof(vq_info)))
+ break;
+
+ ret = -EINVAL;
+ if (vq_info.index >= dev->vq_num)
+ break;
+
+ index = array_index_nospec(vq_info.index, dev->vq_num);
+ vq = &dev->vqs[index];
+ vq_info.desc_addr = vq->desc_addr;
+ vq_info.driver_addr = vq->driver_addr;
+ vq_info.device_addr = vq->device_addr;
+ vq_info.num = vq->num;
+
+ if (dev->driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
+ vq_info.packed.last_avail_counter =
+ vq->state.packed.last_avail_counter;
+ vq_info.packed.last_avail_idx =
+ vq->state.packed.last_avail_idx;
+ vq_info.packed.last_used_counter =
+ vq->state.packed.last_used_counter;
+ vq_info.packed.last_used_idx =
+ vq->state.packed.last_used_idx;
+ } else
+ vq_info.split.avail_index =
+ vq->state.split.avail_index;
+
+ vq_info.ready = vq->ready;
+
+ ret = -EFAULT;
+ if (copy_to_user(argp, &vq_info, sizeof(vq_info)))
+ break;
+
+ ret = 0;
+ break;
+ }
+ case VDUSE_VQ_SETUP_KICKFD: {
+ struct vduse_vq_eventfd eventfd;
+
+ ret = -EFAULT;
+ if (copy_from_user(&eventfd, argp, sizeof(eventfd)))
+ break;
+
+ ret = vduse_kickfd_setup(dev, &eventfd);
+ break;
+ }
+ case VDUSE_VQ_INJECT_IRQ: {
+ u32 index;
+
+ ret = -EFAULT;
+ if (get_user(index, (u32 __user *)argp))
+ break;
+
+ ret = -EINVAL;
+ if (index >= dev->vq_num)
+ break;
+
+ ret = 0;
+ index = array_index_nospec(index, dev->vq_num);
+ queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+
+static int vduse_dev_release(struct inode *inode, struct file *file)
+{
+ struct vduse_dev *dev = file->private_data;
+
+ spin_lock(&dev->msg_lock);
+ /* Make sure the inflight messages can be processed after reconnection */
+ list_splice_init(&dev->recv_list, &dev->send_list);
+ spin_unlock(&dev->msg_lock);
+ dev->connected = false;
+
+ return 0;
+}
+
+static struct vduse_dev *vduse_dev_get_from_minor(int minor)
+{
+ struct vduse_dev *dev;
+
+ mutex_lock(&vduse_lock);
+ dev = idr_find(&vduse_idr, minor);
+ mutex_unlock(&vduse_lock);
+
+ return dev;
+}
+
+static int vduse_dev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct vduse_dev *dev = vduse_dev_get_from_minor(iminor(inode));
+
+ if (!dev)
+ return -ENODEV;
+
+ ret = -EBUSY;
+ mutex_lock(&dev->lock);
+ if (dev->connected)
+ goto unlock;
+
+ ret = 0;
+ dev->connected = true;
+ file->private_data = dev;
+unlock:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static const struct file_operations vduse_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = vduse_dev_open,
+ .release = vduse_dev_release,
+ .read_iter = vduse_dev_read_iter,
+ .write_iter = vduse_dev_write_iter,
+ .poll = vduse_dev_poll,
+ .unlocked_ioctl = vduse_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct vduse_dev *vduse_dev_create(void)
+{
+ struct vduse_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (!dev)
+ return NULL;
+
+ mutex_init(&dev->lock);
+ spin_lock_init(&dev->msg_lock);
+ INIT_LIST_HEAD(&dev->send_list);
+ INIT_LIST_HEAD(&dev->recv_list);
+ spin_lock_init(&dev->irq_lock);
+
+ INIT_WORK(&dev->inject, vduse_dev_irq_inject);
+ init_waitqueue_head(&dev->waitq);
+
+ return dev;
+}
+
+static void vduse_dev_destroy(struct vduse_dev *dev)
+{
+ kfree(dev);
+}
+
+static struct vduse_dev *vduse_find_dev(const char *name)
+{
+ struct vduse_dev *dev;
+ int id;
+
+ idr_for_each_entry(&vduse_idr, dev, id)
+ if (!strcmp(dev->name, name))
+ return dev;
+
+ return NULL;
+}
+
+static int vduse_destroy_dev(char *name)
+{
+ struct vduse_dev *dev = vduse_find_dev(name);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&dev->lock);
+ if (dev->vdev || dev->connected) {
+ mutex_unlock(&dev->lock);
+ return -EBUSY;
+ }
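+ /* mark as connected to block new opens while we tear the device down */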
+ dev->connected = true;
+ mutex_unlock(&dev->lock);
+
+ vduse_dev_reset(dev);
+ device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+ idr_remove(&vduse_idr, dev->minor);
+ kvfree(dev->config);
+ kfree(dev->vqs);
+ vduse_domain_destroy(dev->domain);
+ kfree(dev->name);
+ vduse_dev_destroy(dev);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static bool device_is_allowed(u32 device_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allowed_device_id); i++)
+ if (allowed_device_id[i] == device_id)
+ return true;
+
+ return false;
+}
+
+static bool features_is_valid(u64 features)
+{
+ if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
+ return false;
+
+ /* Now we only support read-only configuration space */
+ if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
+ return false;
+
+ return true;
+}
+
+static bool vduse_validate_config(struct vduse_dev_config *config)
+{
+ if (!is_mem_zero((const char *)config->reserved,
+ sizeof(config->reserved)))
+ return false;
+
+ if (config->vq_align > PAGE_SIZE)
+ return false;
+
+ if (config->config_size > PAGE_SIZE)
+ return false;
+
+ if (!device_is_allowed(config->device_id))
+ return false;
+
+ if (!features_is_valid(config->features))
+ return false;
+
+ return true;
+}
+
+static ssize_t msg_timeout_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct vduse_dev *dev = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%u\n", dev->msg_timeout);
+}
+
+static ssize_t msg_timeout_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vduse_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = kstrtouint(buf, 10, &dev->msg_timeout);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(msg_timeout);
+
+static struct attribute *vduse_dev_attrs[] = {
+ &dev_attr_msg_timeout.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(vduse_dev);
+
+static int vduse_create_dev(struct vduse_dev_config *config,
+ void *config_buf, u64 api_version)
+{
+ int i, ret;
+ struct vduse_dev *dev;
+
+ ret = -EEXIST;
+ if (vduse_find_dev(config->name))
+ goto err;
+
+ ret = -ENOMEM;
+ dev = vduse_dev_create();
+ if (!dev)
+ goto err;
+
+ dev->api_version = api_version;
+ dev->device_features = config->features;
+ dev->device_id = config->device_id;
+ dev->vendor_id = config->vendor_id;
+ dev->name = kstrdup(config->name, GFP_KERNEL);
+ if (!dev->name)
+ goto err_str;
+
+ dev->domain = vduse_domain_create(VDUSE_IOVA_SIZE - 1,
+ VDUSE_BOUNCE_SIZE);
+ if (!dev->domain)
+ goto err_domain;
+
+ dev->config = config_buf;
+ dev->config_size = config->config_size;
+ dev->vq_align = config->vq_align;
+ dev->vq_num = config->vq_num;
+ dev->vqs = kcalloc(dev->vq_num, sizeof(*dev->vqs), GFP_KERNEL);
+ if (!dev->vqs)
+ goto err_vqs;
+
+ for (i = 0; i < dev->vq_num; i++) {
+ dev->vqs[i].index = i;
+ INIT_WORK(&dev->vqs[i].inject, vduse_vq_irq_inject);
+ INIT_WORK(&dev->vqs[i].kick, vduse_vq_kick_work);
+ spin_lock_init(&dev->vqs[i].kick_lock);
+ spin_lock_init(&dev->vqs[i].irq_lock);
+ }
+
+ ret = idr_alloc(&vduse_idr, dev, 1, VDUSE_DEV_MAX, GFP_KERNEL);
+ if (ret < 0)
+ goto err_idr;
+
+ dev->minor = ret;
+ dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
+ dev->dev = device_create(vduse_class, NULL,
+ MKDEV(MAJOR(vduse_major), dev->minor),
+ dev, "%s", config->name);
+ if (IS_ERR(dev->dev)) {
+ ret = PTR_ERR(dev->dev);
+ goto err_dev;
+ }
+ __module_get(THIS_MODULE);
+
+ return 0;
+err_dev:
+ idr_remove(&vduse_idr, dev->minor);
+err_idr:
+ kfree(dev->vqs);
+err_vqs:
+ vduse_domain_destroy(dev->domain);
+err_domain:
+ kfree(dev->name);
+err_str:
+ vduse_dev_destroy(dev);
+err:
+ kvfree(config_buf);
+ return ret;
+}
+
+static long vduse_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ void __user *argp = (void __user *)arg;
+ struct vduse_control *control = file->private_data;
+
+ mutex_lock(&vduse_lock);
+ switch (cmd) {
+ case VDUSE_GET_API_VERSION:
+ ret = put_user(control->api_version, (u64 __user *)argp);
+ break;
+ case VDUSE_SET_API_VERSION: {
+ u64 api_version;
+
+ ret = -EFAULT;
+ if (get_user(api_version, (u64 __user *)argp))
+ break;
+
+ ret = -EINVAL;
+ if (api_version > VDUSE_API_VERSION)
+ break;
+
+ ret = 0;
+ control->api_version = api_version;
+ break;
+ }
+ case VDUSE_CREATE_DEV: {
+ struct vduse_dev_config config;
+ unsigned long size = offsetof(struct vduse_dev_config, config);
+ void *buf;
+
+ ret = -EFAULT;
+ if (copy_from_user(&config, argp, size))
+ break;
+
+ ret = -EINVAL;
+ if (!vduse_validate_config(&config))
+ break;
+
+ buf = vmemdup_user(argp + size, config.config_size);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ break;
+ }
+ config.name[VDUSE_NAME_MAX - 1] = '\0';
+ ret = vduse_create_dev(&config, buf, control->api_version);
+ break;
+ }
+ case VDUSE_DESTROY_DEV: {
+ char name[VDUSE_NAME_MAX];
+
+ ret = -EFAULT;
+ if (copy_from_user(name, argp, VDUSE_NAME_MAX))
+ break;
+
+ name[VDUSE_NAME_MAX - 1] = '\0';
+ ret = vduse_destroy_dev(name);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&vduse_lock);
+
+ return ret;
+}
+
+static int vduse_release(struct inode *inode, struct file *file)
+{
+ struct vduse_control *control = file->private_data;
+
+ kfree(control);
+ return 0;
+}
+
+static int vduse_open(struct inode *inode, struct file *file)
+{
+ struct vduse_control *control;
+
+ control = kmalloc(sizeof(struct vduse_control), GFP_KERNEL);
+ if (!control)
+ return -ENOMEM;
+
+ control->api_version = VDUSE_API_VERSION;
+ file->private_data = control;
+
+ return 0;
+}
+
+static const struct file_operations vduse_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = vduse_open,
+ .release = vduse_release,
+ .unlocked_ioctl = vduse_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .llseek = noop_llseek,
+};
+
+static char *vduse_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
+}
+
+static void vduse_mgmtdev_release(struct device *dev)
+{
+}
+
+static struct device vduse_mgmtdev = {
+ .init_name = "vduse",
+ .release = vduse_mgmtdev_release,
+};
+
+static struct vdpa_mgmt_dev mgmt_dev;
+
+static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
+{
+ struct vduse_vdpa *vdev;
+ int ret;
+
+ if (dev->vdev)
+ return -EEXIST;
+
+ vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
+ &vduse_vdpa_config_ops, name, true);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ dev->vdev = vdev;
+ vdev->dev = dev;
+ vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
+ if (ret) {
+ put_device(&vdev->vdpa.dev);
+ return ret;
+ }
+ set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
+ vdev->vdpa.dma_dev = &vdev->vdpa.dev;
+ vdev->vdpa.mdev = &mgmt_dev;
+
+ return 0;
+}
+
+static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+{
+ struct vduse_dev *dev;
+ int ret;
+
+ mutex_lock(&vduse_lock);
+ dev = vduse_find_dev(name);
+ if (!dev || !vduse_dev_is_ready(dev)) {
+ mutex_unlock(&vduse_lock);
+ return -EINVAL;
+ }
+ ret = vduse_dev_init_vdpa(dev, name);
+ mutex_unlock(&vduse_lock);
+ if (ret)
+ return ret;
+
+ ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
+ if (ret) {
+ put_device(&dev->vdev->vdpa.dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+{
+ _vdpa_unregister_device(dev);
+}
+
+static const struct vdpa_mgmtdev_ops vdpa_dev_mgmtdev_ops = {
+ .dev_add = vdpa_dev_add,
+ .dev_del = vdpa_dev_del,
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vdpa_mgmt_dev mgmt_dev = {
+ .device = &vduse_mgmtdev,
+ .id_table = id_table,
+ .ops = &vdpa_dev_mgmtdev_ops,
+};
+
+static int vduse_mgmtdev_init(void)
+{
+ int ret;
+
+ ret = device_register(&vduse_mgmtdev);
+ if (ret)
+ return ret;
+
+ ret = vdpa_mgmtdev_register(&mgmt_dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ device_unregister(&vduse_mgmtdev);
+ return ret;
+}
+
+static void vduse_mgmtdev_exit(void)
+{
+ vdpa_mgmtdev_unregister(&mgmt_dev);
+ device_unregister(&vduse_mgmtdev);
+}
+
+static int vduse_init(void)
+{
+ int ret;
+ struct device *dev;
+
+ vduse_class = class_create(THIS_MODULE, "vduse");
+ if (IS_ERR(vduse_class))
+ return PTR_ERR(vduse_class);
+
+ vduse_class->devnode = vduse_devnode;
+ vduse_class->dev_groups = vduse_dev_groups;
+
+ ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
+ if (ret)
+ goto err_chardev_region;
+
+ /* /dev/vduse/control */
+ cdev_init(&vduse_ctrl_cdev, &vduse_ctrl_fops);
+ vduse_ctrl_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&vduse_ctrl_cdev, vduse_major, 1);
+ if (ret)
+ goto err_ctrl_cdev;
+
+ dev = device_create(vduse_class, NULL, vduse_major, NULL, "control");
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err_device;
+ }
+
+ /* /dev/vduse/$DEVICE */
+ cdev_init(&vduse_cdev, &vduse_dev_fops);
+ vduse_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&vduse_cdev, MKDEV(MAJOR(vduse_major), 1),
+ VDUSE_DEV_MAX - 1);
+ if (ret)
+ goto err_cdev;
+
+ vduse_irq_wq = alloc_workqueue("vduse-irq",
+ WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
+ ret = -ENOMEM;
+ if (!vduse_irq_wq)
+ goto err_wq;
+
+ ret = vduse_domain_init();
+ if (ret)
+ goto err_domain;
+
+ ret = vduse_mgmtdev_init();
+ if (ret)
+ goto err_mgmtdev;
+
+ return 0;
+err_mgmtdev:
+ vduse_domain_exit();
+err_domain:
+ destroy_workqueue(vduse_irq_wq);
+err_wq:
+ cdev_del(&vduse_cdev);
+err_cdev:
+ device_destroy(vduse_class, vduse_major);
+err_device:
+ cdev_del(&vduse_ctrl_cdev);
+err_ctrl_cdev:
+ unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+err_chardev_region:
+ class_destroy(vduse_class);
+ return ret;
+}
+module_init(vduse_init);
+
+static void vduse_exit(void)
+{
+ vduse_mgmtdev_exit();
+ vduse_domain_exit();
+ destroy_workqueue(vduse_irq_wq);
+ cdev_del(&vduse_cdev);
+ device_destroy(vduse_class, vduse_major);
+ cdev_del(&vduse_ctrl_cdev);
+ unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
+ class_destroy(vduse_class);
+}
+module_exit(vduse_exit);
+
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
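Taken together, the two ioctl handlers above imply a short userspace handshake on /dev/vduse/control: negotiate an API version, then submit a struct vduse_dev_config followed by config_size bytes of virtio config space. A minimal sketch, under the assumption that the UAPI header <linux/vduse.h> exports the structure and ioctl numbers used above (names and values are illustrative, not a complete daemon):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>          /* assumed UAPI header for the VDUSE_* ioctls */
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>

static int create_vduse_blk(void)
{
	uint64_t api = 0;                 /* the initial VDUSE_API_VERSION */
	struct vduse_dev_config cfg;
	int fd = open("/dev/vduse/control", O_RDWR);

	if (fd < 0)
		return -1;

	/* anything above VDUSE_API_VERSION is rejected with -EINVAL */
	if (ioctl(fd, VDUSE_SET_API_VERSION, &api))
		return -1;

	memset(&cfg, 0, sizeof(cfg));     /* reserved[] must be zero */
	strncpy(cfg.name, "vduse-blk0", sizeof(cfg.name) - 1);
	cfg.device_id = VIRTIO_ID_BLOCK;  /* must be in allowed_device_id[] */
	cfg.features = 1ULL << VIRTIO_F_ACCESS_PLATFORM; /* features_is_valid() requires it */
	cfg.vq_num = 1;
	cfg.vq_align = 4096;              /* vq_align above PAGE_SIZE is rejected */
	cfg.config_size = 0;              /* no trailing config space in this sketch */

	/* on success the device node appears as /dev/vduse/vduse-blk0 */
	return ioctl(fd, VDUSE_CREATE_DEV, &cfg);
}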
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index fe0527329857..5bcd00246d2e 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -189,10 +189,20 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
}
vp_modern_set_status(mdev, status);
+}
- if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
- (s & VIRTIO_CONFIG_S_DRIVER_OK))
+static int vp_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
+ u8 s = vp_vdpa_get_status(vdpa);
+
+ vp_modern_set_status(mdev, 0);
+
+ if (s & VIRTIO_CONFIG_S_DRIVER_OK)
vp_vdpa_free_irq(vp_vdpa);
+
+ return 0;
}
static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
@@ -398,6 +408,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.set_features = vp_vdpa_set_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
+ .reset = vp_vdpa_reset,
.get_vq_num_max = vp_vdpa_get_vq_num_max,
.get_vq_state = vp_vdpa_get_vq_state,
.get_vq_notification = vp_vdpa_get_vq_notification,
@@ -435,7 +446,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, NULL);
+ dev, &vp_vdpa_ops, NULL, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
index 0582079e4bcc..670d56c879e5 100644
--- a/drivers/vhost/iotlb.c
+++ b/drivers/vhost/iotlb.c
@@ -36,19 +36,21 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
/**
- * vhost_iotlb_add_range - add a new range to vhost IOTLB
+ * vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
* @addr: the address that is mapped to @start
* @perm: access permission of this range
+ * @opaque: the opaque pointer for the new mapping
*
* Returns an error if @last is smaller than @start or if memory
* allocation fails
*/
-int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
- u64 start, u64 last,
- u64 addr, unsigned int perm)
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm,
+ void *opaque)
{
struct vhost_iotlb_map *map;
@@ -71,6 +73,7 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
map->last = last;
map->addr = addr;
map->perm = perm;
+ map->opaque = opaque;
iotlb->nmaps++;
vhost_iotlb_itree_insert(map, &iotlb->root);
@@ -80,6 +83,15 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
return 0;
}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
+
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ return vhost_iotlb_add_range_ctx(iotlb, start, last,
+ addr, perm, NULL);
+}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
/**
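Since struct vhost_iotlb_map now carries an opaque pointer, a consumer can attach per-mapping state when inserting a range and find it again after lookup; the old vhost_iotlb_add_range() keeps its behavior by passing NULL. A hedged sketch of such a consumer — struct my_ctx and add_mapping() are hypothetical, not part of the patch:

struct my_ctx {                  /* hypothetical per-mapping context */
	struct file *file;
	u64 offset;
};

static int add_mapping(struct vhost_iotlb *iotlb, u64 iova, u64 size,
		       u64 addr, struct my_ctx *ctx)
{
	/* ctx is stored in map->opaque and can be read back on lookup */
	return vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
					 addr, VHOST_ACCESS_RW, ctx);
}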
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 3a249ee7e144..28ef323882fb 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
.num = nvq->batched_xdp,
.ptr = nvq->xdp,
};
- int err;
+ int i, err;
if (nvq->batched_xdp == 0)
goto signal_used;
@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+ /* free pages owned by XDP; since this is an unlikely error path,
+ * keep it simple and avoid a more complex bulk update of the
+ * used pages
+ */
+ for (i = 0; i < nvq->batched_xdp; ++i)
+ put_page(virt_to_head_page(nvq->xdp[i].data));
+ nvq->batched_xdp = 0;
+ nvq->done_idx = 0;
return;
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 46f897e41217..532e204f2b1b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1,24 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
* (C) Copyright 2010-2013 Datera, Inc.
* (C) Copyright 2010-2012 IBM Corp.
*
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
- *
* Authors: Nicholas A. Bellinger <nab@daterainc.com>
* Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 9479f7f79217..f41d081777f5 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -116,12 +116,13 @@ static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
irq_bypass_unregister_producer(&vq->call_ctx.producer);
}
-static void vhost_vdpa_reset(struct vhost_vdpa *v)
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
- vdpa_reset(vdpa);
v->in_batch = 0;
+
+ return vdpa_reset(vdpa);
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -157,7 +158,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
u8 status, status_old;
- int nvqs = v->nvqs;
+ int ret, nvqs = v->nvqs;
u16 i;
if (copy_from_user(&status, statusp, sizeof(status)))
@@ -172,7 +173,12 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
return -EINVAL;
- ops->set_status(vdpa, status);
+ if (status == 0) {
+ ret = ops->reset(vdpa);
+ if (ret)
+ return ret;
+ } else
+ ops->set_status(vdpa, status);
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
@@ -498,7 +504,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return r;
}
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
@@ -507,19 +513,44 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
unsigned long pfn, pinned;
while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
- pinned = map->size >> PAGE_SHIFT;
- for (pfn = map->addr >> PAGE_SHIFT;
+ pinned = PFN_DOWN(map->size);
+ for (pfn = PFN_DOWN(map->addr);
pinned > 0; pfn++, pinned--) {
page = pfn_to_page(pfn);
if (map->perm & VHOST_ACCESS_WO)
set_page_dirty_lock(page);
unpin_user_page(page);
}
- atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
vhost_iotlb_map_free(iotlb, map);
}
}
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct vhost_iotlb_map *map;
+ struct vdpa_map_file *map_file;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ map_file = (struct vdpa_map_file *)map->opaque;
+ fput(map_file->file);
+ kfree(map_file);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_unmap(v, start, last);
+
+ return vhost_vdpa_pa_unmap(v, start, last);
+}
+
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
struct vhost_dev *dev = &v->vdev;
@@ -551,21 +582,21 @@ static int perm_to_iommu_flags(u32 perm)
return flags | IOMMU_CACHE;
}
-static int vhost_vdpa_map(struct vhost_vdpa *v,
- u64 iova, u64 size, u64 pa, u32 perm)
+static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
+ u64 size, u64 pa, u32 perm, void *opaque)
{
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
- r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
- pa, perm);
+ r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+ pa, perm, opaque);
if (r)
return r;
if (ops->dma_map) {
- r = ops->dma_map(vdpa, iova, size, pa, perm);
+ r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
if (!v->in_batch)
r = ops->set_map(vdpa, dev->iotlb);
@@ -573,13 +604,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
}
-
- if (r)
+ if (r) {
vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
- else
- atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ return r;
+ }
- return r;
+ if (!vdpa->use_va)
+ atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
+
+ return 0;
}
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
@@ -600,38 +633,78 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
}
}
-static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
- struct vhost_iotlb_msg *msg)
+static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ u64 offset, map_size, map_iova = iova;
+ struct vdpa_map_file *map_file;
+ struct vm_area_struct *vma;
+ int ret = 0;
+
+ mmap_read_lock(dev->mm);
+
+ while (size) {
+ vma = find_vma(dev->mm, uaddr);
+ if (!vma) {
+ ret = -EINVAL;
+ break;
+ }
+ map_size = min(size, vma->vm_end - uaddr);
+ if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
+ !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
+ goto next;
+
+ map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
+ if (!map_file) {
+ ret = -ENOMEM;
+ break;
+ }
+ offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
+ map_file->offset = offset;
+ map_file->file = get_file(vma->vm_file);
+ ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+ perm, map_file);
+ if (ret) {
+ fput(map_file->file);
+ kfree(map_file);
+ break;
+ }
+next:
+ size -= map_size;
+ uaddr += map_size;
+ map_iova += map_size;
+ }
+ if (ret)
+ vhost_vdpa_unmap(v, iova, map_iova - iova);
+
+ mmap_read_unlock(dev->mm);
+
+ return ret;
+}
+
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 uaddr, u32 perm)
{
struct vhost_dev *dev = &v->vdev;
- struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
unsigned int gup_flags = FOLL_LONGTERM;
unsigned long npages, cur_base, map_pfn, last_pfn = 0;
unsigned long lock_limit, sz2pin, nchunks, i;
- u64 iova = msg->iova;
+ u64 start = iova;
long pinned;
int ret = 0;
- if (msg->iova < v->range.first || !msg->size ||
- msg->iova > U64_MAX - msg->size + 1 ||
- msg->iova + msg->size - 1 > v->range.last)
- return -EINVAL;
-
- if (vhost_iotlb_itree_first(iotlb, msg->iova,
- msg->iova + msg->size - 1))
- return -EEXIST;
-
/* Limit the use of memory for bookkeeping */
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list)
return -ENOMEM;
- if (msg->perm & VHOST_ACCESS_WO)
+ if (perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
- npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ npages = PFN_UP(size + (iova & ~PAGE_MASK));
if (!npages) {
ret = -EINVAL;
goto free;
@@ -639,13 +712,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
mmap_read_lock(dev->mm);
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
ret = -ENOMEM;
goto unlock;
}
- cur_base = msg->uaddr & PAGE_MASK;
+ cur_base = uaddr & PAGE_MASK;
iova &= PAGE_MASK;
nchunks = 0;
@@ -673,10 +746,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */
- csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iova, csize,
- map_pfn << PAGE_SHIFT,
- msg->perm);
+ PFN_PHYS(map_pfn),
+ perm, NULL);
if (ret) {
/*
* Unpin the pages that are left unmapped
@@ -699,13 +772,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
last_pfn = this_pfn;
}
- cur_base += pinned << PAGE_SHIFT;
+ cur_base += PFN_PHYS(pinned);
npages -= pinned;
}
/* Pin the remaining chunk */
- ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT, msg->perm);
+ ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+ PFN_PHYS(map_pfn), perm, NULL);
out:
if (ret) {
if (nchunks) {
@@ -724,13 +797,38 @@ out:
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn));
}
- vhost_vdpa_unmap(v, msg->iova, msg->size);
+ vhost_vdpa_unmap(v, start, size);
}
unlock:
mmap_read_unlock(dev->mm);
free:
free_page((unsigned long)page_list);
return ret;
+}
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+
+ if (msg->iova < v->range.first || !msg->size ||
+ msg->iova > U64_MAX - msg->size + 1 ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ if (vdpa->use_va)
+ return vhost_vdpa_va_map(v, msg->iova, msg->size,
+ msg->uaddr, msg->perm);
+
+ return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+ msg->perm);
}
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
@@ -860,7 +958,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
return -EBUSY;
nvqs = v->nvqs;
- vhost_vdpa_reset(v);
+ r = vhost_vdpa_reset(v);
+ if (r)
+ goto err;
vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
@@ -945,7 +1045,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
- notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+ PFN_DOWN(notify.addr), PAGE_SIZE,
vma->vm_page_prot))
return VM_FAULT_SIGBUS;
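The PFN_DOWN()/PFN_UP()/PFN_PHYS() conversions in the hunks above are behavior-preserving replacements for the open-coded shifts; for reference, the helpers expand as follows in include/linux/pfn.h:

#define PFN_UP(x)    (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* bytes -> pfn, round up   */
#define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)                   /* bytes -> pfn, round down */
#define PFN_PHYS(x)  ((phys_addr_t)(x) << PAGE_SHIFT)      /* pfn -> byte address      */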
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f249622ef11b..938aefbc75ec 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
size_t nbytes;
size_t iov_len, payload_len;
int head;
- bool restore_flag = false;
+ u32 flags_to_restore = 0;
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
@@ -178,16 +178,21 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* small rx buffers, headers of packets in rx queue are
* created dynamically and are initialized with header
* of current packet(except length). But in case of
- * SOCK_SEQPACKET, we also must clear record delimeter
- * bit(VIRTIO_VSOCK_SEQ_EOR). Otherwise, instead of one
- * packet with delimeter(which marks end of record),
- * there will be sequence of packets with delimeter
- * bit set. After initialized header will be copied to
- * rx buffer, this bit will be restored.
+ * SOCK_SEQPACKET, we must also clear the message delimiter
+ * bit (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR bit
+ * (VIRTIO_VSOCK_SEQ_EOR) if set. Otherwise, there will be
+ * a sequence of packets with these bits set. After the
+ * initialized header has been copied to the rx buffer,
+ * these required bits will be restored.
*/
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
- restore_flag = true;
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
+
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
+ }
}
}
@@ -224,8 +229,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
- if (restore_flag)
- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
/* We are queueing the same virtio_vsock_pkt to handle
* the remaining bytes, and we want to deliver it
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 71fb710f1ce3..7420d2c16e47 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -962,6 +962,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
struct fb_event event;
+ u32 unused;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
@@ -1008,6 +1009,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
if (var->xres < 8 || var->yres < 8)
return -EINVAL;
+ /* Reject resolutions huge enough to overflow the multiplications below. */
+ if (check_mul_overflow(var->xres, var->yres, &unused) ||
+ check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
+ return -EINVAL;
+
ret = info->fbops->fb_check_var(var, info);
if (ret)
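check_mul_overflow() from <linux/overflow.h> stores the product in its third argument and returns true when the result does not fit that argument's type, so the new guard rejects any mode whose pixel count overflows a u32. A small sketch of the semantics:

u32 res;

/* 65536 * 65536 == 2^32 does not fit in a u32: returns true */
bool overflowed = check_mul_overflow(65536U, 65536U, &res);

/* 1024 * 768 fits: returns false, and res == 786432 */
bool fits = !check_mul_overflow(1024U, 768U, &res);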
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 1ea0c1f6a1fd..588e02fb91d3 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -4,6 +4,7 @@
#include <linux/virtio_config.h>
#include <linux/module.h>
#include <linux/idr.h>
+#include <linux/of.h>
#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
@@ -292,6 +293,8 @@ static void virtio_dev_remove(struct device *_d)
/* Acknowledge the device's existence again. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+
+ of_node_put(dev->dev.of_node);
}
static struct bus_type virtio_bus = {
@@ -318,6 +321,43 @@ void unregister_virtio_driver(struct virtio_driver *driver)
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);
+static int virtio_device_of_init(struct virtio_device *dev)
+{
+ struct device_node *np, *pnode = dev_of_node(dev->dev.parent);
+ char compat[] = "virtio,deviceXXXXXXXX";
+ int ret, count;
+
+ if (!pnode)
+ return 0;
+
+ count = of_get_available_child_count(pnode);
+ if (!count)
+ return 0;
+
+ /* There can be only 1 child node */
+ if (WARN_ON(count > 1))
+ return -EINVAL;
+
+ np = of_get_next_available_child(pnode, NULL);
+ if (WARN_ON(!np))
+ return -ENODEV;
+
+ ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
+ BUG_ON(ret >= sizeof(compat));
+
+ if (!of_device_is_compatible(np, compat)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev->dev.of_node = np;
+ return 0;
+
+out:
+ of_node_put(np);
+ return ret;
+}
+
/**
* register_virtio_device - register virtio device
* @dev : virtio device to be registered
@@ -342,6 +382,10 @@ int register_virtio_device(struct virtio_device *dev)
dev->index = err;
dev_set_name(&dev->dev, "virtio%u", dev->index);
+ err = virtio_device_of_init(dev);
+ if (err)
+ goto out_ida_remove;
+
spin_lock_init(&dev->config_lock);
dev->config_enabled = false;
dev->config_change_pending = false;
@@ -362,10 +406,16 @@ int register_virtio_device(struct virtio_device *dev)
*/
err = device_add(&dev->dev);
if (err)
- ida_simple_remove(&virtio_index_ida, dev->index);
+ goto out_of_node_put;
+
+ return 0;
+
+out_of_node_put:
+ of_node_put(dev->dev.of_node);
+out_ida_remove:
+ ida_simple_remove(&virtio_index_ida, dev->index);
out:
- if (err)
- virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+ virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);
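The compatible string matched by virtio_device_of_init() is "virtio,device%x" with the device ID in lowercase hex, so a transport's single child node must name the device it carries. For example, with IDs from <uapi/linux/virtio_ids.h>:

char compat[sizeof("virtio,deviceXXXXXXXX")];

/* virtio-net is device ID 1  -> expects compatible "virtio,device1" */
snprintf(compat, sizeof(compat), "virtio,device%x", VIRTIO_ID_NET);

/* virtio-block is device ID 2 -> expects compatible "virtio,device2" */
snprintf(compat, sizeof(compat), "virtio,device%x", VIRTIO_ID_BLOCK);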
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 47dce91f788c..c22ff0117b46 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -531,8 +531,8 @@ static int init_vqs(struct virtio_balloon *vb)
callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
}
- err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
- vqs, callbacks, names, NULL, NULL);
+ err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
+ callbacks, names, NULL);
if (err)
return err;
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index b91bc810a87e..bef8ad6bf466 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -143,6 +143,8 @@ struct virtio_mem {
* add_memory_driver_managed().
*/
const char *resource_name;
+ /* Memory group identification. */
+ int mgid;
/*
* We don't want to add too much memory if it's not getting onlined,
@@ -626,8 +628,8 @@ static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
addr + size - 1);
/* Memory might get onlined immediately. */
atomic64_add(size, &vm->offline_size);
- rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
- MHP_MERGE_RESOURCE);
+ rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
+ MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
if (rc) {
atomic64_sub(size, &vm->offline_size);
dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
@@ -677,7 +679,7 @@ static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
- rc = remove_memory(vm->nid, addr, size);
+ rc = remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
@@ -720,7 +722,7 @@ static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
addr + size - 1);
- rc = offline_and_remove_memory(vm->nid, addr, size);
+ rc = offline_and_remove_memory(addr, size);
if (!rc) {
atomic64_sub(size, &vm->offline_size);
/*
@@ -2569,6 +2571,7 @@ static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
+ uint64_t unit_pages;
int rc;
BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
@@ -2603,6 +2606,16 @@ static int virtio_mem_probe(struct virtio_device *vdev)
if (rc)
goto out_del_vq;
+ /* use a single dynamic memory group to cover the whole memory device */
+ if (vm->in_sbm)
+ unit_pages = PHYS_PFN(memory_block_size_bytes());
+ else
+ unit_pages = PHYS_PFN(vm->bbm.bb_size);
+ rc = memory_group_register_dynamic(vm->nid, unit_pages);
+ if (rc < 0)
+ goto out_del_resource;
+ vm->mgid = rc;
+
/*
* If we still have memory plugged, we have to unplug all memory first.
* Registering our parent resource makes sure that this memory isn't
@@ -2617,7 +2630,7 @@ static int virtio_mem_probe(struct virtio_device *vdev)
vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
rc = register_memory_notifier(&vm->memory_notifier);
if (rc)
- goto out_del_resource;
+ goto out_unreg_group;
rc = register_virtio_mem_device(vm);
if (rc)
goto out_unreg_mem;
@@ -2631,6 +2644,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
return 0;
out_unreg_mem:
unregister_memory_notifier(&vm->memory_notifier);
+out_unreg_group:
+ memory_group_unregister(vm->mgid);
out_del_resource:
virtio_mem_delete_resource(vm);
out_del_vq:
@@ -2695,6 +2710,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
} else {
virtio_mem_delete_resource(vm);
kfree_const(vm->resource_name);
+ memory_group_unregister(vm->mgid);
}
/* remove all tracking data - no locking needed */
diff --git a/fs/Kconfig b/fs/Kconfig
index d8207a1b8c44..a6313a969bc5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -43,7 +43,7 @@ source "fs/f2fs/Kconfig"
source "fs/zonefs/Kconfig"
config FS_DAX
- bool "Direct Access (DAX) support"
+ bool "File system based Direct Access (DAX) support"
depends on MMU
depends on !(ARM || MIPS || SPARC)
select DEV_PAGEMAP_OPS if (ZONE_DEVICE && !FS_DAX_LIMITED)
@@ -53,8 +53,23 @@ config FS_DAX
Direct Access (DAX) can be used on memory-backed block devices.
If the block device supports DAX and the filesystem supports DAX,
then you can avoid using the pagecache to buffer I/Os. Turning
- on this option will compile in support for DAX; you will need to
- mount the filesystem using the -o dax option.
+ on this option will compile in support for DAX.
+
+ For a DAX device to support file system access it needs to have
+ struct pages. For the nfit based NVDIMMs this can be enabled
+ using the ndctl utility:
+
+ # ndctl create-namespace --force --reconfig=namespace0.0 \
+ --mode=fsdax --map=mem
+
+ See the 'create-namespace' man page for details on the overhead of
+ --map=mem:
+ https://docs.pmem.io/ndctl-user-guide/ndctl-man-pages/ndctl-create-namespace
+
+ For ndctl to work CONFIG_DEV_DAX needs to be enabled as well. For most
+ file systems DAX support needs to be manually enabled globally or
+ per-inode using a mount option as well. See the file documentation in
+ Documentation/filesystems/dax.rst for details.
If you do not have a block device that is capable of using this,
or if unsure, say N. Saying Y will increase the size of the kernel
@@ -219,8 +234,7 @@ config ARCH_SUPPORTS_HUGETLBFS
config HUGETLBFS
bool "HugeTLB file system support"
- depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
- ARCH_SUPPORTS_HUGETLBFS || BROKEN
+ depends on X86 || IA64 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read
@@ -353,7 +367,7 @@ source "fs/ceph/Kconfig"
source "fs/cifs/Kconfig"
source "fs/ksmbd/Kconfig"
-config CIFS_COMMON
+config SMBFS_COMMON
tristate
default y if CIFS=y
default m if CIFS=m
diff --git a/fs/Makefile b/fs/Makefile
index 2f21300851ae..84c5e4cdfee5 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -17,7 +17,7 @@ obj-y := open.o read_write.o file_table.o super.o \
kernel_read_file.o remap_range.o
ifeq ($(CONFIG_BLOCK),y)
-obj-y += buffer.o block_dev.o direct-io.o mpage.o
+obj-y += buffer.o direct-io.o mpage.o
else
obj-y += no-block.o
endif
@@ -96,7 +96,7 @@ obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
obj-$(CONFIG_UNICODE) += unicode/
obj-$(CONFIG_SYSV_FS) += sysv/
-obj-$(CONFIG_CIFS_COMMON) += cifs_common/
+obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/
obj-$(CONFIG_CIFS) += cifs/
obj-$(CONFIG_SMB_SERVER) += ksmbd/
obj-$(CONFIG_HPFS_FS) += hpfs/
diff --git a/fs/attr.c b/fs/attr.c
index 87ef39db1c34..473d21b3a86d 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -249,6 +249,34 @@ void setattr_copy(struct user_namespace *mnt_userns, struct inode *inode,
}
EXPORT_SYMBOL(setattr_copy);
+int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
+ unsigned int ia_valid)
+{
+ int error;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ return -EPERM;
+ }
+
+ /*
+ * If utimes(2) and friends are called with times == NULL (or both
+ * times are UTIME_NOW), then we need to check for write permission
+ */
+ if (ia_valid & ATTR_TOUCH) {
+ if (IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ if (!inode_owner_or_capable(mnt_userns, inode)) {
+ error = inode_permission(mnt_userns, inode, MAY_WRITE);
+ if (error)
+ return error;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(may_setattr);
+
/**
* notify_change - modify attributes of a filesytem object
* @mnt_userns: user namespace of the mount the inode was found from
@@ -290,25 +318,9 @@ int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
WARN_ON_ONCE(!inode_is_locked(inode));
- if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
- }
-
- /*
- * If utimes(2) and friends are called with times == NULL (or both
- * times are UTIME_NOW), then we need to check for write permission
- */
- if (ia_valid & ATTR_TOUCH) {
- if (IS_IMMUTABLE(inode))
- return -EPERM;
-
- if (!inode_owner_or_capable(mnt_userns, inode)) {
- error = inode_permission(mnt_userns, inode, MAY_WRITE);
- if (error)
- return error;
- }
- }
+ error = may_setattr(mnt_userns, inode, ia_valid);
+ if (error)
+ return error;
if ((ia_valid & ATTR_MODE)) {
umode_t amode = attr->ia_mode;
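With may_setattr() exported, a filesystem can run the immutable/append and utimes permission checks before taking its own locks, instead of first discovering the failure inside notify_change(). A hedged sketch — example_setattr() is a hypothetical caller, not taken from this series:

static int example_setattr(struct user_namespace *mnt_userns,
			   struct dentry *dentry, struct iattr *attr)
{
	int error;

	/* cheap permission checks first, before any expensive locking */
	error = may_setattr(mnt_userns, d_inode(dentry), attr->ia_valid);
	if (error)
		return error;

	/* ... take filesystem-specific locks, then call notify_change() ... */
	return 0;
}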
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 2f9515dccce0..355ea88d5c5f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3314,6 +3314,30 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
+ /*
+ * Flag our filesystem as having big metadata blocks if they are bigger
+ * than the page size.
+ */
+ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
+ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+ btrfs_info(fs_info,
+ "flagging fs with big metadata feature");
+ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+ }
+
+ /* Set up fs_info before parsing mount options */
+ nodesize = btrfs_super_nodesize(disk_super);
+ sectorsize = btrfs_super_sectorsize(disk_super);
+ stripesize = sectorsize;
+ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
+ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+
+ fs_info->nodesize = nodesize;
+ fs_info->sectorsize = sectorsize;
+ fs_info->sectorsize_bits = ilog2(sectorsize);
+ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
+ fs_info->stripesize = stripesize;
+
ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) {
err = ret;
@@ -3341,30 +3365,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
btrfs_info(fs_info, "has skinny extents");
/*
- * flag our filesystem as having big metadata blocks if
- * they are bigger than the page size
- */
- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
- btrfs_info(fs_info,
- "flagging fs with big metadata feature");
- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
- }
-
- nodesize = btrfs_super_nodesize(disk_super);
- sectorsize = btrfs_super_sectorsize(disk_super);
- stripesize = sectorsize;
- fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
- fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
-
- /* Cache block sizes */
- fs_info->nodesize = nodesize;
- fs_info->sectorsize = sectorsize;
- fs_info->sectorsize_bits = ilog2(sectorsize);
- fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
- fs_info->stripesize = stripesize;
-
- /*
* mixed block groups end up with duplicate but slightly offset
* extent buffers for the same range. It leads to corruptions
*/
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 41524f9aeac3..cc61813213d8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3223,6 +3223,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args_v2 *vol_args;
+ struct block_device *bdev = NULL;
+ fmode_t mode;
int ret;
bool cancel = false;
@@ -3255,9 +3257,9 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
/* Exclusive operation is now claimed */
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
- ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
+ ret = btrfs_rm_device(fs_info, NULL, vol_args->devid, &bdev, &mode);
else
- ret = btrfs_rm_device(fs_info, vol_args->name, 0);
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
btrfs_exclop_finish(fs_info);
@@ -3273,6 +3275,8 @@ out:
kfree(vol_args);
err_drop:
mnt_drop_write_file(file);
+ if (bdev)
+ blkdev_put(bdev, mode);
return ret;
}
@@ -3281,6 +3285,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args *vol_args;
+ struct block_device *bdev = NULL;
+ fmode_t mode;
int ret;
bool cancel;
@@ -3302,7 +3308,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
cancel);
if (ret == 0) {
- ret = btrfs_rm_device(fs_info, vol_args->name, 0);
+ ret = btrfs_rm_device(fs_info, vol_args->name, 0, &bdev, &mode);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
btrfs_exclop_finish(fs_info);
@@ -3311,7 +3317,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
kfree(vol_args);
out_drop_write:
mnt_drop_write_file(file);
-
+ if (bdev)
+ blkdev_put(bdev, mode);
return ret;
}
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 6461ebc3a1c1..340f995652f2 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -5,7 +5,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/rbtree.h>
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index edb65abf0393..6b51fd2ec5ac 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1049,6 +1049,7 @@ static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
u64 len)
{
struct inode *inode = ordered->inode;
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
u64 file_offset = ordered->file_offset + pos;
u64 disk_bytenr = ordered->disk_bytenr + pos;
u64 num_bytes = len;
@@ -1066,6 +1067,13 @@ static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
else
type = __ffs(flags_masked);
+ /*
+ * The splitting extent is already counted and will be added again
+ * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
+ * double counting.
+ */
+ percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
+ fs_info->delalloc_batch);
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
WARN_ON_ONCE(1);
ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ec3a874165de..464485aa7318 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -558,6 +558,8 @@ static int btrfs_free_stale_devices(const char *path,
struct btrfs_device *device, *tmp_device;
int ret = 0;
+ lockdep_assert_held(&uuid_mutex);
+
if (path)
ret = -ENOENT;
@@ -988,11 +990,12 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
struct btrfs_device *orig_dev;
int ret = 0;
+ lockdep_assert_held(&uuid_mutex);
+
fs_devices = alloc_fs_devices(orig->fsid, NULL);
if (IS_ERR(fs_devices))
return fs_devices;
- mutex_lock(&orig->device_list_mutex);
fs_devices->total_devices = orig->total_devices;
list_for_each_entry(orig_dev, &orig->devices, dev_list) {
@@ -1024,10 +1027,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
device->fs_devices = fs_devices;
fs_devices->num_devices++;
}
- mutex_unlock(&orig->device_list_mutex);
return fs_devices;
error:
- mutex_unlock(&orig->device_list_mutex);
free_fs_devices(fs_devices);
return ERR_PTR(ret);
}
@@ -1869,15 +1870,17 @@ out:
* Function to update ctime/mtime for a given device path.
* Mainly used for ctime/mtime based probe like libblkid.
*/
-static void update_dev_time(const char *path_name)
+static void update_dev_time(struct block_device *bdev)
{
- struct file *filp;
+ struct inode *inode = bdev->bd_inode;
+ struct timespec64 now;
- filp = filp_open(path_name, O_RDWR, 0);
- if (IS_ERR(filp))
+ /* Shouldn't happen but just in case. */
+ if (!inode)
return;
- file_update_time(filp);
- filp_close(filp, NULL);
+
+ now = current_time(inode);
+ generic_update_time(inode, &now, S_MTIME | S_CTIME);
}
static int btrfs_rm_dev_item(struct btrfs_device *device)
@@ -2053,11 +2056,11 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
/* Update ctime/mtime for device path for libblkid */
- update_dev_time(device_path);
+ update_dev_time(bdev);
}
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
- u64 devid)
+ u64 devid, struct block_device **bdev, fmode_t *mode)
{
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
@@ -2171,15 +2174,26 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
mutex_unlock(&fs_devices->device_list_mutex);
/*
- * at this point, the device is zero sized and detached from
- * the devices list. All that's left is to zero out the old
- * supers and free the device.
+ * At this point, the device is zero sized and detached from the
+ * devices list. All that's left is to zero out the old supers and
+ * free the device.
+ *
+ * We cannot call btrfs_close_bdev() here because we're holding the sb
+ * write lock, and blkdev_put() will pull in the ->open_mutex on the
+ * block device and its dependencies. Instead, just flush the device
+ * and let the caller do the final blkdev_put.
*/
- if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_scratch_superblocks(fs_info, device->bdev,
device->name->str);
+ if (device->bdev) {
+ sync_blockdev(device->bdev);
+ invalidate_bdev(device->bdev);
+ }
+ }
- btrfs_close_bdev(device);
+ *bdev = device->bdev;
+ *mode = device->mode;
synchronize_rcu();
btrfs_free_device(device);
@@ -2706,7 +2720,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
btrfs_forget_devices(device_path);
/* Update ctime/mtime for blkid or udev */
- update_dev_time(device_path);
+ update_dev_time(bdev);
return ret;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index b082250b42e0..2183361db614 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -472,7 +472,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
- const char *device_path, u64 devid);
+ const char *device_path, u64 devid,
+ struct block_device **bdev, fmode_t *mode);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 7e7a897ae0d3..99b80b5c7a93 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1281,8 +1281,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
inode, page, (int)pos, (int)copied, (int)len);
- /* zero the stale part of the page if we did a short copy */
if (!PageUptodate(page)) {
+ /* just return that nothing was copied on a short copy */
if (copied < len) {
copied = 0;
goto out;
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 1409d6149281..058ea2a04376 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -26,12 +26,6 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
-int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
-int ceph_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages);
-
static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
{
ci->fscache = NULL;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 39db97f149b9..6c0e52fd0743 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -703,29 +703,12 @@ void ceph_add_cap(struct inode *inode,
*/
struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
realmino);
- if (realm) {
- struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
- if (oldrealm) {
- spin_lock(&oldrealm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- spin_unlock(&oldrealm->inodes_with_caps_lock);
- }
-
- spin_lock(&realm->inodes_with_caps_lock);
- list_add(&ci->i_snap_realm_item,
- &realm->inodes_with_caps);
- ci->i_snap_realm = realm;
- if (realm->ino == ci->i_vino.ino)
- realm->inode = inode;
- spin_unlock(&realm->inodes_with_caps_lock);
-
- if (oldrealm)
- ceph_put_snap_realm(mdsc, oldrealm);
- } else {
- pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
- realmino);
- WARN_ON(!realm);
- }
+ if (realm)
+ ceph_change_snap_realm(inode, realm);
+ else
+ WARN(1, "%s: couldn't find snap realm 0x%llx (ino 0x%llx oldrealm 0x%llx)\n",
+ __func__, realmino, ci->i_vino.ino,
+ ci->i_snap_realm ? ci->i_snap_realm->ino : 0);
}
__check_cap_issue(ci, cap, issued);
@@ -1112,20 +1095,6 @@ int ceph_is_any_caps(struct inode *inode)
return ret;
}
-static void drop_inode_snap_realm(struct ceph_inode_info *ci)
-{
- struct ceph_snap_realm *realm = ci->i_snap_realm;
- spin_lock(&realm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- ci->i_snap_realm_counter++;
- ci->i_snap_realm = NULL;
- if (realm->ino == ci->i_vino.ino)
- realm->inode = NULL;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
- realm);
-}
-
/*
* Remove a cap. Take steps to deal with a racing iterate_session_caps.
*
@@ -1145,17 +1114,16 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
return;
}
+ lockdep_assert_held(&ci->i_ceph_lock);
+
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap) {
- WARN_ON_ONCE(!list_empty(&ci->i_dirty_item) &&
- !mdsc->fsc->blocklisted);
+ if (ci->i_auth_cap == cap)
ci->i_auth_cap = NULL;
- }
/* remove from session list */
spin_lock(&session->s_cap_lock);
@@ -1201,12 +1169,34 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
* keep i_snap_realm.
*/
if (ci->i_wr_ref == 0 && ci->i_snap_realm)
- drop_inode_snap_realm(ci);
+ ceph_change_snap_realm(&ci->vfs_inode, NULL);
__cap_delay_cancel(mdsc, ci);
}
}
+void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+{
+ struct ceph_inode_info *ci = cap->ci;
+ struct ceph_fs_client *fsc;
+
+ /* 'ci' being NULL means the remove has already occurred */
+ if (!ci) {
+ dout("%s: cap inode is NULL\n", __func__);
+ return;
+ }
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ WARN_ON_ONCE(ci->i_auth_cap == cap &&
+ !list_empty(&ci->i_dirty_item) &&
+ !fsc->blocklisted &&
+ READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
+
+ __ceph_remove_cap(cap, queue_release);
+}
+
struct cap_msg_args {
struct ceph_mds_session *session;
u64 ino, cid, follows;
@@ -1335,7 +1325,7 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
p = rb_next(p);
- __ceph_remove_cap(cap, true);
+ ceph_remove_cap(cap, true);
}
spin_unlock(&ci->i_ceph_lock);
}
@@ -1746,6 +1736,9 @@ struct ceph_cap_flush *ceph_alloc_cap_flush(void)
struct ceph_cap_flush *cf;
cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+ if (!cf)
+ return NULL;
+
cf->is_capsnap = false;
return cf;
}
@@ -1856,6 +1849,8 @@ static u64 __mark_caps_flushing(struct inode *inode,
* try to invalidate mapping pages without blocking.
*/
static int try_nonblocking_invalidate(struct inode *inode)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
u32 invalidating_gen = ci->i_rdcache_gen;
@@ -2219,6 +2214,7 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
*/
static int unsafe_request_wait(struct inode *inode)
{
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req1 = NULL, *req2 = NULL;
int ret, err = 0;
@@ -2238,6 +2234,81 @@ static int unsafe_request_wait(struct inode *inode)
}
spin_unlock(&ci->i_unsafe_lock);
+ /*
+ * Trigger a journal log flush in all the relevant MDSes manually,
+ * or in the worst case we would have to wait up to 5 seconds for
+ * the MDSes to flush their journal logs periodically.
+ */
+ if (req1 || req2) {
+ struct ceph_mds_session **sessions = NULL;
+ struct ceph_mds_session *s;
+ struct ceph_mds_request *req;
+ unsigned int max;
+ int i;
+
+ /*
+ * mdsc->max_sessions is unlikely to change, but if it does,
+ * retry by reallocating the sessions array. This avoids
+ * having to take the mdsc->mutex lock.
+ */
+retry:
+ max = mdsc->max_sessions;
+ sessions = krealloc(sessions, max * sizeof(s), GFP_KERNEL | __GFP_ZERO);
+ if (!sessions)
+ return -ENOMEM;
+
+ spin_lock(&ci->i_unsafe_lock);
+ if (req1) {
+ list_for_each_entry(req, &ci->i_unsafe_dirops,
+ r_unsafe_dir_item) {
+ s = req->r_session;
+ if (unlikely(s->s_mds >= max)) {
+ spin_unlock(&ci->i_unsafe_lock);
+ goto retry;
+ }
+ if (!sessions[s->s_mds]) {
+ s = ceph_get_mds_session(s);
+ sessions[s->s_mds] = s;
+ }
+ }
+ }
+ if (req2) {
+ list_for_each_entry(req, &ci->i_unsafe_iops,
+ r_unsafe_target_item) {
+ s = req->r_session;
+ if (unlikely(s->s_mds >= max)) {
+ spin_unlock(&ci->i_unsafe_lock);
+ goto retry;
+ }
+ if (!sessions[s->s_mds]) {
+ s = ceph_get_mds_session(s);
+ sessions[s->s_mds] = s;
+ }
+ }
+ }
+ spin_unlock(&ci->i_unsafe_lock);
+
+ /* the auth MDS */
+ spin_lock(&ci->i_ceph_lock);
+ if (ci->i_auth_cap) {
+ s = ci->i_auth_cap->session;
+ if (!sessions[s->s_mds])
+ sessions[s->s_mds] = ceph_get_mds_session(s);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ /* send flush mdlog request to MDSes */
+ for (i = 0; i < max; i++) {
+ s = sessions[i];
+ if (s) {
+ send_flush_mdlog(s);
+ ceph_put_mds_session(s);
+ }
+ }
+ kfree(sessions);
+ }
+
dout("unsafe_request_wait %p wait on tid %llu %llu\n",
inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
if (req1) {
@@ -3008,7 +3079,7 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
}
/* see comment in __ceph_remove_cap() */
if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
- drop_inode_snap_realm(ci);
+ ceph_change_snap_realm(inode, NULL);
}
}
if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) {
@@ -3114,7 +3185,16 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
break;
}
}
- BUG_ON(!found);
+
+ if (!found) {
+ /*
+ * The capsnap should already be removed when removing
+ * auth cap in the case of a forced unmount.
+ */
+ WARN_ON_ONCE(ci->i_auth_cap);
+ goto unlock;
+ }
+
capsnap->dirty_pages -= nr;
if (capsnap->dirty_pages == 0) {
complete_capsnap = true;
@@ -3136,6 +3216,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
complete_capsnap ? " (complete capsnap)" : "");
}
+unlock:
spin_unlock(&ci->i_ceph_lock);
if (last) {
@@ -3606,6 +3687,43 @@ out:
iput(inode);
}
+void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
+ bool *wake_ci, bool *wake_mdsc)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ bool ret;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci);
+
+ list_del_init(&capsnap->ci_item);
+ ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
+ if (wake_ci)
+ *wake_ci = ret;
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ if (list_empty(&ci->i_cap_flush_list))
+ list_del_init(&ci->i_flushing_item);
+
+ ret = __detach_cap_flush_from_mdsc(mdsc, &capsnap->cap_flush);
+ if (wake_mdsc)
+ *wake_mdsc = ret;
+ spin_unlock(&mdsc->cap_dirty_lock);
+}
+
+void ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
+ bool *wake_ci, bool *wake_mdsc)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ WARN_ON_ONCE(capsnap->dirty_pages || capsnap->writing);
+ __ceph_remove_capsnap(inode, capsnap, wake_ci, wake_mdsc);
+}
+
/*
* Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
* throw away our cap_snap.
@@ -3643,23 +3761,10 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
capsnap, capsnap->follows);
}
}
- if (flushed) {
- WARN_ON(capsnap->dirty_pages || capsnap->writing);
- dout(" removing %p cap_snap %p follows %lld\n",
- inode, capsnap, follows);
- list_del(&capsnap->ci_item);
- wake_ci |= __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
-
- spin_lock(&mdsc->cap_dirty_lock);
-
- if (list_empty(&ci->i_cap_flush_list))
- list_del_init(&ci->i_flushing_item);
-
- wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc,
- &capsnap->cap_flush);
- spin_unlock(&mdsc->cap_dirty_lock);
- }
+ if (flushed)
+ ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc);
spin_unlock(&ci->i_ceph_lock);
+
if (flushed) {
ceph_put_snap_context(capsnap->context);
ceph_put_cap_snap(capsnap);
@@ -3743,7 +3848,7 @@ retry:
goto out_unlock;
if (target < 0) {
- __ceph_remove_cap(cap, false);
+ ceph_remove_cap(cap, false);
goto out_unlock;
}
@@ -3778,7 +3883,7 @@ retry:
change_auth_cap_ses(ci, tcap->session);
}
}
- __ceph_remove_cap(cap, false);
+ ceph_remove_cap(cap, false);
goto out_unlock;
} else if (tsession) {
 		/* add placeholder for the export target */
@@ -3795,7 +3900,7 @@ retry:
spin_unlock(&mdsc->cap_dirty_lock);
}
- __ceph_remove_cap(cap, false);
+ ceph_remove_cap(cap, false);
goto out_unlock;
}
@@ -3906,7 +4011,7 @@ retry:
ocap->mseq, mds, le32_to_cpu(ph->seq),
le32_to_cpu(ph->mseq));
}
- __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
+ ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
*old_issued = issued;
@@ -4134,8 +4239,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
done:
mutex_unlock(&session->s_mutex);
done_unlocked:
- ceph_put_string(extra_info.pool_ns);
iput(inode);
+out:
+ ceph_put_string(extra_info.pool_ns);
return;
flush_cap_releases:
@@ -4150,7 +4256,7 @@ flush_cap_releases:
bad:
pr_err("ceph_handle_caps: corrupt message\n");
ceph_msg_dump(msg);
- return;
+ goto out;
}
/*
@@ -4225,33 +4331,9 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
dout("flush_dirty_caps done\n");
}
-static void iterate_sessions(struct ceph_mds_client *mdsc,
- void (*cb)(struct ceph_mds_session *))
-{
- int mds;
-
- mutex_lock(&mdsc->mutex);
- for (mds = 0; mds < mdsc->max_sessions; ++mds) {
- struct ceph_mds_session *s;
-
- if (!mdsc->sessions[mds])
- continue;
-
- s = ceph_get_mds_session(mdsc->sessions[mds]);
- if (!s)
- continue;
-
- mutex_unlock(&mdsc->mutex);
- cb(s);
- ceph_put_mds_session(s);
- mutex_lock(&mdsc->mutex);
- }
- mutex_unlock(&mdsc->mutex);
-}
-
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
- iterate_sessions(mdsc, flush_dirty_session_caps);
+ ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true);
}
void __ceph_touch_fmode(struct ceph_inode_info *ci,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index e1d605a02d4a..d16fd2d5fd42 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1722,32 +1722,26 @@ retry_snap:
goto out;
}
- err = file_remove_privs(file);
- if (err)
+ down_read(&osdc->lock);
+ map_flags = osdc->osdmap->flags;
+ pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
+ up_read(&osdc->lock);
+ if ((map_flags & CEPH_OSDMAP_FULL) ||
+ (pool_flags & CEPH_POOL_FLAG_FULL)) {
+ err = -ENOSPC;
goto out;
+ }
- err = file_update_time(file);
+ err = file_remove_privs(file);
if (err)
goto out;
- inode_inc_iversion_raw(inode);
-
if (ci->i_inline_version != CEPH_INLINE_NONE) {
err = ceph_uninline_data(file, NULL);
if (err < 0)
goto out;
}
- down_read(&osdc->lock);
- map_flags = osdc->osdmap->flags;
- pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
- up_read(&osdc->lock);
- if ((map_flags & CEPH_OSDMAP_FULL) ||
- (pool_flags & CEPH_POOL_FLAG_FULL)) {
- err = -ENOSPC;
- goto out;
- }
-
dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
inode, ceph_vinop(inode), pos, count, i_size_read(inode));
if (fi->fmode & CEPH_FILE_MODE_LAZY)
@@ -1759,6 +1753,12 @@ retry_snap:
if (err < 0)
goto out;
+ err = file_update_time(file);
+ if (err)
+ goto out_caps;
+
+ inode_inc_iversion_raw(inode);
+
dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
@@ -1842,6 +1842,8 @@ retry_snap:
}
goto out_unlocked;
+out_caps:
+ ceph_put_cap_refs(ci, got);
out:
if (direct_lock)
ceph_end_io_direct(inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1bd2cc015913..2df1e1284451 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -581,16 +581,9 @@ void ceph_evict_inode(struct inode *inode)
*/
if (ci->i_snap_realm) {
if (ceph_snap(inode) == CEPH_NOSNAP) {
- struct ceph_snap_realm *realm = ci->i_snap_realm;
dout(" dropping residual ref to snap realm %p\n",
- realm);
- spin_lock(&realm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- ci->i_snap_realm = NULL;
- if (realm->ino == ci->i_vino.ino)
- realm->inode = NULL;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_put_snap_realm(mdsc, realm);
+ ci->i_snap_realm);
+ ceph_change_snap_realm(inode, NULL);
} else {
ceph_put_snapid_map(mdsc, ci->i_snapid_map);
ci->i_snap_realm = NULL;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 0b69aec23e5c..7cad180d6deb 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -11,6 +11,7 @@
#include <linux/ratelimit.h>
#include <linux/bits.h>
#include <linux/ktime.h>
+#include <linux/bitmap.h>
#include "super.h"
#include "mds_client.h"
@@ -652,14 +653,9 @@ const char *ceph_session_state_name(int s)
struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
- if (refcount_inc_not_zero(&s->s_ref)) {
- dout("mdsc get_session %p %d -> %d\n", s,
- refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
+ if (refcount_inc_not_zero(&s->s_ref))
return s;
- } else {
- dout("mdsc get_session %p 0 -- FAIL\n", s);
- return NULL;
- }
+ return NULL;
}
void ceph_put_mds_session(struct ceph_mds_session *s)
@@ -667,8 +663,6 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
if (IS_ERR_OR_NULL(s))
return;
- dout("mdsc put_session %p %d -> %d\n", s,
- refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
if (refcount_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
ceph_auth_destroy_authorizer(s->s_auth.authorizer);
@@ -743,8 +737,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_mdsc = mdsc;
s->s_mds = mds;
s->s_state = CEPH_MDS_SESSION_NEW;
- s->s_ttl = 0;
- s->s_seq = 0;
mutex_init(&s->s_mutex);
ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
@@ -753,17 +745,11 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_cap_ttl = jiffies - 1;
spin_lock_init(&s->s_cap_lock);
- s->s_renew_requested = 0;
- s->s_renew_seq = 0;
INIT_LIST_HEAD(&s->s_caps);
- s->s_nr_caps = 0;
refcount_set(&s->s_ref, 1);
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
xa_init(&s->s_delegated_inos);
- s->s_num_cap_releases = 0;
- s->s_cap_reconnect = 0;
- s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
@@ -811,6 +797,33 @@ static void put_request_session(struct ceph_mds_request *req)
}
}
+void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
+ void (*cb)(struct ceph_mds_session *),
+ bool check_state)
+{
+ int mds;
+
+ mutex_lock(&mdsc->mutex);
+ for (mds = 0; mds < mdsc->max_sessions; ++mds) {
+ struct ceph_mds_session *s;
+
+ s = __ceph_lookup_mds_session(mdsc, mds);
+ if (!s)
+ continue;
+
+ if (check_state && !check_session_state(s)) {
+ ceph_put_mds_session(s);
+ continue;
+ }
+
+ mutex_unlock(&mdsc->mutex);
+ cb(s);
+ ceph_put_mds_session(s);
+ mutex_lock(&mdsc->mutex);
+ }
+ mutex_unlock(&mdsc->mutex);
+}
+
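
ceph_mdsc_iterate_sessions() holds a reference on each session and drops mdsc->mutex around the callback, so the callback is allowed to sleep. A hypothetical caller — not part of this patch — would look like:

	/* illustrative callback only */
	static void dump_session(struct ceph_mds_session *s)
	{
		pr_info("mds%d state %s\n", s->s_mds,
			ceph_session_state_name(s->s_state));
	}

	/* check_state=true skips sessions failing check_session_state() */
	ceph_mdsc_iterate_sessions(mdsc, dump_session, true);
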
void ceph_mdsc_release_request(struct kref *kref)
{
struct ceph_mds_request *req = container_of(kref,
@@ -1155,7 +1168,7 @@ random:
/*
* session messages
*/
-static struct ceph_msg *create_session_msg(u32 op, u64 seq)
+struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
{
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
@@ -1163,7 +1176,8 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
false);
if (!msg) {
- pr_err("create_session_msg ENOMEM creating msg\n");
+ pr_err("ENOMEM creating session %s msg\n",
+ ceph_session_op_name(op));
return NULL;
}
h = msg->front.iov_base;
@@ -1294,7 +1308,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
GFP_NOFS, false);
if (!msg) {
- pr_err("create_session_msg ENOMEM creating msg\n");
+ pr_err("ENOMEM creating session open msg\n");
return ERR_PTR(-ENOMEM);
}
p = msg->front.iov_base;
@@ -1583,14 +1597,39 @@ out:
return ret;
}
+static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap_snap *capsnap;
+ int capsnap_release = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
+
+ while (!list_empty(&ci->i_cap_snaps)) {
+ capsnap = list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap, ci_item);
+ __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
+ ceph_put_snap_context(capsnap->context);
+ ceph_put_cap_snap(capsnap);
+ capsnap_release++;
+ }
+ wake_up_all(&ci->i_cap_wq);
+ wake_up_all(&mdsc->cap_flushing_wq);
+ return capsnap_release;
+}
+
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
void *arg)
{
struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
LIST_HEAD(to_remove);
bool dirty_dropped = false;
bool invalidate = false;
+ int capsnap_release = 0;
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
@@ -1598,7 +1637,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
__ceph_remove_cap(cap, false);
if (!ci->i_auth_cap) {
struct ceph_cap_flush *cf;
- struct ceph_mds_client *mdsc = fsc->mdsc;
if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
if (inode->i_data.nrpages > 0)
@@ -1662,6 +1700,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
+
+ if (!list_empty(&ci->i_cap_snaps))
+ capsnap_release = remove_capsnaps(mdsc, inode);
}
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
@@ -1678,6 +1719,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
ceph_queue_invalidate(inode);
if (dirty_dropped)
iput(inode);
+ while (capsnap_release--)
+ iput(inode);
return 0;
}
@@ -1803,8 +1846,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
- msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
- ++session->s_renew_seq);
+ msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
+ ++session->s_renew_seq);
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
@@ -1818,7 +1861,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
session->s_mds, ceph_session_state_name(session->s_state), seq);
- msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
+ msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
@@ -1870,7 +1913,8 @@ static int request_close_session(struct ceph_mds_session *session)
dout("request_close_session mds%d state %s seq %lld\n",
session->s_mds, ceph_session_state_name(session->s_state),
session->s_seq);
- msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
+ msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
+ session->s_seq);
if (!msg)
return -ENOMEM;
ceph_con_send(&session->s_con, msg);
@@ -1965,7 +2009,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
if (oissued) {
/* we aren't the only cap.. just remove us */
- __ceph_remove_cap(cap, true);
+ ceph_remove_cap(cap, true);
(*remaining)--;
} else {
struct dentry *dentry;
@@ -4150,13 +4194,21 @@ static void check_new_map(struct ceph_mds_client *mdsc,
struct ceph_mdsmap *newmap,
struct ceph_mdsmap *oldmap)
{
- int i;
+ int i, j, err;
int oldstate, newstate;
struct ceph_mds_session *s;
+ unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
dout("check_new_map new %u old %u\n",
newmap->m_epoch, oldmap->m_epoch);
+ if (newmap->m_info) {
+ for (i = 0; i < newmap->possible_max_rank; i++) {
+ for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
+ set_bit(newmap->m_info[i].export_targets[j], targets);
+ }
+ }
+
for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
if (!mdsc->sessions[i])
continue;
@@ -4210,6 +4262,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
newstate >= CEPH_MDS_STATE_RECONNECT) {
mutex_unlock(&mdsc->mutex);
+ clear_bit(i, targets);
send_mds_reconnect(mdsc, s);
mutex_lock(&mdsc->mutex);
}
@@ -4232,6 +4285,51 @@ static void check_new_map(struct ceph_mds_client *mdsc,
}
}
+ /*
+ * Only open and reconnect sessions that don't exist yet.
+ */
+ for (i = 0; i < newmap->possible_max_rank; i++) {
+		/*
+		 * If the import MDS crashed just after flushing the
+		 * EImportStart journal, the standby MDS that takes over
+		 * will replay that journal and then wait for the client
+		 * to reconnect -- but the client may never have
+		 * registered/opened the session in the first place.
+		 *
+		 * So try to reconnect to any MDS whose rank is in the
+		 * export targets bitmap and which is in the
+		 * up:reconnect state.
+		 */
+ newstate = ceph_mdsmap_get_state(newmap, i);
+ if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
+ continue;
+
+		/*
+		 * In rare cases the session may already have been
+		 * registered and opened by requests that picked a
+		 * random MDS during the mdsc->mutex unlock/lock gap
+		 * below. The MDS daemon will simply queue those
+		 * requests and keep waiting for the client's
+		 * reconnection request while in the up:reconnect state.
+		 */
+ s = __ceph_lookup_mds_session(mdsc, i);
+ if (likely(!s)) {
+ s = __open_export_target_session(mdsc, i);
+ if (IS_ERR(s)) {
+ err = PTR_ERR(s);
+ pr_err("failed to open export target session, err %d\n",
+ err);
+ continue;
+ }
+ }
+ dout("send reconnect to export target mds.%d\n", i);
+ mutex_unlock(&mdsc->mutex);
+ send_mds_reconnect(mdsc, s);
+ ceph_put_mds_session(s);
+ mutex_lock(&mdsc->mutex);
+ }
+
for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
s = mdsc->sessions[i];
if (!s)
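
For what it's worth, the on-stack targets array declared earlier in check_new_map() divides CEPH_MAX_MDS by sizeof(unsigned long) (bytes) rather than by BITS_PER_LONG (bits), so it reserves eight times the storage it needs; harmless, but the conventional spelling of the same bitmap would be something like:

	DECLARE_BITMAP(targets, CEPH_MAX_MDS);

	bitmap_zero(targets, CEPH_MAX_MDS);
	/* then set_bit()/test_bit()/clear_bit() exactly as above */
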
@@ -4409,24 +4507,12 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
}
/*
- * lock unlock sessions, to wait ongoing session activities
+ * lock/unlock the session, to wait for ongoing session activities
*/
-static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
+static void lock_unlock_session(struct ceph_mds_session *s)
{
- int i;
-
- mutex_lock(&mdsc->mutex);
- for (i = 0; i < mdsc->max_sessions; i++) {
- struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
- if (!s)
- continue;
- mutex_unlock(&mdsc->mutex);
- mutex_lock(&s->s_mutex);
- mutex_unlock(&s->s_mutex);
- ceph_put_mds_session(s);
- mutex_lock(&mdsc->mutex);
- }
- mutex_unlock(&mdsc->mutex);
+ mutex_lock(&s->s_mutex);
+ mutex_unlock(&s->s_mutex);
}
static void maybe_recover_session(struct ceph_mds_client *mdsc)
@@ -4448,6 +4534,8 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
bool check_session_state(struct ceph_mds_session *s)
{
+ struct ceph_fs_client *fsc = s->s_mdsc->fsc;
+
switch (s->s_state) {
case CEPH_MDS_SESSION_OPEN:
if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
@@ -4456,8 +4544,9 @@ bool check_session_state(struct ceph_mds_session *s)
}
break;
case CEPH_MDS_SESSION_CLOSING:
- /* Should never reach this when we're unmounting */
- WARN_ON_ONCE(s->s_ttl);
+		/* Should never reach this unless we are force unmounting */
+ WARN_ON_ONCE(s->s_ttl &&
+ READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
fallthrough;
case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING:
@@ -4584,21 +4673,12 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
init_completion(&mdsc->safe_umount_waiters);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
- mdsc->sessions = NULL;
- atomic_set(&mdsc->num_sessions, 0);
- mdsc->max_sessions = 0;
- mdsc->stopping = 0;
- atomic64_set(&mdsc->quotarealms_count, 0);
mdsc->quotarealms_inodes = RB_ROOT;
mutex_init(&mdsc->quotarealms_inodes_mutex);
- mdsc->last_snap_seq = 0;
init_rwsem(&mdsc->snap_rwsem);
mdsc->snap_realms = RB_ROOT;
INIT_LIST_HEAD(&mdsc->snap_empty);
- mdsc->num_snap_realms = 0;
spin_lock_init(&mdsc->snap_empty_lock);
- mdsc->last_tid = 0;
- mdsc->oldest_tid = 0;
mdsc->request_tree = RB_ROOT;
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
@@ -4610,11 +4690,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
mdsc->last_cap_flush_tid = 1;
INIT_LIST_HEAD(&mdsc->cap_flush_list);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
- mdsc->num_cap_flushing = 0;
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
- atomic_set(&mdsc->cap_reclaim_pending, 0);
err = ceph_metric_init(&mdsc->metric);
if (err)
goto err_mdsmap;
@@ -4676,6 +4754,30 @@ static void wait_requests(struct ceph_mds_client *mdsc)
dout("wait_requests done\n");
}
+void send_flush_mdlog(struct ceph_mds_session *s)
+{
+ struct ceph_msg *msg;
+
+ /*
+ * Pre-luminous MDS crashes when it sees an unknown session request
+ */
+ if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
+ return;
+
+ mutex_lock(&s->s_mutex);
+	dout("request mdlog flush to mds%d (%s) seq %lld\n", s->s_mds,
+ ceph_session_state_name(s->s_state), s->s_seq);
+ msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
+ s->s_seq);
+ if (!msg) {
+ pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
+ s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
+ } else {
+ ceph_con_send(&s->s_con, msg);
+ }
+ mutex_unlock(&s->s_mutex);
+}
+
/*
* called before mount is ro, and before dentries are torn down.
* (hmm, does this still race with new lookups?)
@@ -4685,7 +4787,8 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
dout("pre_umount\n");
mdsc->stopping = 1;
- lock_unlock_sessions(mdsc);
+ ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
+ ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
ceph_flush_dirty_caps(mdsc);
wait_requests(mdsc);
@@ -4912,7 +5015,6 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
ceph_metric_destroy(&mdsc->metric);
- flush_delayed_work(&mdsc->metric.delayed_work);
fsc->mdsc = NULL;
kfree(mdsc);
dout("mdsc_destroy %p done\n", mdsc);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 20e42d8b66c6..97c7f7bfa55f 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -522,6 +522,11 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
kref_put(&req->r_kref, ceph_mdsc_release_request);
}
+extern void send_flush_mdlog(struct ceph_mds_session *s);
+extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
+ void (*cb)(struct ceph_mds_session *),
+ bool check_state);
+extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
struct ceph_cap *cap);
extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 3c444b9cb17b..61d67cbcb367 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
int err;
u8 mdsmap_v;
u16 mdsmap_ev;
+ u32 target;
m = kzalloc(sizeof(*m), GFP_NOFS);
if (!m)
@@ -260,9 +261,14 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
sizeof(u32), GFP_NOFS);
if (!info->export_targets)
goto nomem;
- for (j = 0; j < num_export_targets; j++)
- info->export_targets[j] =
- ceph_decode_32(&pexport_targets);
+ for (j = 0; j < num_export_targets; j++) {
+ target = ceph_decode_32(&pexport_targets);
+ if (target >= m->possible_max_rank) {
+ err = -EIO;
+ goto corrupt;
+ }
+ info->export_targets[j] = target;
+ }
} else {
info->export_targets = NULL;
}
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 5ac151eb0d49..04d5df29bbbf 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -302,6 +302,8 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
if (!m)
return;
+ cancel_delayed_work_sync(&m->delayed_work);
+
percpu_counter_destroy(&m->total_inodes);
percpu_counter_destroy(&m->opened_inodes);
percpu_counter_destroy(&m->i_caps_mis);
@@ -309,8 +311,6 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
percpu_counter_destroy(&m->d_lease_mis);
percpu_counter_destroy(&m->d_lease_hit);
- cancel_delayed_work_sync(&m->delayed_work);
-
ceph_put_mds_session(m->session);
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 15105f9da3fd..b41e6724c591 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -849,6 +849,43 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
dout("flush_snaps done\n");
}
+/**
+ * ceph_change_snap_realm - change the snap_realm for an inode
+ * @inode: inode to move to new snap realm
+ * @realm: new realm to move inode into (may be NULL)
+ *
+ * Detach an inode from its old snaprealm (if any) and attach it to
+ * the new snaprealm (if any). The old snap realm reference held by
+ * the inode is put. If realm is non-NULL, then the caller's reference
+ * to it is taken over by the inode.
+ */
+void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ if (oldrealm) {
+ spin_lock(&oldrealm->inodes_with_caps_lock);
+ list_del_init(&ci->i_snap_realm_item);
+ if (oldrealm->ino == ci->i_vino.ino)
+ oldrealm->inode = NULL;
+ spin_unlock(&oldrealm->inodes_with_caps_lock);
+ ceph_put_snap_realm(mdsc, oldrealm);
+ }
+
+ ci->i_snap_realm = realm;
+
+ if (realm) {
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_add(&ci->i_snap_realm_item, &realm->inodes_with_caps);
+ if (realm->ino == ci->i_vino.ino)
+ realm->inode = inode;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ }
+}
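
As the kerneldoc above states, the caller holds ci->i_ceph_lock and hands its realm reference over to the inode; the snap-split handler later in this patch follows exactly this pattern:

	spin_lock(&ci->i_ceph_lock);
	ceph_get_snap_realm(mdsc, realm);	/* ref is taken over by the inode */
	ceph_change_snap_realm(inode, realm);	/* or NULL to only detach */
	spin_unlock(&ci->i_ceph_lock);
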
/*
* Handle a snap notification from the MDS.
@@ -935,7 +972,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
};
struct inode *inode = ceph_find_inode(sb, vino);
struct ceph_inode_info *ci;
- struct ceph_snap_realm *oldrealm;
if (!inode)
continue;
@@ -960,27 +996,10 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
}
dout(" will move %p to split realm %llx %p\n",
inode, realm->ino, realm);
- /*
- * Move the inode to the new realm
- */
- oldrealm = ci->i_snap_realm;
- spin_lock(&oldrealm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- spin_unlock(&oldrealm->inodes_with_caps_lock);
-
- spin_lock(&realm->inodes_with_caps_lock);
- list_add(&ci->i_snap_realm_item,
- &realm->inodes_with_caps);
- ci->i_snap_realm = realm;
- if (realm->ino == ci->i_vino.ino)
- realm->inode = inode;
- spin_unlock(&realm->inodes_with_caps_lock);
-
- spin_unlock(&ci->i_ceph_lock);
ceph_get_snap_realm(mdsc, realm);
- ceph_put_snap_realm(mdsc, oldrealm);
-
+ ceph_change_snap_realm(inode, realm);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
continue;
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
index 4a79f3632260..573bb9556fb5 100644
--- a/fs/ceph/strings.c
+++ b/fs/ceph/strings.c
@@ -46,6 +46,7 @@ const char *ceph_session_op_name(int op)
case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack";
case CEPH_SESSION_FORCE_RO: return "force_ro";
case CEPH_SESSION_REJECT: return "reject";
+ case CEPH_SESSION_REQUEST_FLUSH_MDLOG: return "flush_mdlog";
}
return "???";
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c30258f95e37..a40eb14c282a 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -418,7 +418,6 @@ struct ceph_inode_info {
struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
struct ceph_snapid_map *i_snapid_map; /* snapid -> dev_t */
};
- int i_snap_realm_counter; /* snap realm (if caps) */
struct list_head i_snap_realm_item;
struct list_head i_snap_flush_item;
struct timespec64 i_btime;
@@ -929,6 +928,7 @@ extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
extern int ceph_update_snap_trace(struct ceph_mds_client *m,
void *p, void *e, bool deletion,
struct ceph_snap_realm **realm_ret);
+void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm);
extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
@@ -1138,6 +1138,7 @@ extern void ceph_add_cap(struct inode *inode,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void __ceph_remove_caps(struct ceph_inode_info *ci);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
@@ -1163,6 +1164,12 @@ extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
+extern void __ceph_remove_capsnap(struct inode *inode,
+ struct ceph_cap_snap *capsnap,
+ bool *wake_ci, bool *wake_mdsc);
+extern void ceph_remove_capsnap(struct inode *inode,
+ struct ceph_cap_snap *capsnap,
+ bool *wake_ci, bool *wake_mdsc);
extern void ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession);
extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 1242db8d3444..159a1ffa4f4b 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -340,6 +340,18 @@ static ssize_t ceph_vxattrcb_caps(struct ceph_inode_info *ci, char *val,
ceph_cap_string(issued), issued);
}
+static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ int ret;
+
+ spin_lock(&ci->i_ceph_lock);
+ ret = ceph_fmt_xattr(val, size, "%d",
+ ci->i_auth_cap ? ci->i_auth_cap->session->s_mds : -1);
+ spin_unlock(&ci->i_ceph_lock);
+ return ret;
+}
+
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
@@ -473,6 +485,13 @@ static struct ceph_vxattr ceph_common_vxattrs[] = {
.exists_cb = NULL,
.flags = VXATTR_FLAG_READONLY,
},
+ {
+ .name = "ceph.auth_mds",
+ .name_size = sizeof("ceph.auth_mds"),
+ .getxattr_cb = ceph_vxattrcb_auth_mds,
+ .exists_cb = NULL,
+ .flags = VXATTR_FLAG_READONLY,
+ },
{ .name = NULL, 0 } /* Required table terminator */
};
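
The new ceph.auth_mds vxattr is read-only and reports the auth MDS rank, or -1 when the inode has no auth cap. User space can query it with a plain getxattr(2) call; a small sketch, where the mount path is only an example:

	#include <stdio.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char buf[16];	/* "/mnt/cephfs/somefile" is an example path */
		ssize_t n = getxattr("/mnt/cephfs/somefile", "ceph.auth_mds",
				     buf, sizeof(buf) - 1);
		if (n >= 0) {
			buf[n] = '\0';
			printf("auth mds: %s\n", buf);
		}
		return 0;
	}
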
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 6679e07e533e..2e6f40344037 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -22,7 +22,7 @@
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/fips.h>
-#include "../cifs_common/arc4.h"
+#include "../smbfs_common/arc4.h"
#include <crypto/aead.h>
int __cifs_calc_signature(struct smb_rqst *rqst,
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index dc920e206336..98e8e5aa0613 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -12,7 +12,7 @@
#include <net/sock.h>
#include <asm/unaligned.h>
-#include "smbfsctl.h"
+#include "../smbfs_common/smbfsctl.h"
#define CIFS_PROT 0
#define POSIX_PROT (CIFS_PROT+1)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ddc0e8f97872..bda606dc72b1 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -689,13 +689,19 @@ smb2_close_cached_fid(struct kref *ref)
cifs_dbg(FYI, "clear cached root file handle\n");
SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
cfid->fid->volatile_fid);
- cfid->is_valid = false;
- cfid->file_all_info_is_valid = false;
- cfid->has_lease = false;
- if (cfid->dentry) {
- dput(cfid->dentry);
- cfid->dentry = NULL;
- }
+ }
+
+	/*
+	 * The validity check above only decides whether to send
+	 * SMB2_close; the entries below must be invalidated
+	 * whenever this function is called.
+	 */
+ cfid->is_valid = false;
+ cfid->file_all_info_is_valid = false;
+ cfid->has_lease = false;
+ if (cfid->dentry) {
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
}
}
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 10047cc55286..4a0487753869 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -24,7 +24,7 @@
#include "cifsglob.h"
#include "cifs_debug.h"
#include "cifsproto.h"
-#include "../cifs_common/md4.h"
+#include "../smbfs_common/md4.h"
#ifndef false
#define false 0
diff --git a/fs/coredump.c b/fs/coredump.c
index 07afb5ddb1c4..3224dee44d30 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -782,10 +782,17 @@ void do_coredump(const kernel_siginfo_t *siginfo)
* filesystem.
*/
mnt_userns = file_mnt_user_ns(cprm.file);
- if (!uid_eq(i_uid_into_mnt(mnt_userns, inode), current_fsuid()))
+ if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
+ current_fsuid())) {
+ pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
+ cn.corename);
goto close_fail;
- if ((inode->i_mode & 0677) != 0600)
+ }
+ if ((inode->i_mode & 0677) != 0600) {
+ pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
+ cn.corename);
goto close_fail;
+ }
if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
goto close_fail;
if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
@@ -1127,8 +1134,10 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
mmap_write_unlock(mm);
- if (WARN_ON(i != *vma_count))
+ if (WARN_ON(i != *vma_count)) {
+ kvfree(*vma_meta);
return -EFAULT;
+ }
*vma_data_size_ptr = vma_data_size;
return 0;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index a8d49e8fc83a..11b88559f8bf 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -546,7 +546,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
return err;
if (test_opt(ctx, DAX_ALWAYS) &&
- !bdev_dax_supported(sb->s_bdev, EROFS_BLKSIZ)) {
+ !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0, bdev_nr_sectors(sb->s_bdev))) {
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
clear_opt(ctx, DAX_ALWAYS);
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1e596e1d0bba..06f4c5ae1451 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -723,7 +723,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
*/
call_rcu(&epi->rcu, epi_rcu_free);
- atomic_long_dec(&ep->user->epoll_watches);
+ percpu_counter_dec(&ep->user->epoll_watches);
return 0;
}
@@ -1439,7 +1439,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
{
int error, pwake = 0;
__poll_t revents;
- long user_watches;
struct epitem *epi;
struct ep_pqueue epq;
struct eventpoll *tep = NULL;
@@ -1449,11 +1448,15 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
lockdep_assert_irqs_enabled();
- user_watches = atomic_long_read(&ep->user->epoll_watches);
- if (unlikely(user_watches >= max_user_watches))
+ if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
+ max_user_watches) >= 0))
return -ENOSPC;
- if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL)))
+ percpu_counter_inc(&ep->user->epoll_watches);
+
+ if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
+ percpu_counter_dec(&ep->user->epoll_watches);
return -ENOMEM;
+ }
/* Item initialization follow here ... */
INIT_LIST_HEAD(&epi->rdllink);
@@ -1466,17 +1469,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
mutex_lock_nested(&tep->mtx, 1);
/* Add the current item to the list of active epoll hook for this file */
if (unlikely(attach_epitem(tfile, epi) < 0)) {
- kmem_cache_free(epi_cache, epi);
if (tep)
mutex_unlock(&tep->mtx);
+ kmem_cache_free(epi_cache, epi);
+ percpu_counter_dec(&ep->user->epoll_watches);
return -ENOMEM;
}
if (full_check && !tep)
list_file(tfile);
- atomic_long_inc(&ep->user->epoll_watches);
-
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
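
Note the accounting order in the conversion above: the per-user watch count is charged before the first allocation and rolled back on every failure path. This speculative charge/rollback shape is the usual way to enforce a limit with a percpu_counter, roughly (alloc_object() is an illustrative stand-in):

	if (percpu_counter_compare(&counter, limit) >= 0)
		return -ENOSPC;		/* approximate check is fine here */

	percpu_counter_inc(&counter);	/* charge first ... */
	obj = alloc_object();
	if (!obj) {
		percpu_counter_dec(&counter);	/* ... undo on failure */
		return -ENOMEM;
	}
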
@@ -1684,8 +1686,8 @@ static int ep_send_events(struct eventpoll *ep,
if (!revents)
continue;
- if (__put_user(revents, &events->events) ||
- __put_user(epi->event.data, &events->data)) {
+ events = epoll_put_uevent(revents, epi->event.data, events);
+ if (!events) {
list_add(&epi->rdllink, &txlist);
ep_pm_stay_awake(epi);
if (!res)
@@ -1693,7 +1695,6 @@ static int ep_send_events(struct eventpoll *ep,
break;
}
res++;
- events++;
if (epi->event.events & EPOLLONESHOT)
epi->event.events &= EP_PRIVATE_BITS;
else if (!(epi->event.events & EPOLLET)) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 987bcf32ed46..d8d580b609ba 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -946,7 +946,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (test_opt(sb, DAX)) {
- if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
+ if (!dax_supported(dax_dev, sb->s_bdev, blocksize, 0,
+ bdev_nr_sectors(sb->s_bdev))) {
ext2_msg(sb, KERN_ERR,
"DAX unsupported by block device. Turning off DAX.");
clear_opt(sbi->s_mount_opt, DAX);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 136940af00b8..0775950ee84e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4287,7 +4287,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- if (bdev_dax_supported(sb->s_bdev, blocksize))
+ if (dax_supported(dax_dev, sb->s_bdev, blocksize, 0,
+ bdev_nr_sectors(sb->s_bdev)))
set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
diff --git a/fs/file.c b/fs/file.c
index d8afa8266859..8627dacfc424 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1150,6 +1150,12 @@ int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
return new_fd;
}
+int receive_fd(struct file *file, unsigned int o_flags)
+{
+ return __receive_fd(file, NULL, o_flags);
+}
+EXPORT_SYMBOL_GPL(receive_fd);
+
static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
int err = -EBADF;
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 90b8d879fbaf..58b9067b2391 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -209,21 +209,28 @@ SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2)
}
#endif
-int __init get_filesystem_list(char *buf)
+int __init list_bdev_fs_names(char *buf, size_t size)
{
- int len = 0;
- struct file_system_type * tmp;
+ struct file_system_type *p;
+ size_t len;
+ int count = 0;
read_lock(&file_systems_lock);
- tmp = file_systems;
- while (tmp && len < PAGE_SIZE - 80) {
- len += sprintf(buf+len, "%s\t%s\n",
- (tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev",
- tmp->name);
- tmp = tmp->next;
+ for (p = file_systems; p; p = p->next) {
+ if (!(p->fs_flags & FS_REQUIRES_DEV))
+ continue;
+ len = strlen(p->name) + 1;
+ if (len > size) {
+ pr_warn("%s: truncating file system list\n", __func__);
+ break;
+ }
+ memcpy(buf, p->name, len);
+ buf += len;
+ size -= len;
+ count++;
}
read_unlock(&file_systems_lock);
- return len;
+ return count;
}
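
list_bdev_fs_names() now emits back-to-back NUL-terminated names and returns how many fit, so a consumer steps through the buffer with strlen(); a hypothetical caller:

	char *p = buf;
	int i, count;

	count = list_bdev_fs_names(buf, PAGE_SIZE);
	for (i = 0; i < count; i++) {
		pr_info("bdev fs: %s\n", p);
		p += strlen(p) + 1;
	}
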
#ifdef CONFIG_PROC_FS
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 980d44fd3a36..3df07c0e32b3 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -165,7 +165,6 @@ int fs_lookup_param(struct fs_context *fc,
return invalf(fc, "%s: not usable as path", param->key);
}
- f->refcnt++; /* filename_lookup() drops our ref. */
ret = filename_lookup(param->dirfd, f, flags, _path, NULL);
if (ret < 0) {
errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6e15434b23ac..3130f85d2b3f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1985,8 +1985,8 @@ static int gfs2_setattr(struct user_namespace *mnt_userns,
if (error)
goto out;
- error = -EPERM;
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ error = may_setattr(&init_user_ns, inode, attr->ia_valid);
+ if (error)
goto error;
error = setattr_prepare(&init_user_ns, dentry, attr);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 7d0c3dbb2898..d5c9d886cd9f 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -381,6 +381,7 @@ static int hostfs_fsync(struct file *file, loff_t start, loff_t end,
static const struct file_operations hostfs_file_fops = {
.llseek = generic_file_llseek,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
diff --git a/fs/internal.h b/fs/internal.h
index 68a2ae029a27..3cd065c8a66b 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -18,7 +18,7 @@ struct user_namespace;
struct pipe_inode_info;
/*
- * block_dev.c
+ * block/bdev.c
*/
#ifdef CONFIG_BLOCK
extern void __init bdev_cache_init(void);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index d80e4a735677..6c55362c1f99 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -709,6 +709,7 @@ static void create_worker_cont(struct callback_head *cb)
}
raw_spin_unlock(&wqe->lock);
io_worker_ref_put(wqe->wq);
+ kfree(worker);
return;
}
@@ -725,6 +726,7 @@ static void io_workqueue_create(struct work_struct *work)
if (!io_queue_worker_create(worker, acct, create_worker_cont)) {
clear_bit_unlock(0, &worker->create_state);
io_worker_release(worker);
+ kfree(worker);
}
}
@@ -759,6 +761,7 @@ fail:
if (!IS_ERR(tsk)) {
io_init_new_worker(wqe, worker, tsk);
} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+ kfree(worker);
goto fail;
} else {
INIT_WORK(&worker->work, io_workqueue_create);
@@ -832,6 +835,11 @@ append:
wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
+static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
+{
+ return work == data;
+}
+
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
@@ -844,7 +852,6 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
*/
if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
(work->flags & IO_WQ_WORK_CANCEL)) {
-run_cancel:
io_run_cancel(work, wqe);
return;
}
@@ -864,15 +871,22 @@ run_cancel:
bool did_create;
did_create = io_wqe_create_worker(wqe, acct);
- if (unlikely(!did_create)) {
- raw_spin_lock(&wqe->lock);
- /* fatal condition, failed to create the first worker */
- if (!acct->nr_workers) {
- raw_spin_unlock(&wqe->lock);
- goto run_cancel;
- }
- raw_spin_unlock(&wqe->lock);
+ if (likely(did_create))
+ return;
+
+ raw_spin_lock(&wqe->lock);
+ /* fatal condition, failed to create the first worker */
+ if (!acct->nr_workers) {
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_item,
+ .data = work,
+ .cancel_all = false,
+ };
+
+ if (io_acct_cancel_pending_work(wqe, acct, &match))
+ raw_spin_lock(&wqe->lock);
}
+ raw_spin_unlock(&wqe->lock);
}
}
@@ -1122,7 +1136,7 @@ static bool io_task_work_match(struct callback_head *cb, void *data)
{
struct io_worker *worker;
- if (cb->func != create_worker_cb || cb->func != create_worker_cont)
+ if (cb->func != create_worker_cb && cb->func != create_worker_cont)
return false;
worker = container_of(cb, struct io_worker, create_work);
return worker->wqe->wq == data;
@@ -1143,9 +1157,14 @@ static void io_wq_exit_workers(struct io_wq *wq)
while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
struct io_worker *worker;
+ struct io_wqe_acct *acct;
worker = container_of(cb, struct io_worker, create_work);
- atomic_dec(&worker->wqe->acct[worker->create_index].nr_running);
+ acct = io_wqe_get_acct(worker);
+ atomic_dec(&acct->nr_running);
+ raw_spin_lock(&worker->wqe->lock);
+ acct->nr_workers--;
+ raw_spin_unlock(&worker->wqe->lock);
io_worker_ref_put(wq);
clear_bit_unlock(0, &worker->create_state);
io_worker_release(worker);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d816c09c88a5..16fb7436043c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1482,6 +1482,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
struct io_timeout_data *io = req->async_data;
if (hrtimer_try_to_cancel(&io->timer) != -1) {
+ if (status)
+ req_set_fail(req);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
@@ -1619,8 +1621,11 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
+ /* see waitqueue_active() comment */
+ smp_mb();
+
if (ctx->flags & IORING_SETUP_SQPOLL) {
- if (wq_has_sleeper(&ctx->cq_wait))
+ if (waitqueue_active(&ctx->cq_wait))
wake_up_all(&ctx->cq_wait);
}
if (io_should_trigger_evfd(ctx))
@@ -3480,6 +3485,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
if (req->flags & REQ_F_NOWAIT)
goto done;
/* some cases will consume bytes even on error returns */
+ iov_iter_reexpand(iter, iter->count + iter->truncated);
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
} else if (ret == -EIOCBQUEUED) {
@@ -3619,6 +3625,7 @@ done:
} else {
copy_iov:
/* some cases will consume bytes even on error returns */
+ iov_iter_reexpand(iter, iter->count + iter->truncated);
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
return ret ?: -EAGAIN;
@@ -10548,7 +10555,14 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
if (ctx->flags & IORING_SETUP_SQPOLL) {
sqd = ctx->sq_data;
if (sqd) {
+ /*
+ * Observe the correct sqd->lock -> ctx->uring_lock
+ * ordering. Fine to drop uring_lock here, we hold
+ * a ref to the ctx.
+ */
+ mutex_unlock(&ctx->uring_lock);
mutex_lock(&sqd->lock);
+ mutex_lock(&ctx->uring_lock);
tctx = sqd->thread->io_uring;
}
} else {
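
The unlock/relock dance above is the standard way to honor a fixed lock order (here sqd->lock before ctx->uring_lock) when the inner lock is already held: drop the inner lock, take the outer one, retake the inner one, and rely on a held reference to keep the object alive across the gap. In the abstract:

	/* required order: outer -> inner, but we currently hold only inner */
	mutex_unlock(&inner);
	mutex_lock(&outer);
	mutex_lock(&inner);
	/* anything guarded by inner may have changed; revalidate if needed */
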
@@ -10851,7 +10865,7 @@ static int __init io_uring_init(void)
BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
- BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
+ BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
SLAB_ACCOUNT);
diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
index 2243a2c64b37..8317f7ca402b 100644
--- a/fs/ksmbd/ndr.c
+++ b/fs/ksmbd/ndr.c
@@ -28,37 +28,60 @@ static int try_to_realloc_ndr_blob(struct ndr *n, size_t sz)
return 0;
}
-static void ndr_write_int16(struct ndr *n, __u16 value)
+static int ndr_write_int16(struct ndr *n, __u16 value)
{
- if (n->length <= n->offset + sizeof(value))
- try_to_realloc_ndr_blob(n, sizeof(value));
+ if (n->length <= n->offset + sizeof(value)) {
+ int ret;
+
+ ret = try_to_realloc_ndr_blob(n, sizeof(value));
+ if (ret)
+ return ret;
+ }
*(__le16 *)ndr_get_field(n) = cpu_to_le16(value);
n->offset += sizeof(value);
+ return 0;
}
-static void ndr_write_int32(struct ndr *n, __u32 value)
+static int ndr_write_int32(struct ndr *n, __u32 value)
{
- if (n->length <= n->offset + sizeof(value))
- try_to_realloc_ndr_blob(n, sizeof(value));
+ if (n->length <= n->offset + sizeof(value)) {
+ int ret;
+
+ ret = try_to_realloc_ndr_blob(n, sizeof(value));
+ if (ret)
+ return ret;
+ }
*(__le32 *)ndr_get_field(n) = cpu_to_le32(value);
n->offset += sizeof(value);
+ return 0;
}
-static void ndr_write_int64(struct ndr *n, __u64 value)
+static int ndr_write_int64(struct ndr *n, __u64 value)
{
- if (n->length <= n->offset + sizeof(value))
- try_to_realloc_ndr_blob(n, sizeof(value));
+ if (n->length <= n->offset + sizeof(value)) {
+ int ret;
+
+ ret = try_to_realloc_ndr_blob(n, sizeof(value));
+ if (ret)
+ return ret;
+ }
*(__le64 *)ndr_get_field(n) = cpu_to_le64(value);
n->offset += sizeof(value);
+ return 0;
}
static int ndr_write_bytes(struct ndr *n, void *value, size_t sz)
{
- if (n->length <= n->offset + sz)
- try_to_realloc_ndr_blob(n, sz);
+ if (n->length <= n->offset + sz) {
+ int ret;
+
+ ret = try_to_realloc_ndr_blob(n, sz);
+ if (ret)
+ return ret;
+ }
memcpy(ndr_get_field(n), value, sz);
n->offset += sz;
@@ -70,8 +93,13 @@ static int ndr_write_string(struct ndr *n, char *value)
size_t sz;
sz = strlen(value) + 1;
- if (n->length <= n->offset + sz)
- try_to_realloc_ndr_blob(n, sz);
+ if (n->length <= n->offset + sz) {
+ int ret;
+
+ ret = try_to_realloc_ndr_blob(n, sz);
+ if (ret)
+ return ret;
+ }
memcpy(ndr_get_field(n), value, sz);
n->offset += sz;
@@ -81,9 +109,14 @@ static int ndr_write_string(struct ndr *n, char *value)
static int ndr_read_string(struct ndr *n, void *value, size_t sz)
{
- int len = strnlen(ndr_get_field(n), sz);
+ int len;
- memcpy(value, ndr_get_field(n), len);
+ if (n->offset + sz > n->length)
+ return -EINVAL;
+
+ len = strnlen(ndr_get_field(n), sz);
+ if (value)
+ memcpy(value, ndr_get_field(n), len);
len++;
n->offset += len;
n->offset = ALIGN(n->offset, 2);
@@ -92,41 +125,52 @@ static int ndr_read_string(struct ndr *n, void *value, size_t sz)
static int ndr_read_bytes(struct ndr *n, void *value, size_t sz)
{
- memcpy(value, ndr_get_field(n), sz);
+ if (n->offset + sz > n->length)
+ return -EINVAL;
+
+ if (value)
+ memcpy(value, ndr_get_field(n), sz);
n->offset += sz;
return 0;
}
-static __u16 ndr_read_int16(struct ndr *n)
+static int ndr_read_int16(struct ndr *n, __u16 *value)
{
- __u16 ret;
+ if (n->offset + sizeof(__u16) > n->length)
+ return -EINVAL;
- ret = le16_to_cpu(*(__le16 *)ndr_get_field(n));
+ if (value)
+ *value = le16_to_cpu(*(__le16 *)ndr_get_field(n));
n->offset += sizeof(__u16);
- return ret;
+ return 0;
}
-static __u32 ndr_read_int32(struct ndr *n)
+static int ndr_read_int32(struct ndr *n, __u32 *value)
{
- __u32 ret;
+ if (n->offset + sizeof(__u32) > n->length)
+		return -EINVAL;
- ret = le32_to_cpu(*(__le32 *)ndr_get_field(n));
+ if (value)
+ *value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
n->offset += sizeof(__u32);
- return ret;
+ return 0;
}
-static __u64 ndr_read_int64(struct ndr *n)
+static int ndr_read_int64(struct ndr *n, __u64 *value)
{
- __u64 ret;
+ if (n->offset + sizeof(__u64) > n->length)
+ return -EINVAL;
- ret = le64_to_cpu(*(__le64 *)ndr_get_field(n));
+ if (value)
+ *value = le64_to_cpu(*(__le64 *)ndr_get_field(n));
n->offset += sizeof(__u64);
- return ret;
+ return 0;
}
int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
{
char hex_attr[12] = {0};
+ int ret;
n->offset = 0;
n->length = 1024;
@@ -136,97 +180,161 @@ int ndr_encode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
if (da->version == 3) {
snprintf(hex_attr, 10, "0x%x", da->attr);
- ndr_write_string(n, hex_attr);
+ ret = ndr_write_string(n, hex_attr);
} else {
- ndr_write_string(n, "");
+ ret = ndr_write_string(n, "");
}
- ndr_write_int16(n, da->version);
- ndr_write_int32(n, da->version);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int16(n, da->version);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int32(n, da->version);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int32(n, da->flags);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int32(n, da->attr);
+ if (ret)
+ return ret;
- ndr_write_int32(n, da->flags);
- ndr_write_int32(n, da->attr);
if (da->version == 3) {
- ndr_write_int32(n, da->ea_size);
- ndr_write_int64(n, da->size);
- ndr_write_int64(n, da->alloc_size);
+ ret = ndr_write_int32(n, da->ea_size);
+ if (ret)
+ return ret;
+ ret = ndr_write_int64(n, da->size);
+ if (ret)
+ return ret;
+ ret = ndr_write_int64(n, da->alloc_size);
} else {
- ndr_write_int64(n, da->itime);
+ ret = ndr_write_int64(n, da->itime);
}
- ndr_write_int64(n, da->create_time);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int64(n, da->create_time);
+ if (ret)
+ return ret;
+
if (da->version == 3)
- ndr_write_int64(n, da->change_time);
- return 0;
+ ret = ndr_write_int64(n, da->change_time);
+ return ret;
}
int ndr_decode_dos_attr(struct ndr *n, struct xattr_dos_attrib *da)
{
- char *hex_attr;
- int version2;
-
- hex_attr = kzalloc(n->length, GFP_KERNEL);
- if (!hex_attr)
- return -ENOMEM;
+ char hex_attr[12];
+ unsigned int version2;
+ int ret;
n->offset = 0;
- ndr_read_string(n, hex_attr, n->length);
- kfree(hex_attr);
- da->version = ndr_read_int16(n);
+ ret = ndr_read_string(n, hex_attr, sizeof(hex_attr));
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int16(n, &da->version);
+ if (ret)
+ return ret;
if (da->version != 3 && da->version != 4) {
pr_err("v%d version is not supported\n", da->version);
return -EINVAL;
}
- version2 = ndr_read_int32(n);
+ ret = ndr_read_int32(n, &version2);
+ if (ret)
+ return ret;
+
if (da->version != version2) {
pr_err("ndr version mismatched(version: %d, version2: %d)\n",
da->version, version2);
return -EINVAL;
}
- ndr_read_int32(n);
- da->attr = ndr_read_int32(n);
+ ret = ndr_read_int32(n, NULL);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int32(n, &da->attr);
+ if (ret)
+ return ret;
+
if (da->version == 4) {
- da->itime = ndr_read_int64(n);
- da->create_time = ndr_read_int64(n);
+ ret = ndr_read_int64(n, &da->itime);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int64(n, &da->create_time);
} else {
- ndr_read_int32(n);
- ndr_read_int64(n);
- ndr_read_int64(n);
- da->create_time = ndr_read_int64(n);
- ndr_read_int64(n);
+ ret = ndr_read_int32(n, NULL);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int64(n, NULL);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int64(n, NULL);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int64(n, &da->create_time);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int64(n, NULL);
}
- return 0;
+ return ret;
}
static int ndr_encode_posix_acl_entry(struct ndr *n, struct xattr_smb_acl *acl)
{
- int i;
+ int i, ret;
+
+ ret = ndr_write_int32(n, acl->count);
+ if (ret)
+ return ret;
- ndr_write_int32(n, acl->count);
n->offset = ALIGN(n->offset, 8);
- ndr_write_int32(n, acl->count);
- ndr_write_int32(n, 0);
+ ret = ndr_write_int32(n, acl->count);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int32(n, 0);
+ if (ret)
+ return ret;
for (i = 0; i < acl->count; i++) {
n->offset = ALIGN(n->offset, 8);
- ndr_write_int16(n, acl->entries[i].type);
- ndr_write_int16(n, acl->entries[i].type);
+ ret = ndr_write_int16(n, acl->entries[i].type);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int16(n, acl->entries[i].type);
+ if (ret)
+ return ret;
if (acl->entries[i].type == SMB_ACL_USER) {
n->offset = ALIGN(n->offset, 8);
- ndr_write_int64(n, acl->entries[i].uid);
+ ret = ndr_write_int64(n, acl->entries[i].uid);
} else if (acl->entries[i].type == SMB_ACL_GROUP) {
n->offset = ALIGN(n->offset, 8);
- ndr_write_int64(n, acl->entries[i].gid);
+ ret = ndr_write_int64(n, acl->entries[i].gid);
}
+ if (ret)
+ return ret;
/* push permission */
- ndr_write_int32(n, acl->entries[i].perm);
+		ret = ndr_write_int32(n, acl->entries[i].perm);
+		if (ret)
+			return ret;
}
- return 0;
+ return ret;
}
int ndr_encode_posix_acl(struct ndr *n,
@@ -235,7 +343,8 @@ int ndr_encode_posix_acl(struct ndr *n,
struct xattr_smb_acl *acl,
struct xattr_smb_acl *def_acl)
{
- int ref_id = 0x00020000;
+ unsigned int ref_id = 0x00020000;
+ int ret;
n->offset = 0;
n->length = 1024;
@@ -245,35 +354,46 @@ int ndr_encode_posix_acl(struct ndr *n,
if (acl) {
/* ACL ACCESS */
- ndr_write_int32(n, ref_id);
+ ret = ndr_write_int32(n, ref_id);
ref_id += 4;
} else {
- ndr_write_int32(n, 0);
+ ret = ndr_write_int32(n, 0);
}
+ if (ret)
+ return ret;
if (def_acl) {
/* DEFAULT ACL ACCESS */
- ndr_write_int32(n, ref_id);
+ ret = ndr_write_int32(n, ref_id);
ref_id += 4;
} else {
- ndr_write_int32(n, 0);
+ ret = ndr_write_int32(n, 0);
}
-
- ndr_write_int64(n, from_kuid(user_ns, inode->i_uid));
- ndr_write_int64(n, from_kgid(user_ns, inode->i_gid));
- ndr_write_int32(n, inode->i_mode);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int64(n, from_kuid(&init_user_ns, i_uid_into_mnt(user_ns, inode)));
+ if (ret)
+ return ret;
+ ret = ndr_write_int64(n, from_kgid(&init_user_ns, i_gid_into_mnt(user_ns, inode)));
+ if (ret)
+ return ret;
+ ret = ndr_write_int32(n, inode->i_mode);
+ if (ret)
+ return ret;
if (acl) {
- ndr_encode_posix_acl_entry(n, acl);
- if (def_acl)
- ndr_encode_posix_acl_entry(n, def_acl);
+ ret = ndr_encode_posix_acl_entry(n, acl);
+ if (def_acl && !ret)
+ ret = ndr_encode_posix_acl_entry(n, def_acl);
}
- return 0;
+ return ret;
}
int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
{
- int ref_id = 0x00020004;
+ unsigned int ref_id = 0x00020004;
+ int ret;
n->offset = 0;
n->length = 2048;
@@ -281,36 +401,65 @@ int ndr_encode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
if (!n->data)
return -ENOMEM;
- ndr_write_int16(n, acl->version);
- ndr_write_int32(n, acl->version);
- ndr_write_int16(n, 2);
- ndr_write_int32(n, ref_id);
+ ret = ndr_write_int16(n, acl->version);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int32(n, acl->version);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int16(n, 2);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int32(n, ref_id);
+ if (ret)
+ return ret;
/* push hash type and hash 64bytes */
- ndr_write_int16(n, acl->hash_type);
- ndr_write_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
- ndr_write_bytes(n, acl->desc, acl->desc_len);
- ndr_write_int64(n, acl->current_time);
- ndr_write_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+ ret = ndr_write_int16(n, acl->hash_type);
+ if (ret)
+ return ret;
- /* push ndr for security descriptor */
- ndr_write_bytes(n, acl->sd_buf, acl->sd_size);
+ ret = ndr_write_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+ if (ret)
+ return ret;
- return 0;
+ ret = ndr_write_bytes(n, acl->desc, acl->desc_len);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_int64(n, acl->current_time);
+ if (ret)
+ return ret;
+
+ ret = ndr_write_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+ if (ret)
+ return ret;
+
+ /* push ndr for security descriptor */
+ ret = ndr_write_bytes(n, acl->sd_buf, acl->sd_size);
+ return ret;
}
int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
{
- int version2;
+ unsigned int version2;
+ int ret;
n->offset = 0;
- acl->version = ndr_read_int16(n);
+ ret = ndr_read_int16(n, &acl->version);
+ if (ret)
+ return ret;
if (acl->version != 4) {
pr_err("v%d version is not supported\n", acl->version);
return -EINVAL;
}
- version2 = ndr_read_int32(n);
+ ret = ndr_read_int32(n, &version2);
+ if (ret)
+ return ret;
if (acl->version != version2) {
pr_err("ndr version mismatched(version: %d, version2: %d)\n",
acl->version, version2);
@@ -318,11 +467,22 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
}
/* Read Level */
- ndr_read_int16(n);
+ ret = ndr_read_int16(n, NULL);
+ if (ret)
+ return ret;
+
/* Read Ref Id */
- ndr_read_int32(n);
- acl->hash_type = ndr_read_int16(n);
- ndr_read_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+ ret = ndr_read_int32(n, NULL);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_int16(n, &acl->hash_type);
+ if (ret)
+ return ret;
+
+ ret = ndr_read_bytes(n, acl->hash, XATTR_SD_HASH_SIZE);
+ if (ret)
+ return ret;
ndr_read_bytes(n, acl->desc, 10);
if (strncmp(acl->desc, "posix_acl", 9)) {
@@ -331,15 +491,20 @@ int ndr_decode_v4_ntacl(struct ndr *n, struct xattr_ntacl *acl)
}
/* Read Time */
- ndr_read_int64(n);
+ ret = ndr_read_int64(n, NULL);
+ if (ret)
+ return ret;
+
/* Read Posix ACL hash */
- ndr_read_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+ ret = ndr_read_bytes(n, acl->posix_acl_hash, XATTR_SD_HASH_SIZE);
+ if (ret)
+ return ret;
+
acl->sd_size = n->length - n->offset;
acl->sd_buf = kzalloc(acl->sd_size, GFP_KERNEL);
if (!acl->sd_buf)
return -ENOMEM;
- ndr_read_bytes(n, acl->sd_buf, acl->sd_size);
-
- return 0;
+ ret = ndr_read_bytes(n, acl->sd_buf, acl->sd_size);
+ return ret;
}
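The hunks above convert every ndr_write_*() and ndr_read_*() helper from a
void-style call into one that reports failure, and each call site now
propagates that error instead of silently overrunning the marshalling
buffer. A minimal sketch of the writer contract the converted callers
assume; the ndr fields (data, offset, length) appear in the hunks
themselves, while the exact bounds policy (-EINVAL) is an assumption:

static int ndr_write_int32_sketch(struct ndr *n, __u32 value)
{
	/* Refuse to write past the end of the marshalling buffer
	 * (assumed overflow policy: -EINVAL). */
	if (n->offset + sizeof(value) > n->length)
		return -EINVAL;
	memcpy(n->data + n->offset, &value, sizeof(value));
	n->offset += sizeof(value);
	return 0;
}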
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 6ace6c2f22dc..16b6236d1bd2 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -1614,9 +1614,11 @@ void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp)
buf->nlink = cpu_to_le32(inode->i_nlink);
buf->reparse_tag = cpu_to_le32(fp->volatile_id);
buf->mode = cpu_to_le32(inode->i_mode);
- id_to_sid(from_kuid(user_ns, inode->i_uid),
+ id_to_sid(from_kuid_munged(&init_user_ns,
+ i_uid_into_mnt(user_ns, inode)),
SIDNFS_USER, (struct smb_sid *)&buf->SidBuffer[0]);
- id_to_sid(from_kgid(user_ns, inode->i_gid),
+ id_to_sid(from_kgid_munged(&init_user_ns,
+ i_gid_into_mnt(user_ns, inode)),
SIDNFS_GROUP, (struct smb_sid *)&buf->SidBuffer[20]);
}
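The switch from from_kuid() to from_kuid_munged() above matters because the
resulting value is serialized into an on-the-wire SID: where plain
from_kuid() yields (uid_t)-1 for an unmappable id, the munged variant
substitutes the overflow id, so the SID always carries a well-formed value.
A hedged sketch of the combined idmapped-mount translation (the helper name
is illustrative, not part of the patch):

static u32 uid_for_sid_sketch(struct user_namespace *mnt_userns,
			      const struct inode *inode)
{
	/* Map through the idmapped mount, then into the server's view. */
	kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);

	return from_kuid_munged(&init_user_ns, kuid);	/* never (u32)-1 */
}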
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index d329ea49fa14..c86164dc70bb 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -2381,10 +2381,12 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
le32_to_cpu(sd_buf->ccontext.DataLength), true);
}
-static void ksmbd_acls_fattr(struct smb_fattr *fattr, struct inode *inode)
+static void ksmbd_acls_fattr(struct smb_fattr *fattr,
+ struct user_namespace *mnt_userns,
+ struct inode *inode)
{
- fattr->cf_uid = inode->i_uid;
- fattr->cf_gid = inode->i_gid;
+ fattr->cf_uid = i_uid_into_mnt(mnt_userns, inode);
+ fattr->cf_gid = i_gid_into_mnt(mnt_userns, inode);
fattr->cf_mode = inode->i_mode;
fattr->cf_acls = NULL;
fattr->cf_dacls = NULL;
@@ -2893,7 +2895,7 @@ int smb2_open(struct ksmbd_work *work)
struct smb_ntsd *pntsd;
int pntsd_size, ace_num = 0;
- ksmbd_acls_fattr(&fattr, inode);
+ ksmbd_acls_fattr(&fattr, user_ns, inode);
if (fattr.cf_acls)
ace_num = fattr.cf_acls->a_count;
if (fattr.cf_dacls)
@@ -3324,7 +3326,6 @@ static int dentry_name(struct ksmbd_dir_info *d_info, int info_level)
*/
static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
struct ksmbd_dir_info *d_info,
- struct user_namespace *user_ns,
struct ksmbd_kstat *ksmbd_kstat)
{
int next_entry_offset = 0;
@@ -3478,9 +3479,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
S_ISDIR(ksmbd_kstat->kstat->mode) ? ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
if (d_info->hide_dot_file && d_info->name[0] == '.')
posix_info->DosAttributes |= ATTR_HIDDEN_LE;
- id_to_sid(from_kuid(user_ns, ksmbd_kstat->kstat->uid),
+ id_to_sid(from_kuid_munged(&init_user_ns, ksmbd_kstat->kstat->uid),
SIDNFS_USER, (struct smb_sid *)&posix_info->SidBuffer[0]);
- id_to_sid(from_kgid(user_ns, ksmbd_kstat->kstat->gid),
+ id_to_sid(from_kgid_munged(&init_user_ns, ksmbd_kstat->kstat->gid),
SIDNFS_GROUP, (struct smb_sid *)&posix_info->SidBuffer[20]);
memcpy(posix_info->name, conv_name, conv_len);
posix_info->name_len = cpu_to_le32(conv_len);
@@ -3543,9 +3544,9 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv)
return -EINVAL;
lock_dir(priv->dir_fp);
- dent = lookup_one_len(priv->d_info->name,
- priv->dir_fp->filp->f_path.dentry,
- priv->d_info->name_len);
+ dent = lookup_one(user_ns, priv->d_info->name,
+ priv->dir_fp->filp->f_path.dentry,
+ priv->d_info->name_len);
unlock_dir(priv->dir_fp);
if (IS_ERR(dent)) {
@@ -3571,7 +3572,6 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv)
rc = smb2_populate_readdir_entry(priv->work->conn,
priv->info_level,
priv->d_info,
- user_ns,
&ksmbd_kstat);
dput(dent);
if (rc)
@@ -5008,7 +5008,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
user_ns = file_mnt_user_ns(fp->filp);
inode = file_inode(fp->filp);
- ksmbd_acls_fattr(&fattr, inode);
+ ksmbd_acls_fattr(&fattr, user_ns, inode);
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_ACL_XATTR))
@@ -5246,7 +5246,9 @@ int smb2_echo(struct ksmbd_work *work)
return 0;
}
-static int smb2_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+static int smb2_rename(struct ksmbd_work *work,
+ struct ksmbd_file *fp,
+ struct user_namespace *user_ns,
struct smb2_file_rename_info *file_info,
struct nls_table *local_nls)
{
@@ -5310,7 +5312,7 @@ static int smb2_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
if (rc)
goto out;
- rc = ksmbd_vfs_setxattr(file_mnt_user_ns(fp->filp),
+ rc = ksmbd_vfs_setxattr(user_ns,
fp->filp->f_path.dentry,
xattr_stream_name,
NULL, 0, 0);
@@ -5438,11 +5440,11 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
{
struct smb2_file_all_info *file_info;
struct iattr attrs;
- struct iattr temp_attrs;
+ struct timespec64 ctime;
struct file *filp;
struct inode *inode;
struct user_namespace *user_ns;
- int rc;
+ int rc = 0;
if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
return -EACCES;
@@ -5462,11 +5464,11 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
}
if (file_info->ChangeTime) {
- temp_attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
- attrs.ia_ctime = temp_attrs.ia_ctime;
+ attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
+ ctime = attrs.ia_ctime;
attrs.ia_valid |= ATTR_CTIME;
} else {
- temp_attrs.ia_ctime = inode->i_ctime;
+ ctime = inode->i_ctime;
}
if (file_info->LastWriteTime) {
@@ -5505,13 +5507,6 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
rc = 0;
}
- /*
- * HACK : set ctime here to avoid ctime changed
- * when file_info->ChangeTime is zero.
- */
- attrs.ia_ctime = temp_attrs.ia_ctime;
- attrs.ia_valid |= ATTR_CTIME;
-
if (attrs.ia_valid) {
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = d_inode(dentry);
@@ -5519,17 +5514,15 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EACCES;
- rc = setattr_prepare(user_ns, dentry, &attrs);
- if (rc)
- return -EINVAL;
-
inode_lock(inode);
- setattr_copy(user_ns, inode, &attrs);
- attrs.ia_valid &= ~ATTR_CTIME;
rc = notify_change(user_ns, dentry, &attrs, NULL);
+ if (!rc) {
+ inode->i_ctime = ctime;
+ mark_inode_dirty(inode);
+ }
inode_unlock(inode);
}
- return 0;
+ return rc;
}
static int set_file_allocation_info(struct ksmbd_work *work,
@@ -5624,6 +5617,7 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
char *buf)
{
+ struct user_namespace *user_ns;
struct ksmbd_file *parent_fp;
struct dentry *parent;
struct dentry *dentry = fp->filp->f_path.dentry;
@@ -5634,11 +5628,12 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EACCES;
}
+ user_ns = file_mnt_user_ns(fp->filp);
if (ksmbd_stream_fd(fp))
goto next;
parent = dget_parent(dentry);
- ret = ksmbd_vfs_lock_parent(parent, dentry);
+ ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
if (ret) {
dput(parent);
return ret;
@@ -5655,7 +5650,7 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
}
}
next:
- return smb2_rename(work, fp,
+ return smb2_rename(work, fp, user_ns,
(struct smb2_file_rename_info *)buf,
work->sess->conn->local_nls);
}
@@ -7116,8 +7111,8 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
netdev->ethtool_ops->get_link_ksettings(netdev, &cmd);
speed = cmd.base.speed;
} else {
- pr_err("%s %s\n", netdev->name,
- "speed is unknown, defaulting to 1Gb/sec");
+ ksmbd_debug(SMB, "%s %s\n", netdev->name,
+ "speed is unknown, defaulting to 1Gb/sec");
speed = SPEED_1000;
}
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index b108b918ec84..43d3123d8b62 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -291,7 +291,6 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
char *search_pattern,
int (*fn)(struct ksmbd_conn *, int,
struct ksmbd_dir_info *,
- struct user_namespace *,
struct ksmbd_kstat *))
{
int i, rc = 0;
@@ -322,8 +321,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
user_ns,
dir->filp->f_path.dentry->d_parent,
&ksmbd_kstat);
- rc = fn(conn, info_level, d_info,
- user_ns, &ksmbd_kstat);
+ rc = fn(conn, info_level, d_info, &ksmbd_kstat);
if (rc)
break;
if (d_info->out_buf_len <= 0)
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index eb667d85558e..57c667c1be06 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -511,7 +511,6 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
int (*fn)(struct ksmbd_conn *,
int,
struct ksmbd_dir_info *,
- struct user_namespace *,
struct ksmbd_kstat *));
int ksmbd_extract_shortname(struct ksmbd_conn *conn,
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
index 5456e3ad943e..0a95cdec8c80 100644
--- a/fs/ksmbd/smbacl.c
+++ b/fs/ksmbd/smbacl.c
@@ -274,24 +274,34 @@ static int sid_to_id(struct user_namespace *user_ns,
uid_t id;
id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
- if (id > 0) {
- uid = make_kuid(user_ns, id);
- if (uid_valid(uid) && kuid_has_mapping(user_ns, uid)) {
- fattr->cf_uid = uid;
- rc = 0;
- }
+ /*
+ * Translate raw sid into kuid in the server's user
+ * namespace.
+ */
+ uid = make_kuid(&init_user_ns, id);
+
+ /* If this is an idmapped mount, apply the idmapping. */
+ uid = kuid_from_mnt(user_ns, uid);
+ if (uid_valid(uid)) {
+ fattr->cf_uid = uid;
+ rc = 0;
}
} else {
kgid_t gid;
gid_t id;
id = le32_to_cpu(psid->sub_auth[psid->num_subauth - 1]);
- if (id > 0) {
- gid = make_kgid(user_ns, id);
- if (gid_valid(gid) && kgid_has_mapping(user_ns, gid)) {
- fattr->cf_gid = gid;
- rc = 0;
- }
+ /*
+ * Translate raw sid into kgid in the server's user
+ * namespace.
+ */
+ gid = make_kgid(&init_user_ns, id);
+
+ /* If this is an idmapped mount, apply the idmapping. */
+ gid = kgid_from_mnt(user_ns, gid);
+ if (gid_valid(gid)) {
+ fattr->cf_gid = gid;
+ rc = 0;
}
}
@@ -587,14 +597,14 @@ static void set_posix_acl_entries_dacl(struct user_namespace *user_ns,
uid_t uid;
unsigned int sid_type = SIDOWNER;
- uid = from_kuid(user_ns, pace->e_uid);
+ uid = posix_acl_uid_translate(user_ns, pace);
if (!uid)
sid_type = SIDUNIX_USER;
id_to_sid(uid, sid_type, sid);
} else if (pace->e_tag == ACL_GROUP) {
gid_t gid;
- gid = from_kgid(user_ns, pace->e_gid);
+ gid = posix_acl_gid_translate(user_ns, pace);
id_to_sid(gid, SIDUNIX_GROUP, sid);
} else if (pace->e_tag == ACL_OTHER && !nt_aces_num) {
smb_copy_sid(sid, &sid_everyone);
@@ -653,12 +663,12 @@ posix_default_acl:
if (pace->e_tag == ACL_USER) {
uid_t uid;
- uid = from_kuid(user_ns, pace->e_uid);
+ uid = posix_acl_uid_translate(user_ns, pace);
id_to_sid(uid, SIDCREATOR_OWNER, sid);
} else if (pace->e_tag == ACL_GROUP) {
gid_t gid;
- gid = from_kgid(user_ns, pace->e_gid);
+ gid = posix_acl_gid_translate(user_ns, pace);
id_to_sid(gid, SIDCREATOR_GROUP, sid);
} else {
kfree(sid);
@@ -723,7 +733,7 @@ static void set_mode_dacl(struct user_namespace *user_ns,
}
/* owner RID */
- uid = from_kuid(user_ns, fattr->cf_uid);
+ uid = from_kuid(&init_user_ns, fattr->cf_uid);
if (uid)
sid = &server_conf.domain_sid;
else
@@ -739,7 +749,7 @@ static void set_mode_dacl(struct user_namespace *user_ns,
ace_size = fill_ace_for_sid(pace, &sid_unix_groups,
ACCESS_ALLOWED, 0, fattr->cf_mode, 0070);
pace->sid.sub_auth[pace->sid.num_subauth++] =
- cpu_to_le32(from_kgid(user_ns, fattr->cf_gid));
+ cpu_to_le32(from_kgid(&init_user_ns, fattr->cf_gid));
pace->size = cpu_to_le16(ace_size + 4);
size += le16_to_cpu(pace->size);
pace = (struct smb_ace *)((char *)pndace + size);
@@ -880,7 +890,7 @@ int build_sec_desc(struct user_namespace *user_ns,
if (!nowner_sid_ptr)
return -ENOMEM;
- uid = from_kuid(user_ns, fattr->cf_uid);
+ uid = from_kuid(&init_user_ns, fattr->cf_uid);
if (!uid)
sid_type = SIDUNIX_USER;
id_to_sid(uid, sid_type, nowner_sid_ptr);
@@ -891,7 +901,7 @@ int build_sec_desc(struct user_namespace *user_ns,
return -ENOMEM;
}
- gid = from_kgid(user_ns, fattr->cf_gid);
+ gid = from_kgid(&init_user_ns, fattr->cf_gid);
id_to_sid(gid, SIDUNIX_GROUP, ngroup_sid_ptr);
offset = sizeof(struct smb_ntsd);
@@ -1234,11 +1244,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, struct path *path,
pa_entry = posix_acls->a_entries;
for (i = 0; i < posix_acls->a_count; i++, pa_entry++) {
if (pa_entry->e_tag == ACL_USER)
- id = from_kuid(user_ns,
- pa_entry->e_uid);
+ id = posix_acl_uid_translate(user_ns, pa_entry);
else if (pa_entry->e_tag == ACL_GROUP)
- id = from_kgid(user_ns,
- pa_entry->e_gid);
+ id = posix_acl_gid_translate(user_ns, pa_entry);
else
continue;
@@ -1322,22 +1330,31 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
newattrs.ia_valid |= ATTR_MODE;
newattrs.ia_mode = (inode->i_mode & ~0777) | (fattr.cf_mode & 0777);
- inode_lock(inode);
- rc = notify_change(user_ns, path->dentry, &newattrs, NULL);
- inode_unlock(inode);
- if (rc)
- goto out;
-
ksmbd_vfs_remove_acl_xattrs(user_ns, path->dentry);
/* Update posix acls */
if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && fattr.cf_dacls) {
rc = set_posix_acl(user_ns, inode,
ACL_TYPE_ACCESS, fattr.cf_acls);
- if (S_ISDIR(inode->i_mode) && fattr.cf_dacls)
+ if (rc < 0)
+ ksmbd_debug(SMB,
+ "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+ rc);
+ if (S_ISDIR(inode->i_mode) && fattr.cf_dacls) {
rc = set_posix_acl(user_ns, inode,
ACL_TYPE_DEFAULT, fattr.cf_dacls);
+ if (rc)
+ ksmbd_debug(SMB,
+ "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ rc);
+ }
}
+ inode_lock(inode);
+ rc = notify_change(user_ns, path->dentry, &newattrs, NULL);
+ inode_unlock(inode);
+ if (rc)
+ goto out;
+
/* Check it only calling from SD BUFFER context */
if (type_check && !(le16_to_cpu(pntsd->type) & DACL_PRESENT))
goto out;
diff --git a/fs/ksmbd/smbacl.h b/fs/ksmbd/smbacl.h
index 940f686a1d95..73e08cad412b 100644
--- a/fs/ksmbd/smbacl.h
+++ b/fs/ksmbd/smbacl.h
@@ -209,4 +209,29 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
bool type_check);
void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
void ksmbd_init_domain(u32 *sub_auth);
+
+static inline uid_t posix_acl_uid_translate(struct user_namespace *mnt_userns,
+ struct posix_acl_entry *pace)
+{
+ kuid_t kuid;
+
+ /* If this is an idmapped mount, apply the idmapping. */
+ kuid = kuid_into_mnt(mnt_userns, pace->e_uid);
+
+ /* Translate the kuid into a userspace id ksmbd would see. */
+ return from_kuid(&init_user_ns, kuid);
+}
+
+static inline gid_t posix_acl_gid_translate(struct user_namespace *mnt_userns,
+ struct posix_acl_entry *pace)
+{
+ kgid_t kgid;
+
+ /* If this is an idmapped mount, apply the idmapping. */
+ kgid = kgid_into_mnt(mnt_userns, pace->e_gid);
+
+ /* Translate the kgid into a userspace id ksmbd would see. */
+ return from_kgid(&init_user_ns, kgid);
+}
+
#endif /* _SMBACL_H */
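The two inline helpers added here encode the same two-step mapping used
throughout the series: the kuid/kgid stored in the ACL entry is first
shifted through the mount's idmapping and only then turned back into a
plain id relative to init_user_ns, the namespace ksmbd itself runs in. A
minimal usage sketch matching the converted sites in smbacl.c and vfs.c
(the function and the message are illustrative only):

static void report_ace_owner_sketch(struct user_namespace *user_ns,
				    struct posix_acl_entry *pace)
{
	/* Illustrative: print the uid an idmapped mount presents. */
	if (pace->e_tag == ACL_USER)
		pr_info("ace owner uid=%u\n",
			posix_acl_uid_translate(user_ns, pace));
}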
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 58f530056ac0..52b2556e76b1 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -1168,7 +1168,7 @@ static int smb_direct_post_send_data(struct smb_direct_transport *t,
pr_err("failed to map buffer\n");
ret = -ENOMEM;
goto err;
- } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES - 1) {
+ } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
pr_err("buffer not fitted into sges\n");
ret = -E2BIG;
ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index aee28ee6b19c..b047f2980d96 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -69,14 +69,15 @@ static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
*
* the reference count of @parent isn't incremented.
*/
-int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child)
+int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
+ struct dentry *child)
{
struct dentry *dentry;
int ret = 0;
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
- dentry = lookup_one_len(child->d_name.name, parent,
- child->d_name.len);
+ dentry = lookup_one(user_ns, child->d_name.name, parent,
+ child->d_name.len);
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
goto out_err;
@@ -102,7 +103,7 @@ int ksmbd_vfs_may_delete(struct user_namespace *user_ns,
int ret;
parent = dget_parent(dentry);
- ret = ksmbd_vfs_lock_parent(parent, dentry);
+ ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
if (ret) {
dput(parent);
return ret;
@@ -137,7 +138,7 @@ int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
*daccess |= FILE_EXECUTE_LE;
parent = dget_parent(dentry);
- ret = ksmbd_vfs_lock_parent(parent, dentry);
+ ret = ksmbd_vfs_lock_parent(user_ns, parent, dentry);
if (ret) {
dput(parent);
return ret;
@@ -197,6 +198,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
*/
int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
{
+ struct user_namespace *user_ns;
struct path path;
struct dentry *dentry;
int err;
@@ -210,16 +212,16 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
return err;
}
+ user_ns = mnt_user_ns(path.mnt);
mode |= S_IFDIR;
- err = vfs_mkdir(mnt_user_ns(path.mnt), d_inode(path.dentry),
- dentry, mode);
+ err = vfs_mkdir(user_ns, d_inode(path.dentry), dentry, mode);
if (err) {
goto out;
} else if (d_unhashed(dentry)) {
struct dentry *d;
- d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
- dentry->d_name.len);
+ d = lookup_one(user_ns, dentry->d_name.name, dentry->d_parent,
+ dentry->d_name.len);
if (IS_ERR(d)) {
err = PTR_ERR(d);
goto out;
@@ -582,6 +584,7 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
*/
int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
{
+ struct user_namespace *user_ns;
struct path path;
struct dentry *parent;
int err;
@@ -601,8 +604,9 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
return err;
}
+ user_ns = mnt_user_ns(path.mnt);
parent = dget_parent(path.dentry);
- err = ksmbd_vfs_lock_parent(parent, path.dentry);
+ err = ksmbd_vfs_lock_parent(user_ns, parent, path.dentry);
if (err) {
dput(parent);
path_put(&path);
@@ -616,14 +620,12 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
}
if (S_ISDIR(d_inode(path.dentry)->i_mode)) {
- err = vfs_rmdir(mnt_user_ns(path.mnt), d_inode(parent),
- path.dentry);
+ err = vfs_rmdir(user_ns, d_inode(parent), path.dentry);
if (err && err != -ENOTEMPTY)
ksmbd_debug(VFS, "%s: rmdir failed, err %d\n", name,
err);
} else {
- err = vfs_unlink(mnt_user_ns(path.mnt), d_inode(parent),
- path.dentry, NULL);
+ err = vfs_unlink(user_ns, d_inode(parent), path.dentry, NULL);
if (err)
ksmbd_debug(VFS, "%s: unlink failed, err %d\n", name,
err);
@@ -748,7 +750,8 @@ static int __ksmbd_vfs_rename(struct ksmbd_work *work,
if (ksmbd_override_fsids(work))
return -ENOMEM;
- dst_dent = lookup_one_len(dst_name, dst_dent_parent, strlen(dst_name));
+ dst_dent = lookup_one(dst_user_ns, dst_name, dst_dent_parent,
+ strlen(dst_name));
err = PTR_ERR(dst_dent);
if (IS_ERR(dst_dent)) {
pr_err("lookup failed %s [%d]\n", dst_name, err);
@@ -779,6 +782,7 @@ out:
int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
char *newname)
{
+ struct user_namespace *user_ns;
struct path dst_path;
struct dentry *src_dent_parent, *dst_dent_parent;
struct dentry *src_dent, *trap_dent, *src_child;
@@ -808,8 +812,9 @@ int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
trap_dent = lock_rename(src_dent_parent, dst_dent_parent);
dget(src_dent);
dget(dst_dent_parent);
- src_child = lookup_one_len(src_dent->d_name.name, src_dent_parent,
- src_dent->d_name.len);
+ user_ns = file_mnt_user_ns(fp->filp);
+ src_child = lookup_one(user_ns, src_dent->d_name.name, src_dent_parent,
+ src_dent->d_name.len);
if (IS_ERR(src_child)) {
err = PTR_ERR(src_child);
goto out_lock;
@@ -823,7 +828,7 @@ int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
dput(src_child);
err = __ksmbd_vfs_rename(work,
- file_mnt_user_ns(fp->filp),
+ user_ns,
src_dent_parent,
src_dent,
mnt_user_ns(dst_path.mnt),
@@ -1109,7 +1114,7 @@ int ksmbd_vfs_unlink(struct user_namespace *user_ns,
{
int err = 0;
- err = ksmbd_vfs_lock_parent(dir, dentry);
+ err = ksmbd_vfs_lock_parent(user_ns, dir, dentry);
if (err)
return err;
dget(dentry);
@@ -1385,14 +1390,14 @@ static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct user_namespac
switch (pa_entry->e_tag) {
case ACL_USER:
xa_entry->type = SMB_ACL_USER;
- xa_entry->uid = from_kuid(user_ns, pa_entry->e_uid);
+ xa_entry->uid = posix_acl_uid_translate(user_ns, pa_entry);
break;
case ACL_USER_OBJ:
xa_entry->type = SMB_ACL_USER_OBJ;
break;
case ACL_GROUP:
xa_entry->type = SMB_ACL_GROUP;
- xa_entry->gid = from_kgid(user_ns, pa_entry->e_gid);
+ xa_entry->gid = posix_acl_gid_translate(user_ns, pa_entry);
break;
case ACL_GROUP_OBJ:
xa_entry->type = SMB_ACL_GROUP_OBJ;
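All of the lookup_one_len() call sites in this file are converted to
lookup_one(), which differs only in taking the mount's user namespace so
that the exec-permission check on the locked parent directory honours an
idmapped mount. A hedged sketch of the resulting calling convention (the
wrapper name is illustrative; the parent inode must already be held
locked, as at every converted site):

static struct dentry *lookup_child_sketch(struct user_namespace *user_ns,
					  struct dentry *parent,
					  const char *name)
{
	/* Same contract as lookup_one_len(), plus the mount's idmapping. */
	return lookup_one(user_ns, name, parent, strlen(name));
}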
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index cb0cba0d5d07..85db50abdb24 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -107,7 +107,8 @@ struct ksmbd_kstat {
__le32 file_attributes;
};
-int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child);
+int ksmbd_vfs_lock_parent(struct user_namespace *user_ns, struct dentry *parent,
+ struct dentry *child);
int ksmbd_vfs_may_delete(struct user_namespace *user_ns, struct dentry *dentry);
int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
struct dentry *dentry, __le32 *daccess);
diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
index 92d8c61ffd2a..29c1db66bd0f 100644
--- a/fs/ksmbd/vfs_cache.c
+++ b/fs/ksmbd/vfs_cache.c
@@ -666,22 +666,6 @@ void ksmbd_free_global_file_table(void)
ksmbd_destroy_file_table(&global_ft);
}
-int ksmbd_file_table_flush(struct ksmbd_work *work)
-{
- struct ksmbd_file *fp = NULL;
- unsigned int id;
- int ret;
-
- read_lock(&work->sess->file_table.lock);
- idr_for_each_entry(work->sess->file_table.idr, fp, id) {
- ret = ksmbd_vfs_fsync(work, fp->volatile_id, KSMBD_NO_FID);
- if (ret)
- break;
- }
- read_unlock(&work->sess->file_table.lock);
- return ret;
-}
-
int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
diff --git a/fs/ksmbd/vfs_cache.h b/fs/ksmbd/vfs_cache.h
index 70dfe6a99f13..448576fbe4b7 100644
--- a/fs/ksmbd/vfs_cache.h
+++ b/fs/ksmbd/vfs_cache.h
@@ -152,7 +152,6 @@ void ksmbd_close_session_fds(struct ksmbd_work *work);
int ksmbd_close_inode_fds(struct ksmbd_work *work, struct inode *inode);
int ksmbd_init_global_file_table(void);
void ksmbd_free_global_file_table(void);
-int ksmbd_file_table_flush(struct ksmbd_work *work);
void ksmbd_set_fd_limit(unsigned long limit);
/*
diff --git a/fs/namei.c b/fs/namei.c
index 95a881e0552b..1946d9667790 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -255,7 +255,7 @@ getname_kernel(const char * filename)
void putname(struct filename *name)
{
- if (IS_ERR_OR_NULL(name))
+ if (IS_ERR(name))
return;
BUG_ON(name->refcnt <= 0);
@@ -2467,7 +2467,7 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
return err;
}
-static int __filename_lookup(int dfd, struct filename *name, unsigned flags,
+int filename_lookup(int dfd, struct filename *name, unsigned flags,
struct path *path, struct path *root)
{
int retval;
@@ -2488,15 +2488,6 @@ static int __filename_lookup(int dfd, struct filename *name, unsigned flags,
return retval;
}
-int filename_lookup(int dfd, struct filename *name, unsigned flags,
- struct path *path, struct path *root)
-{
- int retval = __filename_lookup(dfd, name, flags, path, root);
-
- putname(name);
- return retval;
-}
-
/* Returns 0 and nd will be valid on success; returns error otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
@@ -2514,9 +2505,10 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
return err;
}
-static int __filename_parentat(int dfd, struct filename *name,
- unsigned int flags, struct path *parent,
- struct qstr *last, int *type)
+/* Note: this does not consume "name" */
+static int filename_parentat(int dfd, struct filename *name,
+ unsigned int flags, struct path *parent,
+ struct qstr *last, int *type)
{
int retval;
struct nameidata nd;
@@ -2538,25 +2530,14 @@ static int __filename_parentat(int dfd, struct filename *name,
return retval;
}
-static int filename_parentat(int dfd, struct filename *name,
- unsigned int flags, struct path *parent,
- struct qstr *last, int *type)
-{
- int retval = __filename_parentat(dfd, name, flags, parent, last, type);
-
- putname(name);
- return retval;
-}
-
/* does lookup, returns the object with parent locked */
-struct dentry *kern_path_locked(const char *name, struct path *path)
+static struct dentry *__kern_path_locked(struct filename *name, struct path *path)
{
struct dentry *d;
struct qstr last;
int type, error;
- error = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
- &last, &type);
+ error = filename_parentat(AT_FDCWD, name, 0, path, &last, &type);
if (error)
return ERR_PTR(error);
if (unlikely(type != LAST_NORM)) {
@@ -2572,10 +2553,23 @@ struct dentry *kern_path_locked(const char *name, struct path *path)
return d;
}
+struct dentry *kern_path_locked(const char *name, struct path *path)
+{
+ struct filename *filename = getname_kernel(name);
+ struct dentry *res = __kern_path_locked(filename, path);
+
+ putname(filename);
+ return res;
+}
+
int kern_path(const char *name, unsigned int flags, struct path *path)
{
- return filename_lookup(AT_FDCWD, getname_kernel(name),
- flags, path, NULL);
+ struct filename *filename = getname_kernel(name);
+ int ret = filename_lookup(AT_FDCWD, filename, flags, path, NULL);
+
+ putname(filename);
+ return ret;
}
EXPORT_SYMBOL(kern_path);
@@ -2591,10 +2585,15 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
+ struct filename *filename;
struct path root = {.mnt = mnt, .dentry = dentry};
+ int ret;
+
+ filename = getname_kernel(name);
/* the first argument of filename_lookup() is ignored with root */
- return filename_lookup(AT_FDCWD, getname_kernel(name),
- flags , path, &root);
+ ret = filename_lookup(AT_FDCWD, filename, flags, path, &root);
+ putname(filename);
+ return ret;
}
EXPORT_SYMBOL(vfs_path_lookup);
@@ -2798,8 +2797,11 @@ int path_pts(struct path *path)
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
{
- return filename_lookup(dfd, getname_flags(name, flags, empty),
- flags, path, NULL);
+ struct filename *filename = getname_flags(name, flags, empty);
+ int ret = filename_lookup(dfd, filename, flags, path, NULL);
+
+ putname(filename);
+ return ret;
}
EXPORT_SYMBOL(user_path_at_empty);
@@ -3618,8 +3620,8 @@ struct file *do_file_open_root(const struct path *root,
return file;
}
-static struct dentry *__filename_create(int dfd, struct filename *name,
- struct path *path, unsigned int lookup_flags)
+static struct dentry *filename_create(int dfd, struct filename *name,
+ struct path *path, unsigned int lookup_flags)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct qstr last;
@@ -3634,7 +3636,7 @@ static struct dentry *__filename_create(int dfd, struct filename *name,
*/
lookup_flags &= LOOKUP_REVAL;
- error = __filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+ error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
if (error)
return ERR_PTR(error);
@@ -3687,21 +3689,15 @@ out:
return dentry;
}
-static inline struct dentry *filename_create(int dfd, struct filename *name,
+struct dentry *kern_path_create(int dfd, const char *pathname,
struct path *path, unsigned int lookup_flags)
{
- struct dentry *res = __filename_create(dfd, name, path, lookup_flags);
+ struct filename *filename = getname_kernel(pathname);
+ struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
- putname(name);
+ putname(filename);
return res;
}
-
-struct dentry *kern_path_create(int dfd, const char *pathname,
- struct path *path, unsigned int lookup_flags)
-{
- return filename_create(dfd, getname_kernel(pathname),
- path, lookup_flags);
-}
EXPORT_SYMBOL(kern_path_create);
void done_path_create(struct path *path, struct dentry *dentry)
@@ -3716,7 +3712,11 @@ EXPORT_SYMBOL(done_path_create);
inline struct dentry *user_path_create(int dfd, const char __user *pathname,
struct path *path, unsigned int lookup_flags)
{
- return filename_create(dfd, getname(pathname), path, lookup_flags);
+ struct filename *filename = getname(pathname);
+ struct dentry *res = filename_create(dfd, filename, path, lookup_flags);
+
+ putname(filename);
+ return res;
}
EXPORT_SYMBOL(user_path_create);
@@ -3797,7 +3797,7 @@ static int do_mknodat(int dfd, struct filename *name, umode_t mode,
if (error)
goto out1;
retry:
- dentry = __filename_create(dfd, name, &path, lookup_flags);
+ dentry = filename_create(dfd, name, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out1;
@@ -3897,7 +3897,7 @@ int do_mkdirat(int dfd, struct filename *name, umode_t mode)
unsigned int lookup_flags = LOOKUP_DIRECTORY;
retry:
- dentry = __filename_create(dfd, name, &path, lookup_flags);
+ dentry = filename_create(dfd, name, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putname;
@@ -3996,7 +3996,7 @@ int do_rmdir(int dfd, struct filename *name)
int type;
unsigned int lookup_flags = 0;
retry:
- error = __filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
+ error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
goto exit1;
@@ -4137,7 +4137,7 @@ int do_unlinkat(int dfd, struct filename *name)
struct inode *delegated_inode = NULL;
unsigned int lookup_flags = 0;
retry:
- error = __filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
+ error = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
if (error)
goto exit1;
@@ -4266,7 +4266,7 @@ int do_symlinkat(struct filename *from, int newdfd, struct filename *to)
goto out_putnames;
}
retry:
- dentry = __filename_create(newdfd, to, &path, lookup_flags);
+ dentry = filename_create(newdfd, to, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putnames;
@@ -4426,11 +4426,11 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
retry:
- error = __filename_lookup(olddfd, old, how, &old_path, NULL);
+ error = filename_lookup(olddfd, old, how, &old_path, NULL);
if (error)
goto out_putnames;
- new_dentry = __filename_create(newdfd, new, &new_path,
+ new_dentry = filename_create(newdfd, new, &new_path,
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
@@ -4689,13 +4689,13 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
target_flags = 0;
retry:
- error = __filename_parentat(olddfd, from, lookup_flags, &old_path,
- &old_last, &old_type);
+ error = filename_parentat(olddfd, from, lookup_flags, &old_path,
+ &old_last, &old_type);
if (error)
goto put_names;
- error = __filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last,
- &new_type);
+ error = filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last,
+ &new_type);
if (error)
goto exit1;
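The net effect of the fs/namei.c refactor is a single ownership rule:
filename_lookup(), filename_parentat() and filename_create() no longer
consume the struct filename they are given, so whoever called getname() or
getname_kernel() is now also responsible for putname(), exactly as the
rewritten kern_path() and user_path_create() demonstrate. A minimal sketch
of a caller under the new rule (the function name is illustrative):

static int lookup_and_release_sketch(const char *pathname, struct path *path)
{
	struct filename *name = getname_kernel(pathname);
	int err;

	if (IS_ERR(name))
		return PTR_ERR(name);
	err = filename_lookup(AT_FDCWD, name, 0, path, NULL);
	putname(name);	/* always paired with the getname above */
	return err;
}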
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 68e8d61e28dd..62f8a7ac19c8 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -51,11 +51,9 @@ static const struct sysfs_ops nilfs_##name##_attr_ops = { \
#define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \
static void nilfs_##name##_attr_release(struct kobject *kobj) \
{ \
- struct nilfs_sysfs_##parent_name##_subgroups *subgroups; \
- struct the_nilfs *nilfs = container_of(kobj->parent, \
- struct the_nilfs, \
- ns_##parent_name##_kobj); \
- subgroups = nilfs->ns_##parent_name##_subgroups; \
+ struct nilfs_sysfs_##parent_name##_subgroups *subgroups = container_of(kobj, \
+ struct nilfs_sysfs_##parent_name##_subgroups, \
+ sg_##name##_kobj); \
complete(&subgroups->sg_##name##_kobj_unregister); \
} \
static struct kobj_type nilfs_##name##_ktype = { \
@@ -81,12 +79,12 @@ static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \
err = kobject_init_and_add(kobj, &nilfs_##name##_ktype, parent, \
#name); \
if (err) \
- return err; \
- return 0; \
+ kobject_put(kobj); \
+ return err; \
} \
static void nilfs_sysfs_delete_##name##_group(struct the_nilfs *nilfs) \
{ \
- kobject_del(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \
+ kobject_put(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \
}
/************************************************************************
@@ -197,14 +195,14 @@ int nilfs_sysfs_create_snapshot_group(struct nilfs_root *root)
}
if (err)
- return err;
+ kobject_put(&root->snapshot_kobj);
- return 0;
+ return err;
}
void nilfs_sysfs_delete_snapshot_group(struct nilfs_root *root)
{
- kobject_del(&root->snapshot_kobj);
+ kobject_put(&root->snapshot_kobj);
}
/************************************************************************
@@ -986,7 +984,7 @@ int nilfs_sysfs_create_device_group(struct super_block *sb)
err = kobject_init_and_add(&nilfs->ns_dev_kobj, &nilfs_dev_ktype, NULL,
"%s", sb->s_id);
if (err)
- goto free_dev_subgroups;
+ goto cleanup_dev_kobject;
err = nilfs_sysfs_create_mounted_snapshots_group(nilfs);
if (err)
@@ -1023,9 +1021,7 @@ delete_mounted_snapshots_group:
nilfs_sysfs_delete_mounted_snapshots_group(nilfs);
cleanup_dev_kobject:
- kobject_del(&nilfs->ns_dev_kobj);
-
-free_dev_subgroups:
+ kobject_put(&nilfs->ns_dev_kobj);
kfree(nilfs->ns_dev_subgroups);
failed_create_device_group:
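Every hunk in this file enforces the same kobject lifetime rule: once
kobject_init_and_add() has run, the kobject owns a reference, so both the
failure path and the teardown path must go through kobject_put() (which
also performs the kobject_del()) rather than a bare kobject_del() or a
direct kfree(); otherwise the ktype's release callback, and with it the
completion these subgroups wait on, never fires. A condensed sketch of the
pattern, with illustrative names:

static int create_group_sketch(struct kobject *kobj,
			       struct kobj_type *ktype,
			       struct kobject *parent)
{
	int err;

	err = kobject_init_and_add(kobj, ktype, parent, "example");
	if (err)
		kobject_put(kobj);	/* drop the ref; never kfree directly */
	return err;
}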
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 8b7b01a380ce..c8bfc01da5d7 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -792,14 +792,13 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
void nilfs_put_root(struct nilfs_root *root)
{
- if (refcount_dec_and_test(&root->count)) {
- struct the_nilfs *nilfs = root->nilfs;
+ struct the_nilfs *nilfs = root->nilfs;
- nilfs_sysfs_delete_snapshot_group(root);
-
- spin_lock(&nilfs->ns_cptree_lock);
+ if (refcount_dec_and_lock(&root->count, &nilfs->ns_cptree_lock)) {
rb_erase(&root->rb_node, &nilfs->ns_cptree);
spin_unlock(&nilfs->ns_cptree_lock);
+
+ nilfs_sysfs_delete_snapshot_group(root);
iput(root->ifile);
kfree(root);
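Moving the removal under refcount_dec_and_lock() closes the window in
which another CPU could look the root up in ns_cptree between the final
refcount_dec_and_test() and the rb_erase(): the tree lock is now taken
atomically with the count reaching zero. A stripped-down sketch of the
idiom, with illustrative types:

struct obj_sketch {
	refcount_t count;
	struct rb_node rb_node;
};

static void put_obj_sketch(struct obj_sketch *o, spinlock_t *tree_lock,
			   struct rb_root *tree)
{
	/* Take tree_lock only if this put drops the last reference. */
	if (refcount_dec_and_lock(&o->count, tree_lock)) {
		rb_erase(&o->rb_node, tree);
		spin_unlock(tree_lock);
		kfree(o);
	}
}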
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 95006d1d29ab..fa1d99101f89 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -531,6 +531,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
/* Someone else created list structure for us */
if (inode)
fsnotify_put_inode_ref(inode);
+ fsnotify_put_sb_connectors(conn);
kmem_cache_free(fsnotify_mark_connector_cachep, conn);
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ee0ce8cecc4a..49be8c8ef555 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -98,27 +98,17 @@
void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape)
{
- char *buf;
- size_t size;
char tcomm[64];
- int ret;
if (p->flags & PF_WQ_WORKER)
wq_worker_comm(tcomm, sizeof(tcomm), p);
else
__get_task_comm(tcomm, sizeof(tcomm), p);
- size = seq_get_buf(m, &buf);
- if (escape) {
- ret = string_escape_str(tcomm, buf, size,
- ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
- if (ret >= size)
- ret = -1;
- } else {
- ret = strscpy(buf, tcomm, size);
- }
-
- seq_commit(m, ret);
+ if (escape)
+ seq_escape_str(m, tcomm, ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
+ else
+ seq_printf(m, "%.64s", tcomm);
}
/*
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e5b5f7709d48..533d5836eb9a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -95,6 +95,7 @@
#include <linux/posix-timers.h>
#include <linux/time_namespace.h>
#include <linux/resctrl.h>
+#include <linux/cn_proc.h>
#include <trace/events/oom.h>
#include "internal.h"
#include "fd.h"
@@ -1674,8 +1675,10 @@ static ssize_t comm_write(struct file *file, const char __user *buf,
if (!p)
return -ESRCH;
- if (same_thread_group(current, p))
+ if (same_thread_group(current, p)) {
set_task_comm(p, buffer);
+ proc_comm_connector(p);
+ }
else
count = -EINVAL;
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index a6ee23aadd28..2a66844b7ff8 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -15,13 +15,27 @@
#include <linux/buffer_head.h>
#include "qnx4.h"
+/*
+ * A qnx4 directory entry is an inode entry or link info
+ * depending on the status field in the last byte. The
+ * first byte is where the name starts either way, and a
+ * zero means it's empty.
+ */
+union qnx4_directory_entry {
+ struct {
+ char de_name;
+ char de_pad[62];
+ char de_status;
+ };
+ struct qnx4_inode_entry inode;
+ struct qnx4_link_info link;
+};
+
static int qnx4_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
unsigned int offset;
struct buffer_head *bh;
- struct qnx4_inode_entry *de;
- struct qnx4_link_info *le;
unsigned long blknum;
int ix, ino;
int size;
@@ -38,27 +52,30 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
}
ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
+ union qnx4_directory_entry *de;
+ const char *name;
+
offset = ix * QNX4_DIR_ENTRY_SIZE;
- de = (struct qnx4_inode_entry *) (bh->b_data + offset);
- if (!de->di_fname[0])
+ de = (union qnx4_directory_entry *) (bh->b_data + offset);
+
+ if (!de->de_name)
continue;
- if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
+ if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
continue;
- if (!(de->di_status & QNX4_FILE_LINK))
- size = QNX4_SHORT_NAME_MAX;
- else
- size = QNX4_NAME_MAX;
- size = strnlen(de->di_fname, size);
- QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
- if (!(de->di_status & QNX4_FILE_LINK))
+ if (!(de->de_status & QNX4_FILE_LINK)) {
+ size = sizeof(de->inode.di_fname);
+ name = de->inode.di_fname;
ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
- else {
- le = (struct qnx4_link_info*)de;
- ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
+ } else {
+ size = sizeof(de->link.dl_fname);
+ name = de->link.dl_fname;
+ ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
QNX4_INODES_PER_BLOCK +
- le->dl_inode_ndx;
+ de->link.dl_inode_ndx;
}
- if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
+ size = strnlen(name, size);
+ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
+ if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
brelse(bh);
return 0;
}
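The union works because an inode entry and a link entry occupy the same
64-byte directory slot and agree on where the name begins and where the
status byte sits, so readdir can classify a slot before committing to
either view. A hedged sketch of that classification step (the helper is
illustrative; the flag tests mirror the loop above):

static int qnx4_entry_kind_sketch(const union qnx4_directory_entry *de)
{
	if (!de->de_name)
		return -ENOENT;		/* empty slot */
	if (!(de->de_status & (QNX4_FILE_USED | QNX4_FILE_LINK)))
		return -ENOENT;		/* neither in use nor a link */
	return (de->de_status & QNX4_FILE_LINK) ? 1 : 0;	/* 1 = link view */
}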
diff --git a/fs/cifs_common/Makefile b/fs/smbfs_common/Makefile
index 6fedd2f88a25..cafc61a3bfc3 100644
--- a/fs/cifs_common/Makefile
+++ b/fs/smbfs_common/Makefile
@@ -3,5 +3,5 @@
# Makefile for Linux filesystem routines that are shared by client and server.
#
-obj-$(CONFIG_CIFS_COMMON) += cifs_arc4.o
-obj-$(CONFIG_CIFS_COMMON) += cifs_md4.o
+obj-$(CONFIG_SMBFS_COMMON) += cifs_arc4.o
+obj-$(CONFIG_SMBFS_COMMON) += cifs_md4.o
diff --git a/fs/cifs_common/arc4.h b/fs/smbfs_common/arc4.h
index 12e71ec033a1..12e71ec033a1 100644
--- a/fs/cifs_common/arc4.h
+++ b/fs/smbfs_common/arc4.h
diff --git a/fs/cifs_common/cifs_arc4.c b/fs/smbfs_common/cifs_arc4.c
index b964cc682944..85ba15a60b13 100644
--- a/fs/cifs_common/cifs_arc4.c
+++ b/fs/smbfs_common/cifs_arc4.c
@@ -74,14 +74,14 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
static int __init
-init_cifs_common(void)
+init_smbfs_common(void)
{
return 0;
}
static void __init
-exit_cifs_common(void)
+exit_smbfs_common(void)
{
}
-module_init(init_cifs_common)
-module_exit(exit_cifs_common)
+module_init(init_smbfs_common)
+module_exit(exit_smbfs_common)
diff --git a/fs/cifs_common/cifs_md4.c b/fs/smbfs_common/cifs_md4.c
index 50f78cfc6ce9..50f78cfc6ce9 100644
--- a/fs/cifs_common/cifs_md4.c
+++ b/fs/smbfs_common/cifs_md4.c
diff --git a/fs/cifs_common/md4.h b/fs/smbfs_common/md4.h
index 5337becc699a..5337becc699a 100644
--- a/fs/cifs_common/md4.h
+++ b/fs/smbfs_common/md4.h
diff --git a/fs/cifs/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
index d0fc42061f49..d01e8c9d7a31 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/smbfs_common/smbfsctl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
+/* SPDX-License-Identifier: LGPL-2.1+ */
/*
* fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
*
@@ -19,11 +19,14 @@
* could be invoked from tools via a specialized hook into the VFS rather
* than via the standard vfs entry points
*
- * See MS-SMB2 Section 2.2.31 (last checked June 2013, all of that list are
+ * See MS-SMB2 Section 2.2.31 (last checked September 2021, all of that list is
* below). Additional detail on less common ones can be found in MS-FSCC
* section 2.3.
*/
+#ifndef __SMBFSCTL_H
+#define __SMBFSCTL_H
+
/*
* FSCTL values are 32 bits and are constructed as
* <device 16bits> <access 2bits> <function 12bits> <method 2bits>
@@ -91,6 +94,7 @@
#define FSCTL_SET_ZERO_ON_DEALLOC 0x00090194 /* BB add struct */
#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
#define FSCTL_GET_INTEGRITY_INFORMATION 0x0009027C
+#define FSCTL_GET_REFS_VOLUME_DATA 0x000902D8 /* See MS-FSCC 2.3.24 */
#define FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT 0x000903d3
#define FSCTL_GET_RETRIEVAL_POINTER_COUNT 0x0009042b
#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF
@@ -146,7 +150,13 @@
#define IO_REPARSE_TAG_LX_CHR 0x80000025
#define IO_REPARSE_TAG_LX_BLK 0x80000026
+#define IO_REPARSE_TAG_LX_SYMLINK_LE cpu_to_le32(0xA000001D)
+#define IO_REPARSE_TAG_AF_UNIX_LE cpu_to_le32(0x80000023)
+#define IO_REPARSE_TAG_LX_FIFO_LE cpu_to_le32(0x80000024)
+#define IO_REPARSE_TAG_LX_CHR_LE cpu_to_le32(0x80000025)
+#define IO_REPARSE_TAG_LX_BLK_LE cpu_to_le32(0x80000026)
+
/* fsctl flags */
/* If Flags is set to this value, the request is an FSCTL not ioctl request */
#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
-
+#endif /* __SMBFSCTL_H */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 9a86d3ec2cb6..c4e0cd1c1c8c 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -330,6 +330,15 @@ xfs_set_inode_alloc(
return xfs_is_inode32(mp) ? maxagi : agcount;
}
+static bool
+xfs_buftarg_is_dax(
+ struct super_block *sb,
+ struct xfs_buftarg *bt)
+{
+ return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
+ bdev_nr_sectors(bt->bt_bdev));
+}
+
STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
@@ -1588,11 +1597,10 @@ xfs_fs_fill_super(
xfs_warn(mp,
"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
- sb->s_blocksize);
+ datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
if (mp->m_rtdev_targp)
- rtdev_is_dax = bdev_dax_supported(
- mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
+ rtdev_is_dax = xfs_buftarg_is_dax(sb,
+ mp->m_rtdev_targp);
if (!rtdev_is_dax && !datadev_is_dax) {
xfs_alert(mp,
"DAX unsupported by block device. Turning off DAX.");
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 9f4985b4d64d..bc159a9b4a73 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -135,6 +135,7 @@ struct cppc_cpudata {
#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
+extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
@@ -149,6 +150,10 @@ static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
return -ENOTSUPP;
}
+static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return -ENOTSUPP;
+}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
return -ENOTSUPP;
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index cd905b44a630..13f5aa68a455 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -57,17 +57,11 @@
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
- * multiplications which ought to be much faster. And yet only if compiling
- * with a sufficiently recent gcc version to perform proper 64-bit constant
- * propagation.
+ * multiplications which ought to be much faster.
*
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
-#ifndef __div64_const32_is_OK
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-#endif
-
#define __div64_const32(n, ___b) \
({ \
/* \
@@ -230,8 +224,7 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
is_power_of_2(__base)) { \
__rem = (n) & (__base - 1); \
(n) >>= ilog2(__base); \
- } else if (__div64_const32_is_OK && \
- __builtin_constant_p(__base) && \
+ } else if (__builtin_constant_p(__base) && \
__base != 0) { \
uint32_t __res_lo, __n_lo = (n); \
(n) = __div64_const32(n, __base); \
@@ -241,8 +234,9 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
} else if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
- } else \
+ } else { \
__rem = __div64_32(&(n), __base); \
+ } \
__rem; \
})
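With the old __div64_const32_is_OK escape hatch gone (it only excluded
gcc 3.x, long below the kernel's minimum compiler version), a constant
divisor now always takes the multiply-by-inverse path above. A hedged
usage sketch; do_div() divides the 64-bit lvalue in place and evaluates
to the 32-bit remainder:

static inline u32 ns_to_secs_sketch(u64 *ns)
{
	/* *ns becomes whole seconds; the return value is the remainder. */
	return do_div(*ns, NSEC_PER_SEC);
}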
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9def22e6e2b3..9d0479f50f97 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -19,12 +19,6 @@ extern void *early_memremap_prot(resource_size_t phys_addr,
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
-/*
- * Weak function called by early_ioremap_reset(). It does nothing, but
- * architectures may provide their own version to do any needed cleanups.
- */
-extern void early_ioremap_shutdown(void);
-
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index c1ab6a6e72b5..d3eae6cdbacb 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
return hv_vp_index[cpu_number];
}
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
- const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool exclude_self)
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int this_cpu = smp_processor_id();
/* valid_bank_mask can represent up to 64 banks */
if (hv_max_vp_index / 64 >= 64)
@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
* Some banks may end up being empty but this is acceptable.
*/
for_each_cpu(cpu, cpus) {
+ if (exclude_self && cpu == this_cpu)
+ continue;
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
return nr_bank;
}
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ WARN_ON_ONCE(preemptible());
+ return __cpumask_to_vpset(vpset, cpus, true);
+}
+
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
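The new __cpumask_to_vpset() core takes an exclude_self flag so that a
"flush all but self" style hypercall can reuse the bank-building logic;
the WARN_ON_ONCE(preemptible()) in cpumask_to_vpset_noself() exists
because smp_processor_id() is only stable while preemption is off. A
hedged usage sketch (the wrapper name is illustrative):

static int vpset_others_sketch(struct hv_vpset *vpset,
			       const struct cpumask *cpus)
{
	int nr_bank;

	preempt_disable();
	nr_bank = cpumask_to_vpset_noself(vpset, cpus);
	preempt_enable();
	return nr_bank;		/* -1 if any CPU had an invalid VP index */
}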
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index d4f16dcc2ed7..df636c6d8e6c 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -52,4 +52,4 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
}
#endif
-#endif /* __ASM_GENERIC_IO_H */
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index aa50bf2959fe..f2984af2b85b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -116,11 +116,7 @@
* GCC 4.5 and later have a 32-byte section alignment for structures,
* except GCC 4.9, which feels the need to align on 64 bytes.
*/
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
-#define STRUCT_ALIGNMENT 64
-#else
#define STRUCT_ALIGNMENT 32
-#endif
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/*
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 818680c6a8ed..b20e89d321b0 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -27,11 +27,12 @@
#ifndef _TTM_TT_H_
#define _TTM_TT_H_
+#include <linux/pagemap.h>
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
-struct ttm_bo_device;
+struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index abe089c27529..537e1b991f11 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -110,7 +110,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
}
/* Tree-based key-value access APIs */
-struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
const char *key);
const char * __init xbc_node_find_value(struct xbc_node *parent,
@@ -148,7 +148,7 @@ xbc_find_value(const char *key, struct xbc_node **vnode)
*/
static inline struct xbc_node * __init xbc_find_node(const char *key)
{
- return xbc_node_find_child(NULL, key);
+ return xbc_node_find_subkey(NULL, key);
}
/**
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 4f72b47973c3..2f909ed084c6 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -79,24 +79,6 @@ struct cpu_cacheinfo {
bool cpu_map_populated;
};
-/*
- * Helpers to make sure "func" is executed on the cpu whose cache
- * attributes are being detected
- */
-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
-static inline void _##func(void *ret) \
-{ \
- int cpu = smp_processor_id(); \
- *(int *)ret = __##func(cpu); \
-} \
- \
-int func(unsigned int cpu) \
-{ \
- int ret; \
- smp_call_function_single(cpu, _##func, &ret, true); \
- return ret; \
-}
-
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
int init_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index e41a811026f6..bc2699feddbe 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -299,6 +299,7 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
+ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
extern const char *ceph_session_op_name(int op);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index e1c705fdfa7c..db2e147e069f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overridden to carry prioidx and/or
- * classid. The two modes are distinguished by whether the lowest bit is
- * set. Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works. There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled. On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked. While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
*/
struct sock_cgroup_data {
- union {
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 is_data : 1;
- u8 no_refcnt : 1;
- u8 unused : 6;
- u8 padding;
- u16 prioidx;
- u32 classid;
- } __packed;
-#else
- struct {
- u32 classid;
- u16 prioidx;
- u8 padding;
- u8 unused : 6;
- u8 no_refcnt : 1;
- u8 is_data : 1;
- } __packed;
+ struct cgroup *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ u32 classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+ u16 prioidx; /* v1 */
#endif
- u64 val;
- };
};
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid. Such races are short-lived and the result isn't critical.
- */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
- /* fallback to 1 which is always the ID of the root cgroup */
- return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+ return READ_ONCE(skcd->prioidx);
+#else
+ return 1;
+#endif
}
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
- /* fallback to 0 which is the unconfigured default classid */
- return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ return READ_ONCE(skcd->classid);
+#else
+ return 0;
+#endif
}
-/*
- * If invoked concurrently, the updaters may clobber each other. The
- * caller is responsible for synchronization.
- */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.prioidx = prioidx;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+ WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_classid(&skcd_buf) == classid)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.classid = classid;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ WRITE_ONCE(skcd->classid, classid);
+#endif
}
#else /* CONFIG_SOCK_CGROUP_DATA */
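With the bit-stealing union gone, the v2 cgroup pointer and the v1
prioidx/classid fields coexist as ordinary struct members, each compiled
in only when its controller is configured; the accessors above, and
sock_cgroup_ptr() in the cgroup.h hunk below, reduce to plain
READ_ONCE()/WRITE_ONCE() pairs, and the old mode switch with its leaked
cgroup references disappears entirely. A hedged sketch of reading all
three views side by side, something the old encoding could not do:

static void dump_sock_cgroup_sketch(struct sock_cgroup_data *skcd)
{
	/* prioidx and classid fall back to 1 and 0 when unconfigured. */
	pr_info("prioidx=%u classid=%u cgroup=%p\n",
		sock_cgroup_prioidx(skcd),
		sock_cgroup_classid(skcd),
		sock_cgroup_ptr(skcd));
}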
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 7bf60454a313..75c151413fda 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
- unsigned long v;
-
- /*
- * @skcd->val is 64bit but the following is safe on 32bit too as we
- * just need the lower ulong to be written and read atomically.
- */
- v = READ_ONCE(skcd->val);
-
- if (v & 3)
- return &cgrp_dfl_root.cgrp;
-
- return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
- return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+ return skcd->cgroup;
}
#else /* CONFIG_CGROUP_DATA */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 8e0598c7d1d1..1c758b0e0359 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -395,14 +395,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-
-#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
-
-long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- unsigned long bitmap_size);
-long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- unsigned long bitmap_size);
void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to,
@@ -519,8 +511,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
struct epoll_event; /* fortunately, this one is fixed-layout */
-extern void __user *compat_alloc_user_space(unsigned long len);
-
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define unsafe_compat_save_altstack(uss, sp, label) do { \
@@ -807,26 +797,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr
/* mm/fadvise.c: No generic prototype for fadvise64_64 */
/* mm/, CONFIG_MMU only */
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -976,6 +946,15 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+
/*
* Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
* types, and will need special compat treatment for that. Most architectures
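
Moving the bitmap helpers out of the CONFIG_COMPAT block lets common code size and copy compat bitmaps unconditionally. For instance, with a 32-bit compat_long_t, a 100-bit mask occupies BITS_TO_COMPAT_LONGS(100) = DIV_ROUND_UP(100, 32) = 4 compat longs. A minimal sketch, assuming a hypothetical caller:

    /* Sketch only: fetching a user-space compat bitmap. With a 32-bit
     * compat_long_t, a 100-bit nodemask spans 4 compat longs. */
    static long example_fetch_nodemask(unsigned long *mask,
    				   const compat_ulong_t __user *umask,
    				   unsigned long maxnode)
    {
    	return compat_get_bitmap(mask, umask, maxnode);
    }
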
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 49b0ac8b6fd3..3c4de9b6c6e3 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -62,19 +62,6 @@
#define __no_sanitize_coverage
#endif
-/*
- * Not all versions of clang implement the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
- */
-#if __has_builtin(__builtin_mul_overflow) && \
- __has_builtin(__builtin_add_overflow) && \
- __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
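
The COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW probe is gone because every supported clang (and gcc >= 5.1, see the compiler-gcc.h hunk below) provides the type-generic overflow builtins, so overflow.h can rely on them unconditionally. A minimal sketch of the builtin the removed check used to gate:

    /* Sketch only: the type-generic builtin that all supported compilers
     * now provide. Returns true (and leaves *d wrapped) on overflow. */
    static inline bool example_add_sizes(size_t a, size_t b, size_t *d)
    {
    	return __builtin_add_overflow(a, b, d);
    }
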
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cb9217fc60af..bd2b881c6b63 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -43,9 +43,6 @@
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#define __compiletime_warning(message) __attribute__((__warning__(message)))
-#define __compiletime_error(message) __attribute__((__error__(message)))
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -98,10 +95,8 @@
#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
#define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
#endif
#if __has_attribute(__no_sanitize_address__)
@@ -128,10 +123,6 @@
#define __no_sanitize_coverage
#endif
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
/*
* Turn individual warnings and errors on and off locally, depending
* on version.
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index b67261a1e3e9..3d5af56337bd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif
+#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
+
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
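
absolute_pointer() turns a fixed address into a pointer via RELOC_HIDE(), so the compiler cannot track its origin and emit array-bounds or string-op warnings for accesses through it. A minimal sketch with a made-up address (EXAMPLE_ROM_BASE is hypothetical, for illustration only):

    /* Sketch only: going through absolute_pointer() keeps gcc from
     * treating the constant address as a zero-sized object and warning
     * about out-of-bounds accesses. */
    #define EXAMPLE_ROM_BASE	absolute_pointer(0x000f0000)
    
    static void example_read_rom(void *buf, size_t len)
    {
    	memcpy(buf, EXAMPLE_ROM_BASE, len);
    }
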
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 2487be0e7199..e6ec63403965 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -21,26 +21,6 @@
*/
/*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support gcc < 5, we implement __has_attribute
- * by hand.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__ 1
-# define __GCC4_has_attribute___copy__ 0
-# define __GCC4_has_attribute___designated_init__ 0
-# define __GCC4_has_attribute___externally_visible__ 1
-# define __GCC4_has_attribute___no_caller_saved_registers__ 0
-# define __GCC4_has_attribute___noclone__ 1
-# define __GCC4_has_attribute___no_profile_instrument_function__ 0
-# define __GCC4_has_attribute___nonstring__ 0
-# define __GCC4_has_attribute___no_sanitize_address__ 1
-# define __GCC4_has_attribute___no_sanitize_undefined__ 1
-# define __GCC4_has_attribute___fallthrough__ 0
-#endif
-
-/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
*/
#define __alias(symbol) __attribute__((__alias__(#symbol)))
@@ -74,7 +54,6 @@
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*
- * Optional: only supported since gcc >= 4.9
* Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
@@ -138,6 +117,17 @@
#endif
/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
+ */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
@@ -299,6 +289,17 @@
#define __must_check __attribute__((__warn_unused_result__))
/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
+ */
+#if __has_attribute(__warning__)
+# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
+#else
+# define __compiletime_warning(msg)
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
*/
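
With the definitions moved here and gated on __has_attribute(), both macros degrade to no-ops on compilers that lack the attributes (clang gained them in 14.0). A minimal sketch of the usual pattern, with hypothetical names:

    /* Sketch only: a hypothetical compile-time size check. The call to
     * the __compiletime_error() function must be optimized away; if the
     * bad branch survives, the build fails with the given message. */
    extern void example_bad_copy(void)
    	__compiletime_error("example_copy: constant size too large");
    
    static inline void example_copy(void *dst, const void *src, size_t n)
    {
    	if (__builtin_constant_p(n) && n > 64)
    		example_bad_copy();
    	__builtin_memcpy(dst, src, n);
    }
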
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index e4ea86fc584d..b6ff83a714ca 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -294,12 +294,6 @@ struct ftrace_likely_data {
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 94a578a96202..9cf51e41e697 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -143,12 +143,6 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
-/* Wrappers which go away once all code is converted */
-static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
-static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
-static inline void get_online_cpus(void) { cpus_read_lock(); }
-static inline void put_online_cpus(void) { cpus_read_unlock(); }
-
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);
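
The removed wrappers were one-to-one aliases, so any leftover caller converts mechanically; a sketch:

    /* Sketch only: converting a leftover caller of the removed wrappers. */
    static void example_walk_online_cpus(void)
    {
    	unsigned int cpu;
    
    	cpus_read_lock();		/* was: get_online_cpus() */
    	for_each_online_cpu(cpu)
    		pr_info("cpu %u is online\n", cpu);
    	cpus_read_unlock();		/* was: put_online_cpus() */
    }
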
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9fd719475fcd..ff88bb3e44fc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -9,10 +9,14 @@
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@@ -365,14 +369,17 @@ struct cpufreq_driver {
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
- /* Will be called after the driver is fully initialized */
- void (*ready)(struct cpufreq_policy *policy);
-
struct freq_attr **attr;
/* platform specific boost support code */
bool boost_enabled;
int (*set_boost)(struct cpufreq_policy *policy, int state);
+
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
+ */
+ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
@@ -995,6 +1002,55 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
+
+static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name)
+{
+ struct device_node *cpu_np;
+ struct of_phandle_args args;
+ int ret;
+
+ cpu_np = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+ ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
+ &args);
+ of_node_put(cpu_np); /* done with the node, also on error */
+
+ if (ret < 0)
+ return ret;
+
+ return args.args[0];
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask)
+{
+ int target_idx;
+ int cpu, ret;
+
+ ret = parse_perf_domain(pcpu, list_name, cell_name);
+ if (ret < 0)
+ return ret;
+
+ target_idx = ret;
+ cpumask_set_cpu(pcpu, cpumask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == pcpu)
+ continue;
+
+ ret = parse_perf_domain(cpu, list_name, cell_name); /* each CPU, not pcpu */
+ if (ret < 0)
+ continue;
+
+ if (target_idx == ret)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return target_idx;
+}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -1014,6 +1070,12 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask)
+{
+ return -EOPNOTSUPP;
+}
#endif
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
@@ -1035,7 +1097,6 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif
-
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
@@ -1046,4 +1107,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
+ policy->related_cpus);
+}
#endif /* _LINUX_CPUFREQ_H */
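
The ->ready() callback's remaining users only registered an energy model, so it is replaced by a dedicated ->register_em() hook that the core calls once the policy is initialized but before the governor starts. OPP-based drivers can plug in the new cpufreq_register_em_with_opp() helper directly; a minimal sketch, assuming a hypothetical driver (example_cpufreq_init and example_cpufreq_target_index are placeholders):

    /* Sketch only: a hypothetical OPP-based driver wiring up the new
     * hook, replacing an open-coded dev_pm_opp_of_register_em() call
     * in ->ready() or ->init(). */
    static struct cpufreq_driver example_cpufreq_driver = {
    	.name		= "example-cpufreq",
    	.init		= example_cpufreq_init,
    	.target_index	= example_cpufreq_target_index,
    	.register_em	= cpufreq_register_em_with_opp,
    };
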
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 39cf84a30b9f..832d8a74fa59 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -22,8 +22,42 @@
* AP_ACTIVE AP_ACTIVE
*/
+/*
+ * CPU hotplug states. The state machine invokes the installed state
+ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
+ * during a CPU online operation. During a CPU offline operation the
+ * installed teardown callbacks are invoked in the reverse order from
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
+ *
+ * The state space has three sections: PREPARE, STARTING and ONLINE.
+ *
+ * PREPARE: The callbacks are invoked on a control CPU before the
+ * hotplugged CPU is started up or after the hotplugged CPU has died.
+ *
+ * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
+ * hotplug startup/teardown code with interrupts disabled.
+ *
+ * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
+ * hotplug thread with interrupts and preemption enabled.
+ *
+ * Adding explicit states to this enum is only necessary when:
+ *
+ * 1) The state is within the STARTING section
+ *
+ * 2) The state has ordering constraints vs. other states in the
+ * same section.
+ *
+ * If neither #1 nor #2 apply, please use the dynamic state space when
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
+ * for the @state argument of the setup function.
+ *
+ * See Documentation/core-api/cpu_hotplug.rst for further information and
+ * examples.
+ */
enum cpuhp_state {
CPUHP_INVALID = -1,
+
+ /* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
@@ -95,6 +129,11 @@ enum cpuhp_state {
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
+
+ /*
+ * STARTING section invoked on the hotplugged CPU in low level
+ * bringup and teardown code.
+ */
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
@@ -155,6 +194,8 @@ enum cpuhp_state {
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
+
+ /* ONLINE section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
@@ -216,14 +257,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
int (*teardown)(unsigned int cpu),
bool multi_instance);
/**
- * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
+ * callback
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Installs the callback functions and invokes the startup callback on
- * the present cpus which have already reached the @state.
+ * Installs the callback functions and invokes the @startup callback on
+ * the online cpus which have already reached the @state.
*/
static inline int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
@@ -233,6 +275,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
+/**
+ * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
+ * @startup callback from a cpus_read_lock()
+ * held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state() except that it must be invoked from within a
+ * cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -244,14 +298,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
- * callbacks
+ * @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Same as @cpuhp_setup_state except that no calls are executed are invoked
- * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ * Same as cpuhp_setup_state() except that the @startup callback is not
+ * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -262,6 +316,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}
+/**
+ * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
+ * invoking the @startup callback from
+ * a cpus_read_lock() held region
+ *
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
+ * within a cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -275,13 +342,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state are registered via
- * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls()
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -306,9 +373,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state and invokes the startup callback on
- * the present cpus which have already reached the @state. The @state must have
- * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state and invokes the registered startup
+ * callback on the online cpus which have already reached the @state. The
+ * @state must have been earlier marked as multi-instance by
+ * cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -322,8 +390,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state The @state must have been earlier
- * marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or
+ * HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -331,6 +400,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
+/**
+ * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
+ * without invoking the startup
+ * callback from a cpus_read_lock()
+ * held region.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Same as cpuhp_state_add_instance_nocalls() except that it must be
+ * invoked from within a cpus_read_lock() held region.
+ */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -346,7 +426,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
- * the present cpus which have already reached the @state.
+ * the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -355,7 +435,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)
/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
- * teardown
+ * the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -363,6 +443,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
+ * teardown from a cpus_read_lock() held region.
+ * @state: The state for which the calls are removed
+ *
+ * Same as cpuhp_remove_state_nocalls() except that it must be invoked
+ * from within a cpus_read_lock() held region.
+ */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -390,8 +478,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
- * Removes the instance and invokes the teardown callback on the present cpus
- * which have already reached the @state.
+ * Removes the instance and invokes the teardown callback on the online cpus
+ * which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
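
As the new comment block says, states without ordering constraints should use the dynamic state space rather than adding enum entries; a minimal sketch, assuming hypothetical callbacks:

    /* Sketch only: a hypothetical driver using the dynamic ONLINE
     * section. The returned dynamic state must be kept for a later
     * cpuhp_remove_state(). */
    static int example_cpu_online(unsigned int cpu)
    {
    	pr_info("example: cpu %u came online\n", cpu);
    	return 0;
    }
    
    static int example_cpu_offline(unsigned int cpu)
    {
    	pr_info("example: cpu %u going down\n", cpu);
    	return 0;
    }
    
    static int example_hp_state;
    
    static int __init example_init(void)
    {
    	int ret;
    
    	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
    				example_cpu_online, example_cpu_offline);
    	if (ret < 0)
    		return ret;
    	example_hp_state = ret;	/* the allocated dynamic state */
    	return 0;
    }
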
diff --git a/include/linux/damon.h b/include/linux/damon.h
new file mode 100644
index 000000000000..d68b67b8d458
--- /dev/null
+++ b/include/linux/damon.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/mutex.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+/* Minimal region size. Every damon_region is aligned by this. */
+#define DAMON_MIN_REGION PAGE_SIZE
+
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @list: List head for siblings.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ struct list_head list;
+};
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @id: Unique identifier for this target.
+ * @nr_regions: Number of monitoring target regions of this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @id of each target should be unique among the targets of the context. For
+ * example, in the virtual address monitoring context, it could be a pidfd or
+ * an address of an mm_struct.
+ */
+struct damon_target {
+ unsigned long id;
+ unsigned int nr_regions;
+ struct list_head regions_list;
+ struct list_head list;
+};
+
+struct damon_ctx;
+
+/**
+ * struct damon_primitive - Monitoring primitives for given use cases.
+ *
+ * @init: Initialize primitive-internal data structures.
+ * @update: Update primitive-internal data structures.
+ * @prepare_access_checks: Prepare next access check of target regions.
+ * @check_accesses: Check the accesses to target regions.
+ * @reset_aggregated: Reset aggregated accesses monitoring results.
+ * @target_valid: Determine if the target is valid.
+ * @cleanup: Clean up the context.
+ *
+ * DAMON can be extended for various address spaces and usages. For this,
+ * users should register the low level primitives for their target address
+ * space and use case via the &damon_ctx.primitive. Then, the monitoring thread
+ * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
+ * the monitoring, @update after each &damon_ctx.primitive_update_interval, and
+ * @check_accesses, @target_valid and @prepare_access_checks after each
+ * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each
+ * &damon_ctx.aggr_interval.
+ *
+ * @init should initialize primitive-internal data structures. For example,
+ * this could be used to construct proper monitoring target regions and link
+ * those to &damon_ctx.adaptive_targets.
+ * @update should update the primitive-internal data structures. For example,
+ * this could be used to update monitoring target regions to reflect their
+ * current status.
+ * @prepare_access_checks should manipulate the monitoring regions to be
+ * prepared for the next access check.
+ * @check_accesses should check the accesses to each region made since the
+ * last preparation and update the number of observed accesses of each region.
+ * It should also return the maximum number of accesses observed in this
+ * update; the value is used as the threshold for the regions adjustment.
+ * @reset_aggregated should reset the access monitoring results aggregated
+ * by @check_accesses.
+ * @target_valid should check whether the target is still valid for the
+ * monitoring.
+ * @cleanup is called from @kdamond just before its termination.
+ */
+struct damon_primitive {
+ void (*init)(struct damon_ctx *context);
+ void (*update)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
+ void (*reset_aggregated)(struct damon_ctx *context);
+ bool (*target_valid)(void *target);
+ void (*cleanup)(struct damon_ctx *context);
+};
+
+/*
+ * struct damon_callback - Monitoring events notification callbacks.
+ *
+ * @before_start: Called before starting the monitoring.
+ * @after_sampling: Called after each sampling.
+ * @after_aggregation: Called after each aggregation.
+ * @before_terminate: Called before terminating the monitoring.
+ * @private: User private data.
+ *
+ * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
+ * @before_terminate just before starting and finishing the monitoring,
+ * respectively. Therefore, those are good places for installing and cleaning
+ * @private.
+ *
+ * The monitoring thread calls @after_sampling and @after_aggregation for each
+ * of the sampling intervals and aggregation intervals, respectively.
+ * Therefore, users can safely access the monitoring results without additional
+ * protection. For this reason, users are recommended to use these
+ * callbacks to access the results.
+ *
+ * If any callback returns non-zero, monitoring stops.
+ */
+struct damon_callback {
+ void *private;
+
+ int (*before_start)(struct damon_ctx *context);
+ int (*after_sampling)(struct damon_ctx *context);
+ int (*after_aggregation)(struct damon_ctx *context);
+ int (*before_terminate)(struct damon_ctx *context);
+};
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
+ *
+ * @sample_interval: The time between access samplings.
+ * @aggr_interval: The time between monitor results aggregations.
+ * @primitive_update_interval: The time between monitoring primitive updates.
+ *
+ * For each @sample_interval, DAMON checks whether each region is accessed or
+ * not. It aggregates and keeps the access information (number of accesses to
+ * each region) for @aggr_interval time. DAMON also checks whether the target
+ * memory regions need update (e.g., by ``mmap()`` calls from the application,
+ * in case of virtual memory monitoring) and applies the changes for each
+ * @primitive_update_interval. All time intervals are in micro-seconds.
+ * Please refer to &struct damon_primitive and &struct damon_callback for more
+ * detail.
+ *
+ * @kdamond: Kernel thread who does the monitoring.
+ * @kdamond_stop: Notifies whether kdamond should stop.
+ * @kdamond_lock: Mutex for the synchronizations with @kdamond.
+ *
+ * For each monitoring context, one kernel thread for the monitoring is
+ * created. The pointer to the thread is stored in @kdamond.
+ *
+ * Once started, the monitoring thread runs until explicitly required to be
+ * terminated or every monitoring target is invalid. The validity of the
+ * targets is checked via the &damon_primitive.target_valid of @primitive. The
+ * termination can also be explicitly requested by writing non-zero to
+ * @kdamond_stop. The thread sets @kdamond to NULL when it terminates.
+ * Therefore, users can know whether the monitoring is ongoing or terminated by
+ * reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from
+ * outside of the monitoring thread must be protected by @kdamond_lock.
+ *
+ * Note that the monitoring thread protects only @kdamond and @kdamond_stop via
+ * @kdamond_lock. Accesses to other fields must be protected by themselves.
+ *
+ * @primitive: Set of monitoring primitives for given use cases.
+ * @callback: Set of callbacks for monitoring events notifications.
+ *
+ * @min_nr_regions: The minimum number of adaptive monitoring regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring regions.
+ * @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ */
+struct damon_ctx {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long primitive_update_interval;
+
+/* private: internal use only */
+ struct timespec64 last_aggregation;
+ struct timespec64 last_primitive_update;
+
+/* public: */
+ struct task_struct *kdamond;
+ bool kdamond_stop;
+ struct mutex kdamond_lock;
+
+ struct damon_primitive primitive;
+ struct damon_callback callback;
+
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+ struct list_head adaptive_targets;
+};
+
+#define damon_next_region(r) \
+ (container_of(r->list.next, struct damon_region, list))
+
+#define damon_prev_region(r) \
+ (container_of(r->list.prev, struct damon_region, list))
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+
+#ifdef CONFIG_DAMON
+
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t);
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+
+struct damon_target *damon_new_target(unsigned long id);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t);
+unsigned int damon_nr_regions(struct damon_target *t);
+
+struct damon_ctx *damon_new_ctx(void);
+void damon_destroy_ctx(struct damon_ctx *ctx);
+int damon_set_targets(struct damon_ctx *ctx,
+ unsigned long *ids, ssize_t nr_ids);
+int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
+ unsigned long aggr_int, unsigned long primitive_upd_int,
+ unsigned long min_nr_reg, unsigned long max_nr_reg);
+int damon_nr_running_ctxs(void);
+
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs);
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+
+#endif /* CONFIG_DAMON */
+
+#ifdef CONFIG_DAMON_VADDR
+
+/* Monitoring primitives for virtual memory address spaces */
+void damon_va_init(struct damon_ctx *ctx);
+void damon_va_update(struct damon_ctx *ctx);
+void damon_va_prepare_access_checks(struct damon_ctx *ctx);
+unsigned int damon_va_check_accesses(struct damon_ctx *ctx);
+bool damon_va_target_valid(void *t);
+void damon_va_cleanup(struct damon_ctx *ctx);
+void damon_va_set_primitives(struct damon_ctx *ctx);
+
+#endif /* CONFIG_DAMON_VADDR */
+
+#endif /* _DAMON_H_ */
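
A minimal sketch of driving this API for virtual address space monitoring, assuming a hypothetical in-kernel user and a valid target pid (the interval values are illustrative; per the kernel-doc above, they are in microseconds):

    /* Sketch only: a hypothetical in-kernel DAMON user monitoring one
     * process's virtual address space with the declared interface. */
    static int example_start_damon(unsigned long pid)
    {
    	struct damon_ctx *ctx;
    	int err;
    
    	ctx = damon_new_ctx();
    	if (!ctx)
    		return -ENOMEM;
    
    	/* sample every 5ms, aggregate every 100ms, update targets every 1s,
    	 * keep between 10 and 1000 adaptive regions */
    	err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
    	if (err)
    		goto out;
    
    	damon_va_set_primitives(ctx);	/* virtual address space primitives */
    
    	err = damon_set_targets(ctx, &pid, 1);
    	if (err)
    		goto out;
    
    	return damon_start(&ctx, 1);
    out:
    	damon_destroy_ctx(ctx);
    	return err;
    }
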
diff --git a/include/linux/dax.h b/include/linux/dax.h
index b52f084aa643..2619d94c308d 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -41,7 +41,6 @@ struct dax_operations {
extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
@@ -58,8 +57,6 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
__set_dax_synchronous(dax_dev);
}
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -73,10 +70,6 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
-static inline struct dax_device *dax_get_by_host(const char *host)
-{
- return NULL;
-}
static inline struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags)
{
@@ -106,12 +99,6 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
-static inline bool dax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t len)
-{
- return false;
-}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct dax_device *dax_dev)
{
@@ -122,22 +109,12 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
-static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
-{
- return __bdev_dax_supported(bdev, blocksize);
-}
-
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
+bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors);
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
- sectors);
-}
+
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+ int blocksize, sector_t start, sector_t len);
static inline void fs_put_dax(struct dax_device *dax_dev)
{
@@ -153,15 +130,11 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t st
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
-static inline bool bdev_dax_supported(struct block_device *bdev,
- int blocksize)
-{
- return false;
-}
+#define generic_fsdax_supported NULL
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
+static inline bool dax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
+ sector_t len)
{
return false;
}
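
With the wrappers gone, dax_supported() is the single entry point for filesystems probing DAX support at mount time, backed by generic_fsdax_supported() through the dax_operations. A minimal sketch, assuming a hypothetical filesystem checking its whole block device:

    /* Sketch only: a hypothetical filesystem checking DAX support before
     * honouring a "-o dax" mount option. */
    static bool example_can_dax(struct super_block *sb,
    			    struct dax_device *dax_dev)
    {
    	return dax_supported(dax_dev, sb->s_bdev, sb->s_blocksize, 0,
    			     bdev_nr_sectors(sb->s_bdev));
    }
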
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 93c3ca5fdafd..e5c2c9e71bf1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -380,6 +380,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@@ -398,7 +399,7 @@ enum dma_slave_buswidth {
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 1834752c5617..39dcadd492b5 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
/**
- * em_perf_state - Performance state of a performance domain
+ * struct em_perf_state - Performance state of a performance domain
* @frequency: The frequency in KHz, for consistency with CPUFreq
* @power: The power consumed at this level (by 1 CPU or by a registered
* device). It can be a total power: static and dynamic.
@@ -25,7 +25,7 @@ struct em_perf_state {
};
/**
- * em_perf_domain - Performance domain
+ * struct em_perf_domain - Performance domain
* @table: List of performance states, in ascending order
* @nr_perf_states: Number of performance states
* @milliwatts: Flag indicating the power values are in milli-Watts
@@ -103,12 +103,12 @@ void em_dev_unregister_perf_domain(struct device *dev);
/**
* em_cpu_energy() - Estimates the energy consumed by the CPUs of a
- performance domain
+ * performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
* @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
- might reflect reduced frequency (due to thermal)
+ * might reflect reduced frequency (due to thermal)
*
* This function must be used only for CPU devices. There is no validation,
* i.e. if the EM is a CPU type and has cpumask allocated. It is called from
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 593322c946e6..3337745d81bd 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -68,4 +68,22 @@ static inline void eventpoll_release(struct file *file) {}
#endif
+#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
+/* ARM OABI has an incompatible struct layout and needs a special handler */
+extern struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent);
+#else
+static inline struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
+{
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
+}
+#endif
+
#endif /* #ifndef _LINUX_EVENTPOLL_H */
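
The helper gives the epoll core a single writer per ready event that the ARM OABI layer can override; everywhere else it expands to the old pair of __put_user() calls. A sketch of the calling pattern (a hypothetical loop, mirroring how the event-delivery path would consume it):

    /* Sketch only: writing a batch of ready events to user space. A NULL
     * return signals -EFAULT; otherwise the helper returns the slot for
     * the next event. */
    static int example_send_events(struct epoll_event __user *uevent,
    			       __poll_t *revents, __u64 *data, int n)
    {
    	int i;
    
    	for (i = 0; i < n; i++) {
    		uevent = epoll_put_uevent(revents[i], data[i], uevent);
    		if (!uevent)
    			return -EFAULT;
    	}
    	return n;
    }
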
diff --git a/include/linux/file.h b/include/linux/file.h
index 2de2e4613d7b..51e830b4fe3a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -94,6 +94,9 @@ extern void fd_install(unsigned int fd, struct file *file);
extern int __receive_fd(struct file *file, int __user *ufd,
unsigned int o_flags);
+
+extern int receive_fd(struct file *file, unsigned int o_flags);
+
static inline int receive_fd_user(struct file *file, int __user *ufd,
unsigned int o_flags)
{
@@ -101,10 +104,6 @@ static inline int receive_fd_user(struct file *file, int __user *ufd,
return -EFAULT;
return __receive_fd(file, ufd, o_flags);
}
-static inline int receive_fd(struct file *file, unsigned int o_flags)
-{
- return __receive_fd(file, NULL, o_flags);
-}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 37ad9a730a89..e7a633353fd2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3439,6 +3439,8 @@ extern int buffer_migrate_page_norefs(struct address_space *,
#define buffer_migrate_page_norefs NULL
#endif
+int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
+ unsigned int ia_valid);
int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
void setattr_copy(struct user_namespace *, struct inode *inode,
@@ -3592,7 +3594,7 @@ int proc_nr_dentry(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int proc_nr_inodes(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int __init get_filesystem_list(char *buf);
+int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 7902c7d8b55f..4aa1031d3e4c 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr)
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
pagefault_disable();
return __kmap_local_page_prot(page, prot);
}
@@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page)
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
pagefault_disable();
return __kmap_local_pfn_prot(pfn, kmap_prot);
}
@@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr)
{
kunmap_local_indexed(addr);
pagefault_enable();
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
}
unsigned int __nr_free_highpages(void);
@@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr)
static inline void *kmap_atomic(struct page *page)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
pagefault_disable();
return page_address(page);
}
@@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr)
kunmap_flush_on_unmap(addr);
#endif
pagefault_enable();
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
}
static inline unsigned int nr_free_highpages(void) { return 0; }
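
On PREEMPT_RT a kmap_atomic() section only disables migration and page faults, so the task stays on its CPU (which many kmap_atomic() users rely on) while the section remains preemptible, avoiding the latency of a full preempt-off region. The calling pattern is unchanged; a sketch:

    /* Sketch only: the usual kmap_atomic() pattern still applies. On
     * PREEMPT_RT the section is merely migrate-disabled, so code inside
     * it may be preempted and must not assume preemption is off. */
    static void example_clear_highpage(struct page *page)
    {
    	void *addr = kmap_atomic(page);
    
    	memset(addr, 0, PAGE_SIZE);
    	kunmap_atomic(addr);
    }
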
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f7ca1a3870ea..1faebe1cd0ed 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -858,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@@ -1042,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b066024c62e3..34de69b3b8ba 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -118,6 +118,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
void memblock_free_all(void);
+void memblock_free_ptr(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
diff --git a/include/linux/memory.h b/include/linux/memory.h
index d9a0b61cd432..7efc0a7c14c9 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -23,6 +23,48 @@
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+/**
+ * struct memory_group - a logical group of memory blocks
+ * @nid: The node id for all memory blocks inside the memory group.
+ * @blocks: List of all memory blocks belonging to this memory group.
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
+ * memory group.
+ * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
+ * memory group.
+ * @is_dynamic: The memory group type: static vs. dynamic
+ * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
+ * number of pages we'll have in this static memory group.
+ * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
+ * in which memory is added/removed in this dynamic memory group.
+ * This granularity defines the alignment of a unit in physical
+ * address space; it has to be at least as big as a single
+ * memory block.
+ *
+ * A memory group logically groups memory blocks; each memory block
+ * belongs to at most one memory group. A memory group corresponds to
+ * a memory device, such as a DIMM or a NUMA node, which spans multiple
+ * memory blocks and might even span multiple non-contiguous physical memory
+ * ranges.
+ *
+ * Modification of members after registration is serialized by memory
+ * hot(un)plug code.
+ */
+struct memory_group {
+ int nid;
+ struct list_head memory_blocks;
+ unsigned long present_kernel_pages;
+ unsigned long present_movable_pages;
+ bool is_dynamic;
+ union {
+ struct {
+ unsigned long max_pages;
+ } s;
+ struct {
+ unsigned long unit_pages;
+ } d;
+ };
+};
+
struct memory_block {
unsigned long start_section_nr;
unsigned long state; /* serialized by the dev->lock */
@@ -34,6 +76,8 @@ struct memory_block {
* lay at the beginning of the memory block.
*/
unsigned long nr_vmemmap_pages;
+ struct memory_group *group; /* group (if any) for this block */
+ struct list_head group_next; /* next block inside memory group */
};
int arch_get_memory_phys_device(unsigned long start_pfn);
@@ -86,7 +130,8 @@ static inline int memory_notify(unsigned long val, void *v)
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages);
+ unsigned long vmemmap_pages,
+ struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
@@ -96,6 +141,14 @@ extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
+
+extern int memory_group_register_static(int nid, unsigned long max_pages);
+extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
+extern int memory_group_unregister(int mgid);
+struct memory_group *memory_group_find_by_id(int mgid);
+typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg);
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
#ifdef CONFIG_MEMORY_HOTPLUG
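
A sketch of how a driver for, say, a hotpluggable DIMM could use the new grouping interface, with hypothetical names and sizes; the returned group id is later passed in place of the nid with MHP_NID_IS_MGID set (see the memory_hotplug.h hunk below):

    /* Sketch only: a hypothetical driver grouping the memory blocks of
     * one DIMM so hot(un)plug code can treat them as a single device. */
    static int example_register_dimm_group(int nid, u64 dimm_size)
    {
    	int mgid = memory_group_register_static(nid, PHYS_PFN(dimm_size));
    
    	if (mgid < 0)
    		return mgid;
    	/* use mgid as the nid argument together with MHP_NID_IS_MGID */
    	return mgid;
    }
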
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index a7fd2c3ccb77..e5a867c950b2 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -12,6 +12,7 @@ struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
+struct memory_group;
struct resource;
struct vmem_altmap;
@@ -50,6 +51,11 @@ typedef int __bitwise mhp_t;
* Only selected architectures support it with SPARSE_VMEMMAP.
*/
#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
/*
* Extended parameters for memory hotplug:
@@ -95,13 +101,15 @@ static inline void zone_seqlock_init(struct zone *zone)
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
-extern void adjust_present_page_count(struct zone *zone, long nr_pages);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
- struct zone *zone);
+ struct zone *zone, struct memory_group *group);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
@@ -130,8 +138,7 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-extern void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap);
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
struct vmem_altmap *altmap);
@@ -292,25 +299,27 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern int remove_memory(int nid, u64 start, u64 size);
-extern void __remove_memory(int nid, u64 start, u64 size);
-extern int offline_and_remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct memory_group *group)
{
return -EINVAL;
}
-static inline int remove_memory(int nid, u64 start, u64 size)
+static inline int remove_memory(u64 start, u64 size)
{
return -EBUSY;
}
-static inline void __remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void set_zone_contiguous(struct zone *zone);
@@ -339,7 +348,8 @@ extern void sparse_remove_section(struct mem_section *ms,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params);
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 0540f0156f58..96e113e23d04 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -101,14 +101,14 @@ static inline bool mmap_write_trylock(struct mm_struct *mm)
static inline void mmap_write_unlock(struct mm_struct *mm)
{
- up_write(&mm->mmap_lock);
__mmap_lock_trace_released(mm, true);
+ up_write(&mm->mmap_lock);
}
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
- downgrade_write(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, false, true);
+ downgrade_write(&mm->mmap_lock);
}
static inline void mmap_read_lock(struct mm_struct *mm)
@@ -140,23 +140,14 @@ static inline bool mmap_read_trylock(struct mm_struct *mm)
static inline void mmap_read_unlock(struct mm_struct *mm)
{
- up_read(&mm->mmap_lock);
__mmap_lock_trace_released(mm, false);
-}
-
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
- if (mmap_read_trylock(mm)) {
- rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
- return true;
- }
- return false;
+ up_read(&mm->mmap_lock);
}
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
- up_read_non_owner(&mm->mmap_lock);
__mmap_lock_trace_released(mm, false);
+ up_read_non_owner(&mm->mmap_lock);
}
static inline void mmap_assert_locked(struct mm_struct *mm)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1bd5f5955f9a..6a1d79d84675 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -540,6 +540,10 @@ struct zone {
* is calculated as:
* present_pages = spanned_pages - absent_pages(pages in holes);
*
+ * present_early_pages is present pages existing within the zone
+ * located on memory available since early boot, excluding hotplugged
+ * memory.
+ *
* managed_pages is present pages managed by the buddy system, which
* is calculated as (reserved_pages includes pages allocated by the
* bootmem allocator):
@@ -572,6 +576,9 @@ struct zone {
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned long present_early_pages;
+#endif
#ifdef CONFIG_CMA
unsigned long cma_pages;
#endif
@@ -1525,18 +1532,6 @@ void sparse_init(void);
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
- */
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
-#endif
-
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/once.h b/include/linux/once.h
index ae6f4eb41cbe..d361fb14ac3a 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -16,7 +16,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
* out the condition into a nop. DO_ONCE() guarantees type safety of
* arguments!
*
- * Not that the following is not equivalent ...
+ * Note that the following is not equivalent ...
*
* DO_ONCE(func, arg);
* DO_ONCE(func, arg);
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0f12345c21fb..4669632bd72b 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -6,12 +6,9 @@
#include <linux/limits.h>
/*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -54,7 +51,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
return unlikely(overflow);
}
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
* For simplicity and code hygiene, the fallback code below insists on
* a, b and *d having the same type (similar to the min() and max()
@@ -90,134 +86,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
__builtin_mul_overflow(__a, __b, __d); \
}))
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a + __b; \
- *__d < __a; \
-})
-#define __unsigned_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a - __b; \
- __a < __b; \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a * __b; \
- __builtin_constant_p(__b) ? \
- __b > 0 && __a > type_max(typeof(__a)) / __b : \
- __a > 0 && __b > type_max(typeof(__b)) / __a; \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a + (u64)__b; \
- (((~(__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a - (u64)__b; \
- ((((__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- typeof(a) __tmax = type_max(typeof(a)); \
- typeof(a) __tmin = type_min(typeof(a)); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a * (u64)__b; \
- (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
- (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
- (__b == (typeof(__b))-1 && __a == __tmin); \
-})
-
-
-#define check_add_overflow(a, b, d) __must_check_overflow( \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_add_overflow(a, b, d), \
- __unsigned_add_overflow(a, b, d)))
-
-#define check_sub_overflow(a, b, d) __must_check_overflow( \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_sub_overflow(a, b, d), \
- __unsigned_sub_overflow(a, b, d)))
-
-#define check_mul_overflow(a, b, d) __must_check_overflow( \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_mul_overflow(a, b, d), \
- __unsigned_mul_overflow(a, b, d)))
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
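With the fallback removed, every build goes through the compiler builtins above. A minimal usage sketch (hypothetical caller, not part of this patch; note that a, b and *d must share one type):

	size_t n = 100, size = 4096, bytes;

	if (check_mul_overflow(n, size, &bytes))	/* true on overflow */
		return -EOVERFLOW;
	/* bytes == n * size, known not to have wrapped */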
/** check_shl_overflow() - Calculate a left-shifted value and check overflow
*
* @a: Value to be shifted
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 1ace27c4a8e0..a558d67ee86f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -131,7 +131,7 @@ enum pageflags {
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
#endif
@@ -178,6 +178,8 @@ enum pageflags {
PG_reported = PG_uptodate,
};
+#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
+
#ifndef __GENERATING_BOUNDS_H
static inline unsigned long _compound_head(const struct page *page)
@@ -439,7 +441,7 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
@@ -778,6 +780,15 @@ static inline int PageSlabPfmemalloc(struct page *page)
return PageActive(page);
}
+/*
+ * A version of PageSlabPfmemalloc() for opportunistic checks where the page
+ * might have been freed under us and not be a PageSlab anymore.
+ */
+static inline int __PageSlabPfmemalloc(struct page *page)
+{
+ return PageActive(page);
+}
+
static inline void SetPageSlabPfmemalloc(struct page *page)
{
VM_BUG_ON_PAGE(!PageSlab(page), page);
@@ -822,7 +833,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ (PAGEFLAGS_MASK & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index aff81ba31bd8..fabb2e1e087f 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -19,7 +19,7 @@ struct page_ext_operations {
enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
#endif
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 1e894d34bdce..d8a6aecf99cb 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -6,7 +6,7 @@
#include <linux/page-flags.h>
#include <linux/page_ext.h>
-#ifdef CONFIG_IDLE_PAGE_TRACKING
+#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
static inline bool page_is_young(struct page *page)
@@ -106,7 +106,7 @@ static inline void clear_page_idle(struct page *page)
}
#endif /* CONFIG_64BIT */
-#else /* !CONFIG_IDLE_PAGE_TRACKING */
+#else /* !CONFIG_PAGE_IDLE_FLAG */
static inline bool page_is_young(struct page *page)
{
@@ -135,6 +135,6 @@ static inline void clear_page_idle(struct page *page)
{
}
-#endif /* CONFIG_IDLE_PAGE_TRACKING */
+#endif /* CONFIG_PAGE_IDLE_FLAG */
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5dcf446f42e5..62db6b0176b9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -521,18 +521,17 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
*/
static inline pgoff_t page_to_index(struct page *page)
{
- pgoff_t pgoff;
+ struct page *head;
if (likely(!PageTransTail(page)))
return page->index;
+ head = compound_head(page);
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index;
- pgoff += page - compound_head(page);
- return pgoff;
+ return head->index + page - head;
}
extern pgoff_t hugetlb_basepage_index(struct page *page);
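The rewrite keeps the same result while calling compound_head() once: a tail page's file index is the head page's index plus the tail's offset into the compound page. Illustrative values only:

	/* head->index == 64, page == head + 3  =>  page_to_index(page) == 67 */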
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 5ba475ca9078..f16de399d2de 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -122,6 +122,9 @@ static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { }
static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { }
#endif /* CONFIG_PCIE_EDR */
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *));
+void pci_acpi_clear_companion_lookup_hook(void);
+
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 50a649d33e68..a48778e1a4ee 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -62,31 +62,32 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *epc, u8 func_no,
+ int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, u8 func_no,
+ int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- void (*clear_bar)(struct pci_epc *epc, u8 func_no,
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- int (*map_addr)(struct pci_epc *epc, u8 func_no,
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr, u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc, u8 func_no);
- int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
- int (*get_msix)(struct pci_epc *epc, u8 func_no);
- int (*raise_irq)(struct pci_epc *epc, u8 func_no,
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num);
- int (*map_msi_irq)(struct pci_epc *epc, u8 func_no,
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
struct module *owner;
};
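Every op now carries a vfunc_no next to func_no (0 for a physical function). A sketch of how an endpoint function driver might call through the updated wrappers (hypothetical calling context, not from this patch):

	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, hdr);
	if (ret)
		dev_err(&epf->dev, "configuration header write failed\n");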
@@ -128,6 +129,8 @@ struct pci_epc_mem {
* single window.
* @num_windows: number of windows supported by device
* @max_functions: max number of functions that can be configured in this EPC
+ * @max_vfs: Array indicating the maximum number of virtual functions that can
+ * be associated with each physical function
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
@@ -141,6 +144,7 @@ struct pci_epc {
struct pci_epc_mem *mem;
unsigned int num_windows;
u8 max_functions;
+ u8 *max_vfs;
struct config_group *group;
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
@@ -208,31 +212,32 @@ void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 8292420426f3..009a07147c61 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -121,8 +121,10 @@ struct pci_epf_bar {
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
* @msix_interrupts: number of MSI-X interrupts required by this function
- * @func_no: unique function number within this endpoint device
+ * @func_no: unique (physical) function number within this endpoint device
+ * @vfunc_no: unique virtual function number within a physical function
* @epc: the EPC device to which this EPF device is bound
+ * @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @nb: notifier block to notify EPF of any EPC events (like linkup)
@@ -133,6 +135,10 @@ struct pci_epf_bar {
* @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
* @sec_epc_func_no: unique (physical) function number within the secondary EPC
* @group: configfs group associated with the EPF device
+ * @is_bound: indicates if bind notification to function driver has been invoked
+ * @is_vf: true - virtual function, false - physical function
+ * @vfunction_num_map: bitmap to manage virtual function number
+ * @pci_vepf: list of virtual endpoint functions associated with this function
*/
struct pci_epf {
struct device dev;
@@ -142,8 +148,10 @@ struct pci_epf {
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
+ u8 vfunc_no;
struct pci_epc *epc;
+ struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
struct list_head list;
struct notifier_block nb;
@@ -156,6 +164,10 @@ struct pci_epf {
struct pci_epf_bar sec_epc_bar[6];
u8 sec_epc_func_no;
struct config_group *group;
+ unsigned int is_bound;
+ unsigned int is_vf;
+ unsigned long vfunction_num_map;
+ struct list_head pci_vepf;
};
/**
@@ -199,4 +211,6 @@ int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
struct config_group *group);
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
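A sketch of the intended pairing for the new helpers (assumed calling context; epf_pf and epf_vf obtained elsewhere):

	/* bind a virtual function to its physical function; assigns epf_vf->vfunc_no */
	ret = pci_epf_add_vepf(epf_pf, epf_vf);
	...
	pci_epf_remove_vepf(epf_pf, epf_vf);	/* undo on teardown */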
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 34d7d94ddf6d..cd8aa6fce204 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -49,6 +49,12 @@
PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_PARITY)
+/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
+#define PCI_NUM_RESET_METHODS 7
+
+#define PCI_RESET_PROBE true
+#define PCI_RESET_DO_RESET false
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -288,21 +294,14 @@ enum pci_bus_speed {
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-struct pci_cap_saved_data {
- u16 cap_nr;
- bool cap_extended;
- unsigned int size;
- u32 data[];
-};
-
-struct pci_cap_saved_state {
- struct hlist_node next;
- struct pci_cap_saved_data cap;
+struct pci_vpd {
+ struct mutex lock;
+ unsigned int len;
+ u8 cap;
};
struct irq_affinity;
struct pcie_link_state;
-struct pci_vpd;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;
@@ -333,6 +332,7 @@ struct pci_dev {
struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
struct pci_dev *rcec; /* Associated RCEC device */
#endif
+ u32 devcap; /* PCIe Device Capabilities */
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -388,6 +388,7 @@ struct pci_dev {
supported from root to here */
u16 l1ss; /* L1SS Capability pointer */
#endif
+ unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
pci_channel_state_t error_state; /* Current connectivity state */
@@ -427,7 +428,6 @@ struct pci_dev {
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
- unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
@@ -473,7 +473,7 @@ struct pci_dev {
#ifdef CONFIG_PCI_MSI
const struct attribute_group **msi_irq_groups;
#endif
- struct pci_vpd *vpd;
+ struct pci_vpd vpd;
#ifdef CONFIG_PCIE_DPC
u16 dpc_cap;
unsigned int dpc_rp_extensions:1;
@@ -505,6 +505,9 @@ struct pci_dev {
char *driver_override; /* Driver name to force a match */
unsigned long priv_flags; /* Private flags for the PCI driver */
+
+ /* These methods index pci_reset_fn_methods[] */
+ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -526,6 +529,16 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
+/*
+ * Currently in ACPI spec, for each PCI host bridge, PCI Segment
+ * Group number is limited to a 16-bit value, therefore (int)-1 is
+ * not a valid PCI domain number, and can be used as a sentinel
+ * value indicating ->domain_nr is not set by the driver (and
+ * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
+ * pci_bus_find_domain_nr()).
+ */
+#define PCI_DOMAIN_NR_NOT_SET (-1)
+
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* Root bus */
@@ -533,6 +546,7 @@ struct pci_host_bridge {
struct pci_ops *child_ops;
void *sysdata;
int busnr;
+ int domain_nr;
struct list_head windows; /* resource_entry */
struct list_head dma_ranges; /* dma ranges resource list */
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
@@ -1257,7 +1271,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
void pcie_print_link_status(struct pci_dev *dev);
-bool pcie_has_flr(struct pci_dev *dev);
+int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
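pcie_reset_flr() replaces the pcie_has_flr()/pcie_flr() pair with the common reset-method convention: called with PCI_RESET_PROBE it only reports (0 on success) whether FLR is usable, with PCI_RESET_DO_RESET it performs the reset. A caller sketch:

	if (pcie_reset_flr(pdev, PCI_RESET_PROBE) == 0)
		ret = pcie_reset_flr(pdev, PCI_RESET_DO_RESET);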
@@ -1307,12 +1321,6 @@ int pci_load_saved_state(struct pci_dev *dev,
struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
-struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
-struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
- u16 cap);
-int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
-int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
- u16 cap, unsigned int size);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@@ -1779,8 +1787,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
-static inline int __pci_register_driver(struct pci_driver *drv,
- struct module *owner)
+static inline int __must_check __pci_register_driver(struct pci_driver *drv,
+ struct module *owner,
+ const char *mod_name)
{ return 0; }
static inline int pci_register_driver(struct pci_driver *drv)
{ return 0; }
@@ -1920,9 +1929,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
#define pci_resource_len(dev,bar) \
- ((pci_resource_start((dev), (bar)) == 0 && \
- pci_resource_end((dev), (bar)) == \
- pci_resource_start((dev), (bar))) ? 0 : \
+ ((pci_resource_end((dev), (bar)) == 0) ? 0 : \
\
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
@@ -2289,20 +2296,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
-/* Small Resource Data Type Tag Item Names */
-#define PCI_VPD_STIN_END 0x0f /* End */
-
-#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
-
-#define PCI_VPD_SRDT_TIN_MASK 0x78
-#define PCI_VPD_SRDT_LEN_MASK 0x07
-#define PCI_VPD_LRDT_TIN_MASK 0x7f
-
-#define PCI_VPD_LRDT_TAG_SIZE 3
-#define PCI_VPD_SRDT_TAG_SIZE 1
-
-#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-
#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
@@ -2310,83 +2303,45 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
/**
- * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type length.
- */
-static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
-{
- return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
-}
-
-/**
- * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type Tag item.
- */
-static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
-{
- return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
-}
-
-/**
- * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
- *
- * Returns the extracted Small Resource Data Type length.
- */
-static inline u8 pci_vpd_srdt_size(const u8 *srdt)
-{
- return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
-}
-
-/**
- * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ * pci_vpd_alloc - Allocate buffer and read VPD into it
+ * @dev: PCI device
+ * @size: pointer to field where VPD length is returned
*
- * Returns the extracted Small Resource Data Type Tag Item.
+ * Returns pointer to allocated buffer or an ERR_PTR in case of failure
*/
-static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
-{
- return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
-}
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
/**
- * pci_vpd_info_field_size - Extracts the information field length
- * @info_field: Pointer to the beginning of an information field header
+ * pci_vpd_find_id_string - Locate id string in VPD
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @size: Pointer to field where length of id string is returned
*
- * Returns the extracted information field length.
+ * Returns the index of the id string or -ENOENT if not found.
*/
-static inline u8 pci_vpd_info_field_size(const u8 *info_field)
-{
- return info_field[2];
-}
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
/**
- * pci_vpd_find_tag - Locates the Resource Data Type tag provided
- * @buf: Pointer to buffered vpd data
- * @len: The length of the vpd buffer
- * @rdt: The Resource Data Type to search for
+ * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @kw: The keyword to search for
+ * @size: Pointer to field where length of found keyword data is returned
*
- * Returns the index where the Resource Data Type was found or
- * -ENOENT otherwise.
+ * Returns the index of the information field keyword data or -ENOENT if
+ * not found.
*/
-int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt);
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size);
/**
- * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the buffer area, relative to off, in which to search
- * @kw: The keyword to search for
+ * pci_vpd_check_csum - Check VPD checksum
+ * @buf: Pointer to buffered VPD data
+ * @len: VPD size
*
- * Returns the index where the information field keyword was found or
- * -ENOENT otherwise.
+ * Returns 1 if VPD has no checksum, otherwise 0 or an errno
*/
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
- unsigned int len, const char *kw);
+int pci_vpd_check_csum(const void *buf, unsigned int len);
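Taken together, the buffered API replaces the old tag walking. A consumer sketch (hypothetical driver code, error handling abbreviated):

	unsigned int vpd_len, kw_len;
	void *vpd = pci_vpd_alloc(dev, &vpd_len);
	int off;

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);
	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (off >= 0) {
		/* kw_len bytes of serial-number data start at vpd + off */
	}
	kfree(vpd);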
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2dac431d94ac..3a10d6ec3ee7 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -44,7 +44,7 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*reset_slot) (struct hotplug_slot *slot, int probe);
+ int (*reset_slot) (struct hotplug_slot *slot, bool probe);
};
/**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 06eccef155ad..011f2f1ea5bb 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2453,7 +2453,8 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
-#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
+#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
#define PCI_DEVICE_ID_MPC8308 0xc006
#define PCI_DEVICE_ID_MPC8315E 0x00b4
#define PCI_DEVICE_ID_MPC8315 0x00b5
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index b34a094b2258..860ba4bc5ead 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -41,36 +41,39 @@ struct dw_dma_slave {
/**
* struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_masters: Number of AHB masters supported by the controller
* @nr_channels: Number of channels supported by hardware (max 8)
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
- * @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
* @max_burst: Maximum value of burst transaction size supported by hardware
* per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH).
* @protctl: Protection control signals setting per channel.
+ * @quirks: Optional platform quirks.
*/
struct dw_dma_platform_data {
- unsigned int nr_channels;
+ u32 nr_masters;
+ u32 nr_channels;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
+ u32 chan_allocation_order;
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
- unsigned int block_size;
- unsigned char nr_masters;
- unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
- unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 chan_priority;
+ u32 block_size;
+ u32 data_width[DW_DMA_MAX_NR_MASTERS];
+ u32 multi_block[DW_DMA_MAX_NR_CHANNELS];
u32 max_burst[DW_DMA_MAX_NR_CHANNELS];
#define CHAN_PROTCTL_PRIVILEGED BIT(0)
#define CHAN_PROTCTL_BUFFERABLE BIT(1)
#define CHAN_PROTCTL_CACHEABLE BIT(2)
#define CHAN_PROTCTL_MASK GENMASK(2, 0)
- unsigned char protctl;
+ u32 protctl;
+#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0)
+ u32 quirks;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
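A platform might now describe the controller like this (illustrative values, not taken from this patch; the quirk flag is the only semantically new field):

	static struct dw_dma_platform_data pdata = {
		.nr_masters		= 2,
		.nr_channels		= 8,
		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
		.chan_priority		= CHAN_PRIORITY_ASCENDING,
		.block_size		= 4095,
		.data_width		= {4, 4},
		.quirks			= DW_DMA_QUIRK_XBAR_PRESENT,
	};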
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index a0b7e43049d5..725c9b784e60 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -404,7 +404,7 @@ int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
int pwmchip_add(struct pwm_chip *chip);
-int pwmchip_remove(struct pwm_chip *chip);
+void pwmchip_remove(struct pwm_chip *chip);
int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
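Callers can drop their error handling on the remove path; a hypothetical driver sketch:

	static int my_pwm_remove(struct platform_device *pdev)
	{
		struct my_pwm *priv = platform_get_drvdata(pdev);

		pwmchip_remove(&priv->chip);	/* now returns void */
		return 0;
	}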
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 0165824c5128..c0475d1c9885 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -109,6 +109,12 @@ extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+extern int qcom_scm_lmh_profile_change(u32 profile_id);
+extern bool qcom_scm_lmh_dcvsh_available(void);
+
#else
#include <linux/errno.h>
@@ -170,5 +176,13 @@ static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{ return -ENODEV; }
+
+static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version)
+ { return -ENODEV; }
+
+static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; }
+
+static inline bool qcom_scm_lmh_dcvsh_available(void) { return false; }
#endif
#endif
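Expected call pattern for the new LMH (Limits Management Hardware) entry points, sketched under the assumption that availability is probed first:

	if (!qcom_scm_lmh_dcvsh_available())
		return -ENODEV;
	ret = qcom_scm_lmh_profile_change(profile_id);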
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 426e98e0b675..352c6127cb90 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -142,22 +142,14 @@ struct rw_semaphore {
#define DECLARE_RWSEM(lockname) \
struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
+extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
struct lock_class_key *key);
-#else
-static inline void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
- struct lock_class_key *key)
-{
-}
-#endif
#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
- init_rwbase_rt(&(sem)->rwbase); \
- __rwsem_init((sem), #sem, &__key); \
+ __init_rwsem((sem), #sem, &__key); \
} while (0)
static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 2462f7d07695..00ed419dd464 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/percpu_counter.h>
#include <linux/refcount.h>
#include <linux/ratelimit.h>
@@ -13,7 +14,7 @@
struct user_struct {
refcount_t __count; /* reference count */
#ifdef CONFIG_EPOLL
- atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
+ struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */
#endif
unsigned long unix_inflight; /* How many files in flight in unix sockets */
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6bdb0db3e825..841e2f0f5240 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1940,7 +1940,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
WRITE_ONCE(prev->next, newsk);
- list->qlen++;
+ WRITE_ONCE(list->qlen, list->qlen + 1);
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index dcde82a4434c..85499f0586b0 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
+#include <linux/local_lock.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -40,6 +41,10 @@ enum stat_item {
CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
NR_SLUB_STAT_ITEMS };
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
struct kmem_cache_cpu {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
@@ -47,6 +52,7 @@ struct kmem_cache_cpu {
#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *partial; /* Partially allocated frozen slabs */
#endif
+ local_lock_t lock; /* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 60a3ab0ad2cc..252243c7783d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1373,6 +1373,9 @@ long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct old_timespec32 __user *timeout);
+long __do_semtimedop(int semid, struct sembuf *tsems, unsigned int nsops,
+ const struct timespec64 *timeout,
+ struct ipc_namespace *ns);
int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
int __user *optlen);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index d296f3b88fb9..c314893970b3 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -285,7 +285,7 @@ struct thermal_zone_params {
};
/**
- * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones
+ * struct thermal_zone_of_device_ops - callbacks for handling DT based zones
*
* Mandatory:
* @get_temp: a pointer to a function that reads the sensor temperature.
@@ -404,12 +404,13 @@ static inline void thermal_zone_device_unregister(
struct thermal_zone_device *tz)
{ }
static inline struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
+thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{ return ERR_PTR(-ENODEV); }
static inline struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
- char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+ const char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
{ return ERR_PTR(-ENODEV); }
static inline struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 18d5a74bcc3d..c34173e6c5f1 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -38,7 +38,7 @@
* Define a minimum number of pids per cpu. Heuristically based
* on original pid max of 32k for 32 cpus. Also, increase the
* minimum settable value for pid_max on the running system based
- * on similar defaults. See kernel/pid.c:pidmap_init() for details.
+ * on similar defaults. See kernel/pid.c:pid_idr_init() for details.
*/
#define PIDS_PER_CPU_DEFAULT 1024
#define PIDS_PER_CPU_MIN 8
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 5117cb5b5656..81b9686a2079 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -25,7 +25,9 @@ struct itimerspec64 {
#define TIME64_MIN (-TIME64_MAX - 1)
#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_MIN (-KTIME_MAX - 1)
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
/*
* Limits for settimeofday():
@@ -124,10 +126,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
*/
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
- /* Prevent multiplication overflow */
- if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ /* Prevent multiplication overflow / underflow */
+ if (ts->tv_sec >= KTIME_SEC_MAX)
return KTIME_MAX;
+ if (ts->tv_sec <= KTIME_SEC_MIN)
+ return KTIME_MIN;
+
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
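The old unsigned comparison mapped large negative tv_sec values to KTIME_MAX; the signed checks now saturate in the right direction. Illustrative:

	struct timespec64 ts = { .tv_sec = TIME64_MIN, .tv_nsec = 0 };
	s64 ns = timespec64_to_ns(&ts);	/* returns KTIME_MIN instead of KTIME_MAX */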
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index c05e903cef02..ac0394087f7d 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -200,16 +200,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
n = _copy_to_user(to, from, n);
return n;
}
-#ifdef CONFIG_COMPAT
-static __always_inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- might_fault();
- if (access_ok(to, n) && access_ok(from, n))
- n = raw_copy_in_user(to, from, n);
- return n;
-}
-#endif
#ifndef copy_mc_to_kernel
/*
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 82c3c3e819e0..5265024e8b90 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -47,6 +47,7 @@ struct iov_iter {
};
loff_t xarray_start;
};
+ size_t truncated;
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -254,8 +255,10 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
* conversion in assignment is by definition greater than all
* values of size_t, including old i->count.
*/
- if (i->count > count)
+ if (i->count > count) {
+ i->truncated += i->count - count;
i->count = count;
+ }
}
/*
@@ -264,6 +267,7 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
*/
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
+ i->truncated -= count - i->count;
i->count = count;
}
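The new field lets an iterator remember how much iov_iter_truncate() clipped so that iov_iter_reexpand() can undo it. Typical pairing (do_io() is a placeholder):

	size_t old_count = iov_iter_count(iter);

	iov_iter_truncate(iter, len);	/* accumulates the clipped amount in i->truncated */
	ret = do_io(iter);
	iov_iter_reexpand(iter, old_count);	/* restores count, rolls truncated back */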
diff --git a/include/linux/units.h b/include/linux/units.h
index 4a25e0cc8fb3..681fc652e3d7 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -20,9 +20,13 @@
#define PICO 1000000000000ULL
#define FEMTO 1000000000000000ULL
-#define MILLIWATT_PER_WATT 1000L
-#define MICROWATT_PER_MILLIWATT 1000L
-#define MICROWATT_PER_WATT 1000000L
+#define HZ_PER_KHZ 1000UL
+#define KHZ_PER_MHZ 1000UL
+#define HZ_PER_MHZ 1000000UL
+
+#define MILLIWATT_PER_WATT 1000UL
+#define MICROWATT_PER_MILLIWATT 1000UL
+#define MICROWATT_PER_WATT 1000000UL
#define ABSOLUTE_ZERO_MILLICELSIUS -273150
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 8cfe49d201dd..3972ab765de1 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -43,17 +43,17 @@ struct vdpa_vq_state_split {
* @last_used_idx: used index
*/
struct vdpa_vq_state_packed {
- u16 last_avail_counter:1;
- u16 last_avail_idx:15;
- u16 last_used_counter:1;
- u16 last_used_idx:15;
+ u16 last_avail_counter:1;
+ u16 last_avail_idx:15;
+ u16 last_used_counter:1;
+ u16 last_used_idx:15;
};
struct vdpa_vq_state {
- union {
- struct vdpa_vq_state_split split;
- struct vdpa_vq_state_packed packed;
- };
+ union {
+ struct vdpa_vq_state_split split;
+ struct vdpa_vq_state_packed packed;
+ };
};
struct vdpa_mgmt_dev;
@@ -65,6 +65,7 @@ struct vdpa_mgmt_dev;
* @config: the configuration ops for this device.
* @index: device index
* @features_valid: were features initialized? for legacy guests
+ * @use_va: indicate whether virtual address must be used by this device
* @nvqs: maximum number of supported virtqueues
* @mdev: management device pointer; caller must setup when registering device as part
* of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
@@ -75,6 +76,7 @@ struct vdpa_device {
const struct vdpa_config_ops *config;
unsigned int index;
bool features_valid;
+ bool use_va;
int nvqs;
struct vdpa_mgmt_dev *mdev;
};
@@ -90,6 +92,16 @@ struct vdpa_iova_range {
};
/**
+ * struct vdpa_map_file - corresponding file area for device memory mapping
+ * @file: vma->vm_file for the mapping
+ * @offset: mapping offset in the vm_file
+ */
+struct vdpa_map_file {
+ struct file *file;
+ u64 offset;
+};
+
+/**
* struct vdpa_config_ops - operations for configuring a vDPA device.
* Note: vDPA device drivers are required to implement all of the
* operations unless it is mentioned to be optional in the following
@@ -131,7 +143,7 @@ struct vdpa_iova_range {
* @vdev: vdpa device
* @idx: virtqueue index
* @state: pointer to returned state (last_avail_idx)
- * @get_vq_notification: Get the notification area for a virtqueue
+ * @get_vq_notification: Get the notification area for a virtqueue
* @vdev: vdpa device
* @idx: virtqueue index
* Returns the notification area
@@ -171,6 +183,9 @@ struct vdpa_iova_range {
* @set_status: Set the device status
* @vdev: vdpa device
* @status: virtio device status
+ * @reset: Reset device
+ * @vdev: vdpa device
+ * Returns integer: success (0) or error (< 0)
* @get_config_size: Get the size of the configuration space
* @vdev: vdpa device
* Returns size_t: configuration size
@@ -255,6 +270,7 @@ struct vdpa_config_ops {
u32 (*get_vendor_id)(struct vdpa_device *vdev);
u8 (*get_status)(struct vdpa_device *vdev);
void (*set_status)(struct vdpa_device *vdev, u8 status);
+ int (*reset)(struct vdpa_device *vdev);
size_t (*get_config_size)(struct vdpa_device *vdev);
void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
@@ -266,7 +282,7 @@ struct vdpa_config_ops {
/* DMA ops */
int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
- u64 pa, u32 perm);
+ u64 pa, u32 perm, void *opaque);
int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
/* Free device resources */
@@ -275,7 +291,8 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- size_t size, const char *name);
+ size_t size, const char *name,
+ bool use_va);
/**
* vdpa_alloc_device - allocate and initialize a vDPA device
@@ -285,15 +302,16 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
* @parent: the parent device
* @config: the bus operations that is supported by this device
* @name: name of the vdpa device
+ * @use_va: indicate whether virtual address must be used by this device
*
* Return allocated data structure or ERR_PTR upon error
*/
-#define vdpa_alloc_device(dev_struct, member, parent, config, name) \
+#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va) \
container_of(__vdpa_alloc_device( \
parent, config, \
sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- dev_struct, member)), name), \
+ dev_struct, member)), name, use_va), \
dev_struct, member)
int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
@@ -348,27 +366,27 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
return vdev->dma_dev;
}
-static inline void vdpa_reset(struct vdpa_device *vdev)
+static inline int vdpa_reset(struct vdpa_device *vdev)
{
- const struct vdpa_config_ops *ops = vdev->config;
+ const struct vdpa_config_ops *ops = vdev->config;
vdev->features_valid = false;
- ops->set_status(vdev, 0);
+ return ops->reset(vdev);
}
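Since reset may now fail, callers are expected to check the result; a sketch (assumed bus/driver context):

	ret = vdpa_reset(vdev);
	if (ret)
		dev_err(&vdev->dev, "failed to reset device: %d\n", ret);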
static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
- const struct vdpa_config_ops *ops = vdev->config;
+ const struct vdpa_config_ops *ops = vdev->config;
vdev->features_valid = true;
- return ops->set_features(vdev, features);
+ return ops->set_features(vdev, features);
}
-
-static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
- void *buf, unsigned int len)
+static inline void vdpa_get_config(struct vdpa_device *vdev,
+ unsigned int offset, void *buf,
+ unsigned int len)
{
- const struct vdpa_config_ops *ops = vdev->config;
+ const struct vdpa_config_ops *ops = vdev->config;
/*
* Config accesses aren't supposed to trigger before features are set.
diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h
index 6b09b786a762..2d0e2f52f938 100644
--- a/include/linux/vhost_iotlb.h
+++ b/include/linux/vhost_iotlb.h
@@ -17,6 +17,7 @@ struct vhost_iotlb_map {
u32 perm;
u32 flags_padding;
u64 __subtree_last;
+ void *opaque;
};
#define VHOST_IOTLB_FLAG_RETIRE 0x1
@@ -29,6 +30,8 @@ struct vhost_iotlb {
unsigned int flags;
};
+int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last,
+ u64 addr, unsigned int perm, void *opaque);
int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
u64 addr, unsigned int perm);
void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
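The _ctx variant threads an opaque per-mapping cookie through to the map entry (a struct vdpa_map_file in the VDUSE case). Sketch with assumed locals:

	/* same as vhost_iotlb_add_range(), plus an opaque cookie for the mapping */
	err = vhost_iotlb_add_range_ctx(iotlb, start, last, addr, perm, map_file);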
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 2644425b6dce..671d402c3778 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -225,9 +225,6 @@ static inline bool is_vm_area_hugepages(const void *addr)
}
#ifdef CONFIG_MMU
-int vmap_range(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
diff --git a/include/net/dsa.h b/include/net/dsa.h
index f9a17145255a..258867eff230 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
return dp->type == DSA_PORT_TYPE_USER;
}
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
new file mode 100644
index 000000000000..2f422f4f1fb9
--- /dev/null
+++ b/include/trace/events/damon.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM damon
+
+#if !defined(_TRACE_DAMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DAMON_H
+
+#include <linux/damon.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(damon_aggregated,
+
+ TP_PROTO(struct damon_target *t, struct damon_region *r,
+ unsigned int nr_regions),
+
+ TP_ARGS(t, r, nr_regions),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, target_id)
+ __field(unsigned int, nr_regions)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, nr_accesses)
+ ),
+
+ TP_fast_assign(
+ __entry->target_id = t->id;
+ __entry->nr_regions = nr_regions;
+ __entry->start = r->ar.start;
+ __entry->end = r->ar.end;
+ __entry->nr_accesses = r->nr_accesses;
+ ),
+
+ TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u",
+ __entry->target_id, __entry->nr_regions,
+ __entry->start, __entry->end, __entry->nr_accesses)
+);
+
+#endif /* _TRACE_DAMON_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
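Once the event is enabled under tracefs (events/damon/damon_aggregated), each aggregation interval emits one line per region in the TP_printk format above, e.g. (illustrative values):

	damon_aggregated: target_id=42 nr_regions=10 139772919803904-139772919808000: 3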
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 0b53e855c4ac..116ed4d5d0f8 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -75,7 +75,7 @@
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index 5d2ea93956ce..8a99c1cd417b 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d",
__entry->pfn,
- show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ show_page_flags(__entry->flags & PAGEFLAGS_MASK),
__entry->count,
__entry->mapcount, __entry->mapping, __entry->mt,
__entry->val)
@@ -88,7 +88,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d",
__entry->pfn,
- show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)),
+ show_page_flags(__entry->flags & PAGEFLAGS_MASK),
__entry->count,
__entry->mapcount, __entry->mapping, __entry->mt,
__entry->val, __entry->ret)
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 14c8fe863c6d..1c5fb86d455a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise)
#define __NR_remap_file_pages 234
__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
#define __NR_mbind 235
-__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 236
-__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 237
-__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_migrate_pages 238
-__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_move_pages 239
-__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
#define __NR_rt_tgsigqueueinfo 240
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index f6e8a005b113..8d206f27bb6d 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -50,7 +50,7 @@ enum { CXL_CMDS };
#define ___C(a, b) { b }
static const struct {
const char *name;
-} cxl_command_names[] = { CXL_CMDS };
+} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
/*
* Here's how this actually breaks out:
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index edc346a77c91..c750eac09fc9 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -9,6 +9,30 @@
#include <stdint.h>
#endif
+/* Driver command error status */
+enum idxd_scmd_stat {
+ IDXD_SCMD_DEV_ENABLED = 0x80000010,
+ IDXD_SCMD_DEV_NOT_ENABLED = 0x80000020,
+ IDXD_SCMD_WQ_ENABLED = 0x80000021,
+ IDXD_SCMD_DEV_DMA_ERR = 0x80020000,
+ IDXD_SCMD_WQ_NO_GRP = 0x80030000,
+ IDXD_SCMD_WQ_NO_NAME = 0x80040000,
+ IDXD_SCMD_WQ_NO_SVM = 0x80050000,
+ IDXD_SCMD_WQ_NO_THRESH = 0x80060000,
+ IDXD_SCMD_WQ_PORTAL_ERR = 0x80070000,
+ IDXD_SCMD_WQ_RES_ALLOC_ERR = 0x80080000,
+ IDXD_SCMD_PERCPU_ERR = 0x80090000,
+ IDXD_SCMD_DMA_CHAN_ERR = 0x800a0000,
+ IDXD_SCMD_CDEV_ERR = 0x800b0000,
+ IDXD_SCMD_WQ_NO_SWQ_SUPPORT = 0x800c0000,
+ IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000,
+ IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
+ IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
+};
+
+#define IDXD_SCMD_SOFTERR_MASK 0x80000000
+#define IDXD_SCMD_SOFTERR_SHIFT 16
+
/* Descriptor flags */
#define IDXD_OP_FLAG_FENCE 0x0001
#define IDXD_OP_FLAG_BOF 0x0002
diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h
new file mode 100644
index 000000000000..7cfe1c1280c0
--- /dev/null
+++ b/include/uapi/linux/vduse.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_VDUSE_H_
+#define _UAPI_VDUSE_H_
+
+#include <linux/types.h>
+
+#define VDUSE_BASE 0x81
+
+/* The ioctls for control device (/dev/vduse/control) */
+
+#define VDUSE_API_VERSION 0
+
+/*
+ * Get the version of the VDUSE API that the kernel supports (VDUSE_API_VERSION).
+ * This is used for future extension.
+ */
+#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64)
+
+/* Set the version of the VDUSE API that userspace supports. */
+#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64)
+
+/**
+ * struct vduse_dev_config - basic configuration of a VDUSE device
+ * @name: VDUSE device name, needs to be NUL terminated
+ * @vendor_id: virtio vendor id
+ * @device_id: virtio device id
+ * @features: virtio features
+ * @vq_num: the number of virtqueues
+ * @vq_align: the allocation alignment of virtqueue's metadata
+ * @reserved: for future use, needs to be initialized to zero
+ * @config_size: the size of the configuration space
+ * @config: the buffer of the configuration space
+ *
+ * Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device.
+ */
+struct vduse_dev_config {
+#define VDUSE_NAME_MAX 256
+ char name[VDUSE_NAME_MAX];
+ __u32 vendor_id;
+ __u32 device_id;
+ __u64 features;
+ __u32 vq_num;
+ __u32 vq_align;
+ __u32 reserved[13];
+ __u32 config_size;
+ __u8 config[];
+};
+
+/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */
+#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config)
+
+/*
+ * Destroy a VDUSE device. Make sure there are no more references
+ * to the char device (/dev/vduse/$NAME).
+ */
+#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX])
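A userspace sketch of the create path (error handling omitted; config_size and the device parameters here are assumptions, not values from this patch):

	int ctrl = open("/dev/vduse/control", O_RDWR);
	__u64 ver = VDUSE_API_VERSION;
	struct vduse_dev_config *cfg;

	ioctl(ctrl, VDUSE_SET_API_VERSION, &ver);
	cfg = calloc(1, sizeof(*cfg) + config_size);
	strcpy(cfg->name, "vduse-blk0");
	/* fill vendor_id, device_id, features, vq_num, vq_align, config[] ... */
	ioctl(ctrl, VDUSE_CREATE_DEV, cfg);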
+
+/* The ioctls for VDUSE device (/dev/vduse/$NAME) */
+
+/**
+ * struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
+ * @offset: the mmap offset on returned file descriptor
+ * @start: start of the IOVA region
+ * @last: last of the IOVA region
+ * @perm: access permission of the IOVA region
+ *
+ * Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region.
+ */
+struct vduse_iotlb_entry {
+ __u64 offset;
+ __u64 start;
+ __u64 last;
+#define VDUSE_ACCESS_RO 0x1
+#define VDUSE_ACCESS_WO 0x2
+#define VDUSE_ACCESS_RW 0x3
+ __u8 perm;
+};
+
+/*
+ * Find the first IOVA region that overlaps with the range [start, last]
+ * and return the corresponding file descriptor. Returns -EINVAL if no such
+ * IOVA region exists. The caller should set the start and last fields.
+ */
+#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry)
+
+/*
+ * Get the negotiated virtio features. It's a subset of the features in
+ * struct vduse_dev_config which can be accepted by virtio driver. It's
+ * only valid after FEATURES_OK status bit is set.
+ */
+#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64)
+
+/**
+ * struct vduse_config_data - data used to update configuration space
+ * @offset: the offset from the beginning of configuration space
+ * @length: the length to write to configuration space
+ * @buffer: the buffer used to write from
+ *
+ * Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device
+ * configuration space.
+ */
+struct vduse_config_data {
+ __u32 offset;
+ __u32 length;
+ __u8 buffer[];
+};
+
+/* Set device configuration space */
+#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data)
+
+/*
+ * Inject a config interrupt. It's usually used to notify the virtio driver
+ * that the device configuration space has changed.
+ */
+#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13)
+
+/**
+ * struct vduse_vq_config - basic configuration of a virtqueue
+ * @index: virtqueue index
+ * @max_size: the max size of virtqueue
+ * @reserved: for future use, needs to be initialized to zero
+ *
+ * Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue.
+ */
+struct vduse_vq_config {
+ __u32 index;
+ __u16 max_size;
+ __u16 reserved[13];
+};
+
+/*
+ * Set up the specified virtqueue. Make sure all virtqueues have been
+ * configured before the device is attached to the vDPA bus.
+ */
+#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config)
+
+/**
+ * struct vduse_vq_state_split - split virtqueue state
+ * @avail_index: available index
+ */
+struct vduse_vq_state_split {
+ __u16 avail_index;
+};
+
+/**
+ * struct vduse_vq_state_packed - packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vduse_vq_state_packed {
+ __u16 last_avail_counter;
+ __u16 last_avail_idx;
+ __u16 last_used_counter;
+ __u16 last_used_idx;
+};
+
+/**
+ * struct vduse_vq_info - information of a virtqueue
+ * @index: virtqueue index
+ * @num: the size of virtqueue
+ * @desc_addr: address of desc area
+ * @driver_addr: address of driver area
+ * @device_addr: address of device area
+ * @split: split virtqueue state
+ * @packed: packed virtqueue state
+ * @ready: ready status of virtqueue
+ *
+ * Structure used by VDUSE_VQ_GET_INFO ioctl to get virtqueue's information.
+ */
+struct vduse_vq_info {
+ __u32 index;
+ __u32 num;
+ __u64 desc_addr;
+ __u64 driver_addr;
+ __u64 device_addr;
+ union {
+ struct vduse_vq_state_split split;
+ struct vduse_vq_state_packed packed;
+ };
+ __u8 ready;
+};
+
+/* Get the specified virtqueue's information. The caller should set the index field. */
+#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info)
+
+/**
+ * struct vduse_vq_eventfd - eventfd configuration for a virtqueue
+ * @index: virtqueue index
+ * @fd: eventfd, -1 means de-assigning the eventfd
+ *
+ * Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd.
+ */
+struct vduse_vq_eventfd {
+ __u32 index;
+#define VDUSE_EVENTFD_DEASSIGN -1
+ int fd;
+};
+
+/*
+ * Set up the kick eventfd for the specified virtqueue. The kick eventfd
+ * is used by the VDUSE kernel module to notify userspace to consume the
+ * avail vring.
+ */
+#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd)
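
/*
 * [Editor's illustration, not part of this patch] Assigning a kick
 * eventfd to a virtqueue; the device emulation can then poll this fd to
 * learn when the driver has made buffers available.
 */
#include <sys/eventfd.h>

static int setup_kickfd(int dev_fd, __u32 vq_index)
{
	struct vduse_vq_eventfd cfg = {
		.index = vq_index,
		.fd = eventfd(0, EFD_CLOEXEC),
	};

	if (cfg.fd < 0)
		return -1;
	/* Passing VDUSE_EVENTFD_DEASSIGN as fd would remove a previous one. */
	return ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &cfg);
}
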
+
+/*
+ * Inject an interrupt for a specific virtqueue. It's used to notify the
+ * virtio driver to consume the used vring.
+ */
+#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32)
+
+/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */
+
+/**
+ * enum vduse_req_type - request type
+ * @VDUSE_GET_VQ_STATE: get the state for specified virtqueue from userspace
+ * @VDUSE_SET_STATUS: set the device status
+ * @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for the
+ *			specified IOVA range via the VDUSE_IOTLB_GET_FD ioctl
+ */
+enum vduse_req_type {
+ VDUSE_GET_VQ_STATE,
+ VDUSE_SET_STATUS,
+ VDUSE_UPDATE_IOTLB,
+};
+
+/**
+ * struct vduse_vq_state - virtqueue state
+ * @index: virtqueue index
+ * @split: split virtqueue state
+ * @packed: packed virtqueue state
+ */
+struct vduse_vq_state {
+ __u32 index;
+ union {
+ struct vduse_vq_state_split split;
+ struct vduse_vq_state_packed packed;
+ };
+};
+
+/**
+ * struct vduse_dev_status - device status
+ * @status: device status
+ */
+struct vduse_dev_status {
+ __u8 status;
+};
+
+/**
+ * struct vduse_iova_range - IOVA range [start, last]
+ * @start: start of the IOVA range
+ * @last: last of the IOVA range
+ */
+struct vduse_iova_range {
+ __u64 start;
+ __u64 last;
+};
+
+/**
+ * struct vduse_dev_request - control request
+ * @type: request type
+ * @request_id: request id
+ * @reserved: for future use
+ * @vq_state: virtqueue state, only index field is available
+ * @s: device status
+ * @iova: IOVA range for updating
+ * @padding: padding
+ *
+ * Structure used by read(2) on /dev/vduse/$NAME.
+ */
+struct vduse_dev_request {
+ __u32 type;
+ __u32 request_id;
+ __u32 reserved[4];
+ union {
+ struct vduse_vq_state vq_state;
+ struct vduse_dev_status s;
+ struct vduse_iova_range iova;
+ __u32 padding[32];
+ };
+};
+
+/**
+ * struct vduse_dev_response - response to control request
+ * @request_id: corresponding request id
+ * @result: the result of request
+ * @reserved: for future use, needs to be initialized to zero
+ * @vq_state: virtqueue state
+ * @padding: padding
+ *
+ * Structure used by write(2) on /dev/vduse/$NAME.
+ */
+struct vduse_dev_response {
+ __u32 request_id;
+#define VDUSE_REQ_RESULT_OK 0x00
+#define VDUSE_REQ_RESULT_FAILED 0x01
+ __u32 result;
+ __u32 reserved[4];
+ union {
+ struct vduse_vq_state vq_state;
+ __u32 padding[32];
+ };
+};
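
/*
 * [Editor's illustration, not part of this patch] One iteration of the
 * read(2)/write(2) control-message loop described above; the device
 * emulation hooks are placeholders.
 */
#include <unistd.h>

static int handle_one_request(int dev_fd)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp = { 0 };

	if (read(dev_fd, &req, sizeof(req)) != sizeof(req))
		return -1;

	resp.request_id = req.request_id;
	resp.result = VDUSE_REQ_RESULT_OK;
	switch (req.type) {
	case VDUSE_GET_VQ_STATE:
		/* Report the split-ring state tracked by the emulation. */
		resp.vq_state.index = req.vq_state.index;
		resp.vq_state.split.avail_index = 0;	/* placeholder */
		break;
	case VDUSE_SET_STATUS:
		/* Apply req.s.status to the emulated device here. */
		break;
	case VDUSE_UPDATE_IOTLB:
		/* Invalidate mappings in [req.iova.start, req.iova.last]. */
		break;
	default:
		resp.result = VDUSE_REQ_RESULT_FAILED;
		break;
	}
	if (write(dev_fd, &resp, sizeof(resp)) != sizeof(resp))
		return -1;
	return 0;
}
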
+
+#endif /* _UAPI_VDUSE_H_ */
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 50d352f5e86f..80d76b75bccd 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -54,9 +54,18 @@
#define VIRTIO_ID_SOUND 25 /* virtio sound */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_RPMB 28 /* virtio rpmb */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */
+#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */
#define VIRTIO_ID_SCMI 32 /* virtio SCMI */
+#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module */
#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */
+#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */
+#define VIRTIO_ID_CAN 36 /* virtio can */
+#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */
+#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */
+#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
#define VIRTIO_ID_BT 40 /* virtio bluetooth */
#define VIRTIO_ID_GPIO 41 /* virtio gpio */
diff --git a/include/uapi/linux/virtio_pcidev.h b/include/uapi/linux/virtio_pcidev.h
index 89daa88bcfef..668b07ce515b 100644
--- a/include/uapi/linux/virtio_pcidev.h
+++ b/include/uapi/linux/virtio_pcidev.h
@@ -9,13 +9,14 @@
/**
* enum virtio_pcidev_ops - virtual PCI device operations
+ * @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors
* @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
* the @data field should be filled in by the device (in little endian).
* @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
* the @data field contains the data to write (in little endian).
- * @VIRTIO_PCIDEV_OP_BAR_READ: read BAR mem/pio, size can be variable;
+ * @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable;
* the @data field should be filled in by the device (in little endian).
- * @VIRTIO_PCIDEV_OP_BAR_WRITE: write BAR mem/pio, size can be variable;
+ * @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable;
* the @data field contains the data to write (in little endian).
* @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
* the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
index 3dd3555b2740..64738838bee5 100644
--- a/include/uapi/linux/virtio_vsock.h
+++ b/include/uapi/linux/virtio_vsock.h
@@ -97,7 +97,8 @@ enum virtio_vsock_shutdown {
/* VIRTIO_VSOCK_OP_RW flags values */
enum virtio_vsock_rw {
- VIRTIO_VSOCK_SEQ_EOR = 1,
+ VIRTIO_VSOCK_SEQ_EOM = 1,
+ VIRTIO_VSOCK_SEQ_EOR = 2,
};
#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index a47a731e4527..7cc2a0f3f2f5 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -276,7 +276,17 @@ enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
HL_DEVICE_STATUS_MALFUNCTION,
- HL_DEVICE_STATUS_NEEDS_RESET
+ HL_DEVICE_STATUS_NEEDS_RESET,
+ HL_DEVICE_STATUS_IN_DEVICE_CREATION,
+ HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_DEVICE_CREATION
+};
+
+enum hl_server_type {
+ HL_SERVER_TYPE_UNKNOWN = 0,
+ HL_SERVER_GAUDI_HLS1 = 1,
+ HL_SERVER_GAUDI_HLS1H = 2,
+ HL_SERVER_GAUDI_TYPE1 = 3,
+ HL_SERVER_GAUDI_TYPE2 = 4
};
/* Opcode for management ioctl
@@ -337,17 +347,49 @@ enum hl_device_status {
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
+/**
+ * struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
+ * @sram_base_address: The first SRAM physical base address that is free to be
+ * used by the user.
+ * @dram_base_address: The first DRAM virtual or physical base address that is
+ * free to be used by the user.
+ * @dram_size: The DRAM size that is available to the user.
+ * @sram_size: The SRAM size that is available to the user.
+ * @num_of_events: The number of events that can be received from the f/w.
+ *                 This is needed so the user knows the size of the h/w
+ *                 events array to pass to the kernel when fetching the
+ *                 event counters.
+ * @device_id: PCI device ID of the ASIC.
+ * @module_id: Module ID of the ASIC for mezzanine cards in servers
+ * (From OCP spec).
+ * @first_available_interrupt_id: The first available interrupt ID for the user
+ * to be used when it works with user interrupts.
+ * @server_type: Server type that the Gaudi ASIC is currently installed in.
+ *               The value corresponds to enum hl_server_type.
+ * @cpld_version: CPLD version on the board.
+ * @psoc_pci_pll_nr: PCI PLL NR value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_nf: PCI PLL NF value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_od: PCI PLL OD value. Needed by the profiler in some ASICs.
+ * @psoc_pci_pll_div_factor: PCI PLL DIV factor value. Needed by the profiler
+ * in some ASICs.
+ * @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant
+ * for Goya/Gaudi only.
+ * @dram_enabled: Whether the DRAM is enabled.
+ * @cpucp_version: The CPUCP f/w version.
+ * @card_name: The card name as passed by the f/w.
+ * @dram_page_size: The DRAM physical page size.
+ */
struct hl_info_hw_ip_info {
__u64 sram_base_address;
__u64 dram_base_address;
__u64 dram_size;
__u32 sram_size;
__u32 num_of_events;
- __u32 device_id; /* PCI Device ID */
- __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */
+ __u32 device_id;
+ __u32 module_id;
__u32 reserved;
__u16 first_available_interrupt_id;
- __u16 reserved2;
+ __u16 server_type;
__u32 cpld_version;
__u32 psoc_pci_pll_nr;
__u32 psoc_pci_pll_nf;
@@ -358,7 +400,7 @@ struct hl_info_hw_ip_info {
__u8 pad[2];
__u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
- __u64 reserved3;
+ __u64 reserved2;
__u64 dram_page_size;
};
@@ -628,12 +670,21 @@ struct hl_cs_chunk {
__u64 cb_handle;
/* Relevant only when HL_CS_FLAGS_WAIT or
- * HL_CS_FLAGS_COLLECTIVE_WAIT is set.
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set
* This holds address of array of u64 values that contain
- * signal CS sequence numbers. The wait described by this job
- * will listen on all those signals (wait event per signal)
+ * signal CS sequence numbers. The wait described by
+ * this job will listen on all those signals
+ * (wait event per signal)
*/
__u64 signal_seq_arr;
+
+ /*
+ * Relevant only when HL_CS_FLAGS_WAIT or
+ * HL_CS_FLAGS_COLLECTIVE_WAIT is set
+ * along with HL_CS_FLAGS_ENCAP_SIGNALS.
+ * This is the CS sequence which has the encapsulated signals.
+ */
+ __u64 encaps_signal_seq;
};
/* Index of queue to put the CB on */
@@ -651,6 +702,17 @@ struct hl_cs_chunk {
* Number of entries in signal_seq_arr
*/
__u32 num_signal_seq_arr;
+
+	/* Relevant only when HL_CS_FLAGS_WAIT or
+	 * HL_CS_FLAGS_COLLECTIVE_WAIT is set along
+	 * with HL_CS_FLAGS_ENCAP_SIGNALS.
+	 * This sets the signals range that the user wants to wait for
+	 * out of the whole reserved signals range.
+	 * e.g. if the signals range is 20 and the user doesn't want
+	 * to wait for signal 8, they set this offset to 7, then
+	 * call the API again with 9 and so on up to 20.
+	 */
+ __u32 encaps_signal_offset;
};
/* HL_CS_CHUNK_FLAGS_* */
@@ -678,6 +740,28 @@ struct hl_cs_chunk {
#define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200
#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400
+/*
+ * The encapsulated signals CS is merged into the existing CS ioctls.
+ * To use this feature, follow the procedure below:
+ * 1. Reserve signals: set the CS type to HL_CS_FLAGS_RESERVE_SIGNALS_ONLY.
+ *    The output of this API is the SOB offset from CFG_BASE. This address
+ *    is used to patch CB cmds to do the signaling for this SOB by
+ *    incrementing its value.
+ *    To revert the reservation, use the HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY
+ *    CS type. Note that this might fail if the SOB value went out of sync,
+ *    i.e. another signaling request to the same SOB occurred between the
+ *    reserve and unreserve calls.
+ * 2. Use a staged CS to do the encapsulated signaling jobs:
+ *    use HL_CS_FLAGS_STAGED_SUBMISSION and HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
+ *    along with the HL_CS_FLAGS_ENCAP_SIGNALS flag, and set the
+ *    encaps_signal_offset field. This offset allows the app to wait on part
+ *    of the reserved signals.
+ * 3. Use a WAIT/COLLECTIVE WAIT CS along with the HL_CS_FLAGS_ENCAP_SIGNALS
+ *    flag to wait for the encapsulated signals.
+ */
+#define HL_CS_FLAGS_ENCAP_SIGNALS 0x800
+#define HL_CS_FLAGS_RESERVE_SIGNALS_ONLY 0x1000
+#define HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY 0x2000
+
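
/*
 * [Editor's illustration, not part of this patch] Step 1 of the procedure
 * above as a userspace sketch. The HL_IOCTL_CS name, the open device fd
 * and the count/queue values are assumptions; <sys/ioctl.h> is assumed.
 */
static int reserve_signals(int fd, __u32 *handle_id, __u32 *sob_offset)
{
	union hl_cs_args args = { 0 };

	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
	args.in.encaps_signals_count = 32;	/* illustrative */
	args.in.encaps_signals_q_idx = 0;	/* illustrative stream */
	if (ioctl(fd, HL_IOCTL_CS, &args))
		return -1;
	*handle_id = args.out.handle_id;	/* feeds steps 2 and 3 */
	*sob_offset = args.out.sob_base_addr_offset; /* used to patch CBs */
	return 0;
}
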
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
@@ -690,10 +774,35 @@ struct hl_cs_in {
/* holds address of array of hl_cs_chunk for execution phase */
__u64 chunks_execute;
- /* Sequence number of a staged submission CS
- * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set
- */
- __u64 seq;
+ union {
+ /*
+ * Sequence number of a staged submission CS
+ * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set and
+ * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST is unset.
+ */
+ __u64 seq;
+
+ /*
+ * Encapsulated signals handle id
+ * Valid for two flows:
+ * 1. CS with encapsulated signals:
+ * when HL_CS_FLAGS_STAGED_SUBMISSION and
+ * HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
+ * and HL_CS_FLAGS_ENCAP_SIGNALS are set.
+ * 2. unreserve signals:
+ * valid when HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY is set.
+ */
+ __u32 encaps_sig_handle_id;
+
+ /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
+ struct {
+ /* Encapsulated signals number */
+ __u32 encaps_signals_count;
+
+ /* Encapsulated signals queue index (stream) */
+ __u32 encaps_signals_q_idx;
+ };
+ };
/* Number of chunks in restore phase array. Maximum number is
* HL_MAX_JOBS_PER_CS
@@ -718,14 +827,31 @@ struct hl_cs_in {
};
struct hl_cs_out {
+ union {
+ /*
+ * seq holds the sequence number of the CS to pass to wait
+ * ioctl. All values are valid except for 0 and ULLONG_MAX
+ */
+ __u64 seq;
+
+ /* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
+ struct {
+			/* This is the reserved signal handle id */
+ __u32 handle_id;
+
+ /* This is the signals count */
+ __u32 count;
+ };
+ };
+
+ /* HL_CS_STATUS */
+ __u32 status;
+
/*
- * seq holds the sequence number of the CS to pass to wait ioctl. All
- * values are valid except for 0 and ULLONG_MAX
+ * SOB base address offset
+ * Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set
*/
- __u64 seq;
- /* HL_CS_STATUS_* */
- __u32 status;
- __u32 pad;
+ __u32 sob_base_addr_offset;
};
union hl_cs_args {
@@ -735,11 +861,18 @@ union hl_cs_args {
#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2
#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
+#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4
+
+#define HL_WAIT_MULTI_CS_LIST_MAX_LEN 32
struct hl_wait_cs_in {
union {
struct {
- /* Command submission sequence number */
+			/*
+			 * For wait_cs this holds the CS sequence number.
+			 * For a multi-CS wait this holds a user pointer to
+			 * an array of CS sequence numbers.
+			 */
__u64 seq;
/* Absolute timeout to wait for command submission
* in microseconds
@@ -767,12 +900,17 @@ struct hl_wait_cs_in {
/* Context ID - Currently not in use */
__u32 ctx_id;
+
/* HL_WAIT_CS_FLAGS_*
* If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include
	 * the interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK; in
	 * order not to specify an interrupt id, set the mask to all 1s.
*/
__u32 flags;
+
+	/* Multi CS API info - valid entries in multi-CS array */
+ __u8 seq_arr_len;
+ __u8 pad[7];
};
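
/*
 * [Editor's illustration, not part of this patch] Waiting on two command
 * submissions at once with the multi-CS extension above; the ioctl name
 * HL_IOCTL_WAIT_CS and the timeout are assumptions.
 */
#include <stdint.h>

static int wait_two_cs(int fd, __u64 seq_a, __u64 seq_b, __u32 *done_map)
{
	__u64 seq_arr[2] = { seq_a, seq_b };
	union hl_wait_cs_args wait = { 0 };

	wait.in.seq = (__u64)(uintptr_t)seq_arr;	/* pointer, not a number */
	wait.in.seq_arr_len = 2;
	wait.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
	wait.in.timeout_us = 1000000;			/* illustrative 1s */
	if (ioctl(fd, HL_IOCTL_WAIT_CS, &wait))
		return -1;
	*done_map = wait.out.cs_completion_map;	/* one bit per completed CS */
	return 0;
}
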
#define HL_WAIT_CS_STATUS_COMPLETED 0
@@ -789,8 +927,15 @@ struct hl_wait_cs_out {
__u32 status;
/* HL_WAIT_CS_STATUS_FLAG* */
__u32 flags;
- /* valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set */
+ /*
+ * valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set
+ * for wait_cs: timestamp of CS completion
+ * for wait_multi_cs: timestamp of FIRST CS completion
+ */
__s64 timestamp_nsec;
+ /* multi CS completion bitmap */
+ __u32 cs_completion_map;
+ __u32 pad;
};
union hl_wait_cs_args {
@@ -813,6 +958,7 @@ union hl_wait_cs_args {
#define HL_MEM_CONTIGUOUS 0x1
#define HL_MEM_SHARED 0x2
#define HL_MEM_USERPTR 0x4
+#define HL_MEM_FORCE_HINT 0x8
struct hl_mem_in {
union {
diff --git a/init/Kconfig b/init/Kconfig
index 8cb97f141b70..11f8a845f259 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -139,7 +139,7 @@ config COMPILE_TEST
config WERROR
bool "Compile the kernel with warnings as errors"
- default y
+ default COMPILE_TEST
help
A kernel build should not cause any compiler warnings, and this
enables the '-Werror' flag to enforce that rule by default.
diff --git a/init/do_mounts.c b/init/do_mounts.c
index b691d6891e51..2ed30ff6c906 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -338,31 +338,22 @@ __setup("rootflags=", root_data_setup);
__setup("rootfstype=", fs_names_setup);
__setup("rootdelay=", root_delay_setup);
-static void __init get_fs_names(char *page)
+static int __init split_fs_names(char *page, char *names)
{
- char *s = page;
+ int count = 0;
+ char *p = page;
- if (root_fs_names) {
- strcpy(page, root_fs_names);
- while (*s++) {
- if (s[-1] == ',')
- s[-1] = '\0';
- }
- } else {
- int len = get_filesystem_list(page);
- char *p, *next;
-
- page[len] = '\0';
- for (p = page-1; p; p = next) {
- next = strchr(++p, '\n');
- if (*p++ != '\t')
- continue;
- while ((*s++ = *p++) != '\n')
- ;
- s[-1] = '\0';
- }
+ strcpy(p, root_fs_names);
+ while (*p++) {
+ if (p[-1] == ',')
+ p[-1] = '\0';
}
- *s = '\0';
+ *p = '\0';
+
+ for (p = page; *p; p += strlen(p)+1)
+ count++;
+
+ return count;
}
static int __init do_mount_root(const char *name, const char *fs,
@@ -408,12 +399,16 @@ void __init mount_block_root(char *name, int flags)
char *fs_names = page_address(page);
char *p;
char b[BDEVNAME_SIZE];
+ int num_fs, i;
scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)",
MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
- get_fs_names(fs_names);
+ if (root_fs_names)
+ num_fs = split_fs_names(fs_names, root_fs_names);
+ else
+ num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
retry:
- for (p = fs_names; *p; p += strlen(p)+1) {
+ for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) {
int err = do_mount_root(name, p, flags, root_mount_data);
switch (err) {
case 0:
@@ -442,7 +437,7 @@ retry:
printk("List of all partitions:\n");
printk_all_partitions();
printk("No filesystem could mount root, tried: ");
- for (p = fs_names; *p; p += strlen(p)+1)
+ for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1)
printk(" %s", p);
printk("\n");
panic("VFS: Unable to mount root fs on %s", b);
@@ -526,6 +521,47 @@ static int __init mount_cifs_root(void)
}
#endif
+static bool __init fs_is_nodev(char *fstype)
+{
+ struct file_system_type *fs = get_fs_type(fstype);
+ bool ret = false;
+
+ if (fs) {
+ ret = !(fs->fs_flags & FS_REQUIRES_DEV);
+ put_filesystem(fs);
+ }
+
+ return ret;
+}
+
+static int __init mount_nodev_root(void)
+{
+ char *fs_names, *fstype;
+ int err = -EINVAL;
+ int num_fs, i;
+
+ fs_names = (void *)__get_free_page(GFP_KERNEL);
+ if (!fs_names)
+ return -EINVAL;
+ num_fs = split_fs_names(fs_names, root_fs_names);
+
+ for (i = 0, fstype = fs_names; i < num_fs;
+ i++, fstype += strlen(fstype) + 1) {
+ if (!fs_is_nodev(fstype))
+ continue;
+ err = do_mount_root(root_device_name, fstype, root_mountflags,
+ root_mount_data);
+ if (!err)
+ break;
+ if (err != -EACCES && err != -EINVAL)
+ panic("VFS: Unable to mount root \"%s\" (%s), err=%d\n",
+ root_device_name, fstype, err);
+ }
+
+ free_page((unsigned long)fs_names);
+ return err;
+}
+
void __init mount_root(void)
{
#ifdef CONFIG_ROOT_NFS
@@ -542,6 +578,10 @@ void __init mount_root(void)
return;
}
#endif
+ if (ROOT_DEV == 0 && root_device_name && root_fs_names) {
+ if (mount_nodev_root() == 0)
+ return;
+ }
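
/*
 * [Editor's illustration, not part of this patch] An assumed example of
 * a command line exercising this path: "root=myfs rootfstype=virtiofs".
 * "myfs" is a filesystem tag rather than a block device, so ROOT_DEV
 * stays 0 and mount_nodev_root() tries each nodev filesystem listed in
 * rootfstype in order.
 */
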
#ifdef CONFIG_BLOCK
{
int err = create_dev("/dev/root", ROOT_DEV);
diff --git a/init/initramfs.c b/init/initramfs.c
index af27abc59643..a842c0544745 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>
+#include <linux/umh.h>
static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
loff_t *pos)
@@ -727,6 +728,7 @@ static int __init populate_rootfs(void)
{
initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
&initramfs_domain);
+ usermodehelper_enable();
if (!initramfs_async)
wait_for_initramfs();
return 0;
diff --git a/init/main.c b/init/main.c
index daad6979f782..3f7216934441 100644
--- a/init/main.c
+++ b/init/main.c
@@ -153,10 +153,10 @@ static char *extra_init_args;
#ifdef CONFIG_BOOT_CONFIG
/* Is bootconfig on command line? */
static bool bootconfig_found;
-static bool initargs_found;
+static size_t initargs_offs;
#else
# define bootconfig_found false
-# define initargs_found false
+# define initargs_offs 0
#endif
static char *execute_command;
@@ -422,9 +422,9 @@ static void __init setup_boot_config(void)
if (IS_ERR(err) || !bootconfig_found)
return;
- /* parse_args() stops at '--' and returns an address */
+	/* parse_args() stops at the param following '--' and returns its address */
if (err)
- initargs_found = true;
+ initargs_offs = err - tmp_cmdline;
if (!data) {
pr_err("'bootconfig' found on command line, but no bootconfig found\n");
@@ -468,7 +468,12 @@ static void __init setup_boot_config(void)
return;
}
-#else
+static void __init exit_boot_config(void)
+{
+ xbc_destroy_all();
+}
+
+#else /* !CONFIG_BOOT_CONFIG */
static void __init setup_boot_config(void)
{
@@ -481,7 +486,11 @@ static int __init warn_bootconfig(char *str)
pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
return 0;
}
-#endif
+
+#define exit_boot_config() do {} while (0)
+
+#endif /* CONFIG_BOOT_CONFIG */
+
early_param("bootconfig", warn_bootconfig);
/* Change NUL term back to "=", to make "param" the whole string. */
@@ -646,16 +655,21 @@ static void __init setup_command_line(char *command_line)
* Append supplemental init boot args to saved_command_line
* so that user can check what command line options passed
* to init.
+ * The order should always be
+ * " -- "[bootconfig init-param][cmdline init-param]
*/
- len = strlen(saved_command_line);
- if (initargs_found) {
- saved_command_line[len++] = ' ';
+ if (initargs_offs) {
+ len = xlen + initargs_offs;
+ strcpy(saved_command_line + len, extra_init_args);
+ len += ilen - 4; /* strlen(extra_init_args) */
+ strcpy(saved_command_line + len,
+ boot_command_line + initargs_offs - 1);
} else {
+ len = strlen(saved_command_line);
strcpy(saved_command_line + len, " -- ");
len += 4;
+ strcpy(saved_command_line + len, extra_init_args);
}
-
- strcpy(saved_command_line + len, extra_init_args);
}
}
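
/*
 * [Editor's illustration, not part of this patch] An assumed example of
 * the ordering rule above: a cmdline of "console=ttyS0 -- debug" plus a
 * bootconfig init argument "init.x=1" yields
 *   "console=ttyS0 -- init.x=1 debug"
 * i.e. bootconfig init params are spliced in before the cmdline ones.
 */
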
@@ -777,6 +791,8 @@ void __init __weak poking_init(void) { }
void __init __weak pgtable_cache_init(void) { }
+void __init __weak trap_init(void) { }
+
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
@@ -908,7 +924,7 @@ static void __init print_unknown_bootoptions(void)
end += sprintf(end, " %s", *p);
pr_notice("Unknown command line parameters:%s\n", unknown_options);
- memblock_free(__pa(unknown_options), len);
+ memblock_free_ptr(unknown_options, len);
}
asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
@@ -1392,7 +1408,6 @@ static void __init do_basic_setup(void)
driver_init();
init_irq_proc();
do_ctors();
- usermodehelper_enable();
do_initcalls();
}
@@ -1493,6 +1508,7 @@ static int __ref kernel_init(void *unused)
kprobe_free_init_mem();
ftrace_free_init_mem();
kgdb_free_init_mem();
+ exit_boot_config();
free_initmem();
mark_readonly();
diff --git a/init/noinitramfs.c b/init/noinitramfs.c
index 3d62b07f3bb9..d1d26b93d25c 100644
--- a/init/noinitramfs.c
+++ b/init/noinitramfs.c
@@ -10,6 +10,7 @@
#include <linux/kdev_t.h>
#include <linux/syscalls.h>
#include <linux/init_syscalls.h>
+#include <linux/umh.h>
/*
* Create a simple rootfs that is similar to the default initramfs
@@ -18,6 +19,7 @@ static int __init default_rootfs(void)
{
int err;
+ usermodehelper_enable();
err = init_mkdir("/dev", 0755);
if (err < 0)
goto out;
diff --git a/ipc/sem.c b/ipc/sem.c
index 1a8b9f0ac047..6693daf4fe11 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1984,47 +1984,34 @@ out:
return un;
}
-static long do_semtimedop(int semid, struct sembuf __user *tsops,
- unsigned nsops, const struct timespec64 *timeout)
+long __do_semtimedop(int semid, struct sembuf *sops,
+ unsigned nsops, const struct timespec64 *timeout,
+ struct ipc_namespace *ns)
{
int error = -EINVAL;
struct sem_array *sma;
- struct sembuf fast_sops[SEMOPM_FAST];
- struct sembuf *sops = fast_sops, *sop;
+ struct sembuf *sop;
struct sem_undo *un;
int max, locknum;
bool undos = false, alter = false, dupsop = false;
struct sem_queue queue;
unsigned long dup = 0, jiffies_left = 0;
- struct ipc_namespace *ns;
-
- ns = current->nsproxy->ipc_ns;
if (nsops < 1 || semid < 0)
return -EINVAL;
if (nsops > ns->sc_semopm)
return -E2BIG;
- if (nsops > SEMOPM_FAST) {
- sops = kvmalloc_array(nsops, sizeof(*sops),
- GFP_KERNEL_ACCOUNT);
- if (sops == NULL)
- return -ENOMEM;
- }
-
- if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
- error = -EFAULT;
- goto out_free;
- }
if (timeout) {
if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
timeout->tv_nsec >= 1000000000L) {
error = -EINVAL;
- goto out_free;
+ goto out;
}
jiffies_left = timespec64_to_jiffies(timeout);
}
+
max = 0;
for (sop = sops; sop < sops + nsops; sop++) {
unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
@@ -2053,7 +2040,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
- goto out_free;
+ goto out;
}
} else {
un = NULL;
@@ -2064,25 +2051,25 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
if (IS_ERR(sma)) {
rcu_read_unlock();
error = PTR_ERR(sma);
- goto out_free;
+ goto out;
}
error = -EFBIG;
if (max >= sma->sem_nsems) {
rcu_read_unlock();
- goto out_free;
+ goto out;
}
error = -EACCES;
if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
rcu_read_unlock();
- goto out_free;
+ goto out;
}
error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
if (error) {
rcu_read_unlock();
- goto out_free;
+ goto out;
}
error = -EIDRM;
@@ -2096,7 +2083,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
* entangled here and why it's RMID race safe on comments at sem_lock()
*/
if (!ipc_valid_object(&sma->sem_perm))
- goto out_unlock_free;
+ goto out_unlock;
/*
* semid identifiers are not unique - find_alloc_undo may have
* allocated an undo structure, it was invalidated by an RMID
@@ -2105,7 +2092,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
* "un" itself is guaranteed by rcu.
*/
if (un && un->semid == -1)
- goto out_unlock_free;
+ goto out_unlock;
queue.sops = sops;
queue.nsops = nsops;
@@ -2131,10 +2118,10 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
rcu_read_unlock();
wake_up_q(&wake_q);
- goto out_free;
+ goto out;
}
if (error < 0) /* non-blocking error path */
- goto out_unlock_free;
+ goto out_unlock;
/*
* We need to sleep on this operation, so we put the current
@@ -2199,14 +2186,14 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
if (error != -EINTR) {
/* see SEM_BARRIER_2 for purpose/pairing */
smp_acquire__after_ctrl_dep();
- goto out_free;
+ goto out;
}
rcu_read_lock();
locknum = sem_lock(sma, sops, nsops);
if (!ipc_valid_object(&sma->sem_perm))
- goto out_unlock_free;
+ goto out_unlock;
/*
	 * No necessity for any barrier: We are protected by sem_lock()
@@ -2218,7 +2205,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
* Leave without unlink_queue(), but with sem_unlock().
*/
if (error != -EINTR)
- goto out_unlock_free;
+ goto out_unlock;
/*
* If an interrupt occurred we have to clean up the queue.
@@ -2229,13 +2216,45 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
unlink_queue(sma, &queue);
-out_unlock_free:
+out_unlock:
sem_unlock(sma, locknum);
rcu_read_unlock();
+out:
+ return error;
+}
+
+static long do_semtimedop(int semid, struct sembuf __user *tsops,
+ unsigned nsops, const struct timespec64 *timeout)
+{
+ struct sembuf fast_sops[SEMOPM_FAST];
+ struct sembuf *sops = fast_sops;
+ struct ipc_namespace *ns;
+ int ret;
+
+ ns = current->nsproxy->ipc_ns;
+ if (nsops > ns->sc_semopm)
+ return -E2BIG;
+ if (nsops < 1)
+ return -EINVAL;
+
+ if (nsops > SEMOPM_FAST) {
+ sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
+ if (sops == NULL)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
+
out_free:
if (sops != fast_sops)
kvfree(sops);
- return error;
+
+ return ret;
}
long ksys_semtimedop(int semid, struct sembuf __user *tsops,
diff --git a/ipc/util.c b/ipc/util.c
index 0027e47626b7..d48d8cfa1f3f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -788,21 +788,13 @@ struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s)
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
loff_t *new_pos)
{
- struct kern_ipc_perm *ipc;
- int total, id;
-
- total = 0;
- for (id = 0; id < pos && total < ids->in_use; id++) {
- ipc = idr_find(&ids->ipcs_idr, id);
- if (ipc != NULL)
- total++;
- }
+ struct kern_ipc_perm *ipc = NULL;
+ int max_idx = ipc_get_maxidx(ids);
- ipc = NULL;
- if (total >= ids->in_use)
+ if (max_idx == -1 || pos > max_idx)
goto out;
- for (; pos < ipc_mni; pos++) {
+ for (; pos <= max_idx; pos++) {
ipc = idr_find(&ids->ipcs_idr, pos);
if (ipc != NULL) {
rcu_read_lock();
diff --git a/kernel/acct.c b/kernel/acct.c
index a64102be2bb0..23a7ab8e6cbc 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -478,7 +478,7 @@ static void do_acct_process(struct bsd_acct_struct *acct)
/*
* Accounting records are not subject to resource limits.
*/
- flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ flim = rlimit(RLIMIT_FSIZE);
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
/* Perform file operations on behalf of whoever enabled accounting */
orig_cred = override_creds(file->f_cred);
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index ca3cd9aaa6ce..7b4afb7d96db 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*/
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index e546b18d27da..a4b040793f44 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*/
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e8eefdf8cf3e..09a3fd97d329 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -179,7 +179,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
* with build_id.
*/
if (!user || !current || !current->mm || irq_work_busy ||
- !mmap_read_trylock_non_owner(current->mm)) {
+ !mmap_read_trylock(current->mm)) {
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@@ -204,9 +204,15 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
if (!work) {
- mmap_read_unlock_non_owner(current->mm);
+ mmap_read_unlock(current->mm);
} else {
work->mm = current->mm;
+
+ /* The lock will be released once we're out of interrupt
+ * context. Tell lockdep that we've released it now so
+ * it doesn't complain that we forgot to release it.
+ */
+ rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
irq_work_queue(&work->irq_work);
}
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 047ac4b4703b..e76b55917905 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9912,6 +9912,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
nr_linfo = attr->line_info_cnt;
if (!nr_linfo)
return 0;
+ if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
+ return -EINVAL;
rec_size = attr->line_info_rec_size;
if (rec_size < MIN_BPF_LINEINFO_SIZE ||
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 881ce1470beb..8afa8690d288 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6572,74 +6572,44 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-
-DEFINE_SPINLOCK(cgroup_sk_update_lock);
-static bool cgroup_sk_alloc_disabled __read_mostly;
-
-void cgroup_sk_alloc_disable(void)
-{
- if (cgroup_sk_alloc_disabled)
- return;
- pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
- cgroup_sk_alloc_disabled = true;
-}
-
-#else
-
-#define cgroup_sk_alloc_disabled false
-
-#endif
-
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
- if (cgroup_sk_alloc_disabled) {
- skcd->no_refcnt = 1;
- return;
- }
-
/* Don't associate the sock with unrelated interrupted task's cgroup. */
if (in_interrupt())
return;
rcu_read_lock();
-
while (true) {
struct css_set *cset;
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
- skcd->val = (unsigned long)cset->dfl_cgrp;
+ skcd->cgroup = cset->dfl_cgrp;
cgroup_bpf_get(cset->dfl_cgrp);
break;
}
cpu_relax();
}
-
rcu_read_unlock();
}
void cgroup_sk_clone(struct sock_cgroup_data *skcd)
{
- if (skcd->val) {
- if (skcd->no_refcnt)
- return;
- /*
- * We might be cloning a socket which is left in an empty
- * cgroup and the cgroup might have already been rmdir'd.
- * Don't use cgroup_get_live().
- */
- cgroup_get(sock_cgroup_ptr(skcd));
- cgroup_bpf_get(sock_cgroup_ptr(skcd));
- }
+ struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+
+ /*
+ * We might be cloning a socket which is left in an empty
+ * cgroup and the cgroup might have already been rmdir'd.
+ * Don't use cgroup_get_live().
+ */
+ cgroup_get(cgrp);
+ cgroup_bpf_get(cgrp);
}
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
- if (skcd->no_refcnt)
- return;
cgroup_bpf_put(cgrp);
cgroup_put(cgrp);
}
diff --git a/kernel/compat.c b/kernel/compat.c
index 05adfd6fa8bf..55551989d9da 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -269,24 +269,3 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
return 0;
}
EXPORT_SYMBOL_GPL(get_compat_sigset);
-
-/*
- * Allocate user-space memory for the duration of a single system call,
- * in order to marshall parameters inside a compat thunk.
- */
-void __user *compat_alloc_user_space(unsigned long len)
-{
- void __user *ptr;
-
- /* If len would occupy more than half of the entire compat space... */
- if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
- return NULL;
-
- ptr = arch_compat_alloc_user_space(len);
-
- if (unlikely(!access_ok(ptr, len)))
- return NULL;
-
- return ptr;
-}
-EXPORT_SYMBOL_GPL(compat_alloc_user_space);
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d2e10a3df0b..38681ad44c76 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1063,6 +1063,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->pmd_huge_pte = NULL;
#endif
mm_init_uprobes_state(mm);
+ hugetlb_count_init(mm);
if (current->mm) {
mm->flags = current->mm->flags & MMF_INIT_MASK;
@@ -1262,7 +1263,6 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
rcu_read_unlock();
return exe_file;
}
-EXPORT_SYMBOL(get_mm_exe_file);
/**
* get_task_exe_file - acquire a reference to the task's executable file
@@ -1285,7 +1285,6 @@ struct file *get_task_exe_file(struct task_struct *task)
task_unlock(task);
return exe_file;
}
-EXPORT_SYMBOL(get_task_exe_file);
/**
* get_task_mm - acquire a reference to the task's mm
diff --git a/kernel/futex.c b/kernel/futex.c
index e7b4c6121da4..c15ad276fd15 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1263,6 +1263,36 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
return -ESRCH;
}
+static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key,
+ struct futex_pi_state **ps)
+{
+ /*
+ * No existing pi state. First waiter. [2]
+ *
+ * This creates pi_state, we have hb->lock held, this means nothing can
+ * observe this state, wait_lock is irrelevant.
+ */
+ struct futex_pi_state *pi_state = alloc_pi_state();
+
+ /*
+ * Initialize the pi_mutex in locked state and make @p
+ * the owner of it:
+ */
+ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
+
+ /* Store the key for possible exit cleanups: */
+ pi_state->key = *key;
+
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &p->pi_state_list);
+ /*
+ * Assignment without holding pi_state->pi_mutex.wait_lock is safe
+ * because there is no concurrency as the object is not published yet.
+ */
+ pi_state->owner = p;
+
+ *ps = pi_state;
+}
/*
* Lookup the task for the TID provided from user space and attach to
* it after doing proper sanity checks.
@@ -1272,7 +1302,6 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
struct task_struct **exiting)
{
pid_t pid = uval & FUTEX_TID_MASK;
- struct futex_pi_state *pi_state;
struct task_struct *p;
/*
@@ -1324,36 +1353,11 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
return ret;
}
- /*
- * No existing pi state. First waiter. [2]
- *
- * This creates pi_state, we have hb->lock held, this means nothing can
- * observe this state, wait_lock is irrelevant.
- */
- pi_state = alloc_pi_state();
-
- /*
- * Initialize the pi_mutex in locked state and make @p
- * the owner of it:
- */
- rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
-
- /* Store the key for possible exit cleanups: */
- pi_state->key = *key;
-
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &p->pi_state_list);
- /*
- * Assignment without holding pi_state->pi_mutex.wait_lock is safe
- * because there is no concurrency as the object is not published yet.
- */
- pi_state->owner = p;
+ __attach_to_pi_owner(p, key, ps);
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
- *ps = pi_state;
-
return 0;
}
@@ -1454,8 +1458,26 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
newval |= FUTEX_WAITERS;
ret = lock_pi_update_atomic(uaddr, uval, newval);
- /* If the take over worked, return 1 */
- return ret < 0 ? ret : 1;
+ if (ret)
+ return ret;
+
+ /*
+ * If the waiter bit was requested the caller also needs PI
+ * state attached to the new owner of the user space futex.
+ *
+ * @task is guaranteed to be alive and it cannot be exiting
+ * because it is either sleeping or waiting in
+ * futex_requeue_pi_wakeup_sync().
+ *
+ * No need to do the full attach_to_pi_owner() exercise
+ * because @task is known and valid.
+ */
+ if (set_waiters) {
+ raw_spin_lock_irq(&task->pi_lock);
+ __attach_to_pi_owner(task, key, ps);
+ raw_spin_unlock_irq(&task->pi_lock);
+ }
+ return 1;
}
/*
@@ -1939,12 +1961,26 @@ static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
* @hb: the hash_bucket of the requeue target futex
*
* During futex_requeue, with requeue_pi=1, it is possible to acquire the
- * target futex if it is uncontended or via a lock steal. Set the futex_q key
- * to the requeue target futex so the waiter can detect the wakeup on the right
- * futex, but remove it from the hb and NULL the rt_waiter so it can detect
- * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
- * to protect access to the pi_state to fixup the owner later. Must be called
- * with both q->lock_ptr and hb->lock held.
+ * target futex if it is uncontended or via a lock steal.
+ *
+ * 1) Set @q::key to the requeue target futex key so the waiter can detect
+ * the wakeup on the right futex.
+ *
+ * 2) Dequeue @q from the hash bucket.
+ *
+ * 3) Set @q::rt_waiter to NULL so the woken up task can detect atomic lock
+ * acquisition.
+ *
+ * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that
+ * the waiter has to fixup the pi state.
+ *
+ * 5) Complete the requeue state so the waiter can make progress. After
+ * this point the waiter task can return from the syscall immediately in
+ * case that the pi state does not have to be fixed up.
+ *
+ * 6) Wake the waiter task.
+ *
+ * Must be called with both q->lock_ptr and hb->lock held.
*/
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
@@ -1998,7 +2034,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
{
struct futex_q *top_waiter = NULL;
u32 curval;
- int ret, vpid;
+ int ret;
if (get_futex_value_locked(&curval, pifutex))
return -EFAULT;
@@ -2025,7 +2061,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
* and waiting on the 'waitqueue' futex which is always !PI.
*/
if (!top_waiter->rt_waiter || top_waiter->pi_state)
- ret = -EINVAL;
+ return -EINVAL;
/* Ensure we requeue to the expected futex. */
if (!match_futex(top_waiter->requeue_pi_key, key2))
@@ -2036,17 +2072,23 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
return -EAGAIN;
/*
- * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
- * the contended case or if set_waiters is 1. The pi_state is returned
- * in ps in contended cases.
+ * Try to take the lock for top_waiter and set the FUTEX_WAITERS bit
+ * in the contended case or if @set_waiters is true.
+ *
+ * In the contended case PI state is attached to the lock owner. If
+ * the user space lock can be acquired then PI state is attached to
+ * the new owner (@top_waiter->task) when @set_waiters is true.
*/
- vpid = task_pid_vnr(top_waiter->task);
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
exiting, set_waiters);
if (ret == 1) {
- /* Dequeue, wake up and update top_waiter::requeue_state */
+ /*
+ * Lock was acquired in user space and PI state was
+ * attached to @top_waiter->task. That means state is fully
+ * consistent and the waiter can return to user space
+ * immediately after the wakeup.
+ */
requeue_pi_wake_futex(top_waiter, key2, hb2);
- return vpid;
} else if (ret < 0) {
/* Rewind top_waiter::requeue_state */
futex_requeue_pi_complete(top_waiter, ret);
@@ -2208,19 +2250,26 @@ retry_private:
&exiting, nr_requeue);
/*
- * At this point the top_waiter has either taken uaddr2 or is
- * waiting on it. If the former, then the pi_state will not
- * exist yet, look it up one more time to ensure we have a
- * reference to it. If the lock was taken, @ret contains the
- * VPID of the top waiter task.
- * If the lock was not taken, we have pi_state and an initial
- * refcount on it. In case of an error we have nothing.
+ * At this point the top_waiter has either taken uaddr2 or
+	 * is waiting on it. In both cases pi_state has been
+	 * established with an initial refcount taken on it. In case
+	 * of an error there's nothing.
*
* The top waiter's requeue_state is up to date:
*
- * - If the lock was acquired atomically (ret > 0), then
+ * - If the lock was acquired atomically (ret == 1), then
* the state is Q_REQUEUE_PI_LOCKED.
*
+ * The top waiter has been dequeued and woken up and can
+ * return to user space immediately. The kernel/user
+	 * space state is consistent. If more waiters must be
+	 * requeued, the WAITERS bit in the user space futex is set
+	 * so the top waiter task has to go into the syscall
+	 * slowpath to unlock the futex. This
+ * will block until this requeue operation has been
+ * completed and the hash bucket locks have been
+ * dropped.
+ *
* - If the trylock failed with an error (ret < 0) then
* the state is either Q_REQUEUE_PI_NONE, i.e. "nothing
* happened", or Q_REQUEUE_PI_IGNORE when there was an
@@ -2234,36 +2283,20 @@ retry_private:
* the same sanity checks for requeue_pi as the loop
* below does.
*/
- if (ret > 0) {
- WARN_ON(pi_state);
- task_count++;
- /*
- * If futex_proxy_trylock_atomic() acquired the
- * user space futex, then the user space value
- * @uaddr2 has been set to the @hb1's top waiter
- * task VPID. This task is guaranteed to be alive
- * and cannot be exiting because it is either
- * sleeping or blocked on @hb2 lock.
- *
- * The @uaddr2 futex cannot have waiters either as
- * otherwise futex_proxy_trylock_atomic() would not
- * have succeeded.
- *
- * In order to requeue waiters to @hb2, pi state is
- * required. Hand in the VPID value (@ret) and
- * allocate PI state with an initial refcount on
- * it.
- */
- ret = attach_to_pi_owner(uaddr2, ret, &key2, &pi_state,
- &exiting);
- WARN_ON(ret);
- }
-
switch (ret) {
case 0:
/* We hold a reference on the pi state. */
break;
+ case 1:
+ /*
+ * futex_proxy_trylock_atomic() acquired the user space
+ * futex. Adjust task_count.
+ */
+ task_count++;
+ ret = 0;
+ break;
+
/*
* If the above failed, then pi_state is NULL and
* waiter::requeue_state is correct.
@@ -2395,9 +2428,8 @@ retry_private:
}
/*
- * We took an extra initial reference to the pi_state either in
- * futex_proxy_trylock_atomic() or in attach_to_pi_owner(). We need
- * to drop it here again.
+ * We took an extra initial reference to the pi_state in
+ * futex_proxy_trylock_atomic(). We need to drop it here again.
*/
put_pi_state(pi_state);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c82c6c06f051..b5e40f069768 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -19,26 +19,9 @@
#include "kexec_internal.h"
-static int copy_user_segment_list(struct kimage *image,
- unsigned long nr_segments,
- struct kexec_segment __user *segments)
-{
- int ret;
- size_t segment_bytes;
-
- /* Read in the segments */
- image->nr_segments = nr_segments;
- segment_bytes = nr_segments * sizeof(*segments);
- ret = copy_from_user(image->segment, segments, segment_bytes);
- if (ret)
- ret = -EFAULT;
-
- return ret;
-}
-
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
unsigned long nr_segments,
- struct kexec_segment __user *segments,
+ struct kexec_segment *segments,
unsigned long flags)
{
int ret;
@@ -58,10 +41,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
return -ENOMEM;
image->start = entry;
-
- ret = copy_user_segment_list(image, nr_segments, segments);
- if (ret)
- goto out_free_image;
+ image->nr_segments = nr_segments;
+ memcpy(image->segment, segments, nr_segments * sizeof(*segments));
if (kexec_on_panic) {
/* Enable special crash kernel control page alloc policy. */
@@ -104,12 +85,23 @@ out_free_image:
}
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
- struct kexec_segment __user *segments, unsigned long flags)
+ struct kexec_segment *segments, unsigned long flags)
{
struct kimage **dest_image, *image;
unsigned long i;
int ret;
+ /*
+ * Because we write directly to the reserved memory region when loading
+	 * crash kernels, we need a mutex here to prevent multiple crash kernels
+	 * from attempting to load simultaneously, and to prevent a crash kernel
+	 * from loading over the top of an in-use crash kernel.
+ *
+ * KISS: always take the mutex.
+ */
+ if (!mutex_trylock(&kexec_mutex))
+ return -EBUSY;
+
if (flags & KEXEC_ON_CRASH) {
dest_image = &kexec_crash_image;
if (kexec_crash_image)
@@ -121,7 +113,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
if (nr_segments == 0) {
/* Uninstall image */
kimage_free(xchg(dest_image, NULL));
- return 0;
+ ret = 0;
+ goto out_unlock;
}
if (flags & KEXEC_ON_CRASH) {
/*
@@ -134,7 +127,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
if (ret)
- return ret;
+ goto out_unlock;
if (flags & KEXEC_PRESERVE_CONTEXT)
image->preserve_context = 1;
@@ -171,6 +164,8 @@ out:
arch_kexec_protect_crashkres();
kimage_free(image);
+out_unlock:
+ mutex_unlock(&kexec_mutex);
return ret;
}
@@ -236,7 +231,8 @@ static inline int kexec_load_check(unsigned long nr_segments,
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
struct kexec_segment __user *, segments, unsigned long, flags)
{
- int result;
+ struct kexec_segment *ksegments;
+ unsigned long result;
result = kexec_load_check(nr_segments, flags);
if (result)
@@ -247,20 +243,12 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
return -EINVAL;
- /* Because we write directly to the reserved memory
- * region when loading crash kernels we need a mutex here to
- * prevent multiple crash kernels from attempting to load
- * simultaneously, and to prevent a crash kernel from loading
- * over the top of a in use crash kernel.
- *
- * KISS: always take the mutex.
- */
- if (!mutex_trylock(&kexec_mutex))
- return -EBUSY;
+ ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
+ if (IS_ERR(ksegments))
+ return PTR_ERR(ksegments);
- result = do_kexec_load(entry, nr_segments, segments, flags);
-
- mutex_unlock(&kexec_mutex);
+ result = do_kexec_load(entry, nr_segments, ksegments, flags);
+ kfree(ksegments);
return result;
}
@@ -272,7 +260,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
compat_ulong_t, flags)
{
struct compat_kexec_segment in;
- struct kexec_segment out, __user *ksegments;
+ struct kexec_segment *ksegments;
unsigned long i, result;
result = kexec_load_check(nr_segments, flags);
@@ -285,37 +273,26 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
return -EINVAL;
- ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
+ ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
+ GFP_KERNEL);
+ if (!ksegments)
+ return -ENOMEM;
+
for (i = 0; i < nr_segments; i++) {
result = copy_from_user(&in, &segments[i], sizeof(in));
if (result)
- return -EFAULT;
+ goto fail;
- out.buf = compat_ptr(in.buf);
- out.bufsz = in.bufsz;
- out.mem = in.mem;
- out.memsz = in.memsz;
-
- result = copy_to_user(&ksegments[i], &out, sizeof(out));
- if (result)
- return -EFAULT;
+ ksegments[i].buf = compat_ptr(in.buf);
+ ksegments[i].bufsz = in.bufsz;
+ ksegments[i].mem = in.mem;
+ ksegments[i].memsz = in.memsz;
}
- /* Because we write directly to the reserved memory
- * region when loading crash kernels we need a mutex here to
- * prevent multiple crash kernels from attempting to load
- * simultaneously, and to prevent a crash kernel from loading
- * over the top of a in use crash kernel.
- *
- * KISS: always take the mutex.
- */
- if (!mutex_trylock(&kexec_mutex))
- return -EBUSY;
-
result = do_kexec_load(entry, nr_segments, ksegments, flags);
- mutex_unlock(&kexec_mutex);
-
+fail:
+ kfree(ksegments);
return result;
}
#endif
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8eabdc79602b..6bb116c559b4 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -753,7 +753,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
* other configuration and we fail to report; also, see
* lockdep.
*/
- if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter->ww_ctx)
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
ret = 0;
raw_spin_unlock(&lock->wait_lock);
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 9215b4d6a9de..000e8d5a2884 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1376,15 +1376,17 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
#include "rwbase_rt.c"
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __rwsem_init(struct rw_semaphore *sem, const char *name,
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key)
{
+ init_rwbase_rt(&(sem)->rwbase);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
-}
-EXPORT_SYMBOL(__rwsem_init);
#endif
+}
+EXPORT_SYMBOL(__init_rwsem);
static inline void __down_read(struct rw_semaphore *sem)
{
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 825277e1e742..a8d0a58deebc 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1166,9 +1166,9 @@ void __init setup_log_buf(int early)
return;
err_free_descs:
- memblock_free(__pa(new_descs), new_descs_size);
+ memblock_free_ptr(new_descs, new_descs_size);
err_free_log_buf:
- memblock_free(__pa(new_log_buf), new_log_buf_len);
+ memblock_free_ptr(new_log_buf, new_log_buf_len);
}
static bool __read_mostly ignore_loglevel;
diff --git a/kernel/profile.c b/kernel/profile.c
index c2ebddb5e974..eb9c7f0f5ac5 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -41,7 +41,8 @@ struct profile_hit {
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
static atomic_t *prof_buffer;
-static unsigned long prof_len, prof_shift;
+static unsigned long prof_len;
+static unsigned short int prof_shift;
int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);
@@ -67,8 +68,8 @@ int profile_setup(char *str)
if (str[strlen(sleepstr)] == ',')
str += strlen(sleepstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
- pr_info("kernel sleep profiling enabled (shift: %ld)\n",
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
+ pr_info("kernel sleep profiling enabled (shift: %u)\n",
prof_shift);
#else
pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
@@ -78,21 +79,21 @@ int profile_setup(char *str)
if (str[strlen(schedstr)] == ',')
str += strlen(schedstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
- pr_info("kernel schedule profiling enabled (shift: %ld)\n",
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
+ pr_info("kernel schedule profiling enabled (shift: %u)\n",
prof_shift);
} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
prof_on = KVM_PROFILING;
if (str[strlen(kvmstr)] == ',')
str += strlen(kvmstr) + 1;
if (get_option(&str, &par))
- prof_shift = par;
- pr_info("kernel KVM profiling enabled (shift: %ld)\n",
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
+ pr_info("kernel KVM profiling enabled (shift: %u)\n",
prof_shift);
} else if (get_option(&str, &par)) {
- prof_shift = par;
+ prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
prof_on = CPU_PROFILING;
- pr_info("kernel profiling enabled (shift: %ld)\n",
+ pr_info("kernel profiling enabled (shift: %u)\n",
prof_shift);
}
return 1;
@@ -468,7 +469,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned long p = *ppos;
ssize_t read;
char *pnt;
- unsigned int sample_step = 1 << prof_shift;
+ unsigned long sample_step = 1UL << prof_shift;
profile_flip_buffers();
if (p >= (prof_len+1)*sizeof(unsigned int))
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c4462c454ab9..1bba4128a3e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8836,7 +8836,6 @@ static void balance_push(struct rq *rq)
struct task_struct *push_task = rq->curr;
lockdep_assert_rq_held(rq);
- SCHED_WARN_ON(rq->cpu != smp_processor_id());
/*
* Ensure the thing is persistent until balance_push_set(.on = false);
@@ -8844,9 +8843,10 @@ static void balance_push(struct rq *rq)
rq->balance_callback = &balance_push_callback;
/*
- * Only active while going offline.
+ * Only active while going offline and when invoked on the outgoing
+ * CPU.
*/
- if (!cpu_dying(rq->cpu))
+ if (!cpu_dying(rq->cpu) || rq != this_rq())
return;
/*
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 912b47aa99d8..d17b0a5ce6ac 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -379,10 +379,10 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
cpuidle_use_deepest_state(latency_ns);
it.done = 0;
- hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
it.timer.function = idle_inject_timer_fn;
hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
- HRTIMER_MODE_REL_PINNED);
+ HRTIMER_MODE_REL_PINNED_HARD);
while (!READ_ONCE(it.done))
do_idle();
diff --git a/kernel/sys.c b/kernel/sys.c
index b6aa704f861d..8fdac0d90504 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1930,13 +1930,6 @@ static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
error = -EINVAL;
/*
- * @brk should be after @end_data in traditional maps.
- */
- if (prctl_map->start_brk <= prctl_map->end_data ||
- prctl_map->brk <= prctl_map->end_data)
- goto out;
-
- /*
* Neither we should allow to override limits if they set.
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 64578adfe115..f43d89d92860 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -292,15 +292,10 @@ COND_SYSCALL(process_madvise);
COND_SYSCALL(process_mrelease);
COND_SYSCALL(remap_file_pages);
COND_SYSCALL(mbind);
-COND_SYSCALL_COMPAT(mbind);
COND_SYSCALL(get_mempolicy);
-COND_SYSCALL_COMPAT(get_mempolicy);
COND_SYSCALL(set_mempolicy);
-COND_SYSCALL_COMPAT(set_mempolicy);
COND_SYSCALL(migrate_pages);
-COND_SYSCALL_COMPAT(migrate_pages);
COND_SYSCALL(move_pages);
-COND_SYSCALL_COMPAT(move_pages);
COND_SYSCALL(perf_event_open);
COND_SYSCALL(accept4);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2dbf797aa133..7896d30d90f7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2603,6 +2603,15 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
}
EXPORT_SYMBOL_GPL(trace_handle_return);
+static unsigned short migration_disable_value(void)
+{
+#if defined(CONFIG_SMP)
+ return current->migration_disabled;
+#else
+ return 0;
+#endif
+}
+
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{
unsigned int trace_flags = irqs_status;
@@ -2621,7 +2630,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
- return (trace_flags << 16) | (pc & 0xff);
+ return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
+ (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
struct ring_buffer_event *
@@ -4189,9 +4199,10 @@ static void print_lat_help_header(struct seq_file *m)
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
- "# |||| / delay \n"
- "# cmd pid ||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
+ "# |||| / _-=> migrate-disable \n"
+ "# ||||| / delay \n"
+ "# cmd pid |||||| time | caller \n"
+ "# \\ / |||||| \\ | / \n");
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -4229,9 +4240,10 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
- seq_printf(m, "# %.*s||| / delay\n", prec, space);
- seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
+ seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
+ seq_printf(m, "# %.*s|||| / delay\n", prec, space);
+ seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+ seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
}
void
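The tracing hunk above packs two 4-bit counters into the byte that previously carried only the preempt count: the low nibble holds the preempt depth and the high nibble the migrate-disable depth, each saturating at 0xf. trace_print_lat_fmt() in the trace_output.c hunk further down decodes the same byte. A minimal sketch of the scheme (helper names are illustrative, not kernel APIs):

/* values above 15 saturate at 0xf instead of corrupting the other field */
static inline unsigned char pack_counts(unsigned int preempt,
					unsigned int migrate_disable)
{
	if (preempt > 0xf)
		preempt = 0xf;
	if (migrate_disable > 0xf)
		migrate_disable = 0xf;
	return preempt | (migrate_disable << 4);
}

static inline unsigned int unpack_preempt(unsigned char b)
{
	return b & 0xf;		/* low nibble */
}

static inline unsigned int unpack_migrate_disable(unsigned char b)
{
	return b >> 4;		/* high nibble */
}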
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 1060b0446032..8d252f63cd78 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -219,13 +219,12 @@ static int __init
trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp,
char *end, const char *key)
{
- struct xbc_node *knode, *anode;
+ struct xbc_node *anode;
const char *p;
char sep;
- knode = xbc_node_find_child(hnode, key);
- if (knode) {
- anode = xbc_node_get_child(knode);
+ p = xbc_node_find_value(hnode, key, &anode);
+ if (p) {
if (!anode) {
pr_err("hist.%s requires value(s).\n", key);
return -EINVAL;
@@ -263,9 +262,9 @@ trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
append_printf(bufp, end, ":%s(%s)", handler, p);
/* Compose 'action' parameter */
- knode = xbc_node_find_child(hnode, "trace");
+ knode = xbc_node_find_subkey(hnode, "trace");
if (!knode)
- knode = xbc_node_find_child(hnode, "save");
+ knode = xbc_node_find_subkey(hnode, "save");
if (knode) {
anode = xbc_node_get_child(knode);
@@ -284,7 +283,7 @@ trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
sep = ',';
}
append_printf(bufp, end, ")");
- } else if (xbc_node_find_child(hnode, "snapshot")) {
+ } else if (xbc_node_find_subkey(hnode, "snapshot")) {
append_printf(bufp, end, ".snapshot()");
} else {
pr_err("hist.%s requires an action.\n",
@@ -315,7 +314,7 @@ trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
break;
}
- if (xbc_node_find_child(hnode, param))
+ if (xbc_node_find_subkey(hnode, param))
ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);
return ret;
@@ -375,7 +374,7 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
if (p)
append_printf(&buf, end, ":name=%s", p);
- node = xbc_node_find_child(hnode, "var");
+ node = xbc_node_find_subkey(hnode, "var");
if (node) {
xbc_node_for_each_key_value(node, knode, p) {
/* Expression must not include spaces. */
@@ -386,21 +385,21 @@ trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
}
/* Histogram control attributes (mutual exclusive) */
- if (xbc_node_find_child(hnode, "pause"))
+ if (xbc_node_find_value(hnode, "pause", NULL))
append_printf(&buf, end, ":pause");
- else if (xbc_node_find_child(hnode, "continue"))
+ else if (xbc_node_find_value(hnode, "continue", NULL))
append_printf(&buf, end, ":continue");
- else if (xbc_node_find_child(hnode, "clear"))
+ else if (xbc_node_find_value(hnode, "clear", NULL))
append_printf(&buf, end, ":clear");
/* Histogram handler and actions */
- node = xbc_node_find_child(hnode, "onmax");
+ node = xbc_node_find_subkey(hnode, "onmax");
if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
return -EINVAL;
- node = xbc_node_find_child(hnode, "onchange");
+ node = xbc_node_find_subkey(hnode, "onchange");
if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
return -EINVAL;
- node = xbc_node_find_child(hnode, "onmatch");
+ node = xbc_node_find_subkey(hnode, "onmatch");
if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
return -EINVAL;
@@ -437,7 +436,7 @@ trace_boot_init_histograms(struct trace_event_file *file,
}
}
- if (xbc_node_find_child(hnode, "keys")) {
+ if (xbc_node_find_subkey(hnode, "keys")) {
if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
tmp = kstrdup(buf, GFP_KERNEL);
if (trigger_process_regex(file, buf) < 0)
@@ -496,7 +495,7 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
else if (trigger_process_regex(file, buf) < 0)
pr_err("Failed to apply an action: %s\n", p);
}
- anode = xbc_node_find_child(enode, "hist");
+ anode = xbc_node_find_subkey(enode, "hist");
if (anode)
trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf));
} else if (xbc_node_find_value(enode, "actions", NULL))
@@ -518,18 +517,18 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
bool enable, enable_all = false;
const char *data;
- node = xbc_node_find_child(node, "event");
+ node = xbc_node_find_subkey(node, "event");
if (!node)
return;
/* per-event key starts with "event.GROUP.EVENT" */
- xbc_node_for_each_child(node, gnode) {
+ xbc_node_for_each_subkey(node, gnode) {
data = xbc_node_get_data(gnode);
if (!strcmp(data, "enable")) {
enable_all = true;
continue;
}
enable = false;
- xbc_node_for_each_child(gnode, enode) {
+ xbc_node_for_each_subkey(gnode, enode) {
data = xbc_node_get_data(enode);
if (!strcmp(data, "enable")) {
enable = true;
@@ -621,11 +620,11 @@ trace_boot_init_instances(struct xbc_node *node)
struct trace_array *tr;
const char *p;
- node = xbc_node_find_child(node, "instance");
+ node = xbc_node_find_subkey(node, "instance");
if (!node)
return;
- xbc_node_for_each_child(node, inode) {
+ xbc_node_for_each_subkey(node, inode) {
p = xbc_node_get_data(inode);
if (!p || *p == '\0')
continue;
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 56a96e9750cf..3044b762cbd7 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -151,7 +151,7 @@ static struct trace_eprobe *alloc_event_probe(const char *group,
ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
if (!ep) {
- trace_event_put_ref(ep->event);
+ trace_event_put_ref(event);
goto error;
}
ep->event = event;
@@ -851,7 +851,8 @@ static int __trace_eprobe_create(int argc, const char *argv[])
ret = PTR_ERR(ep);
/* This must return -ENOMEM, else there is a bug */
WARN_ON_ONCE(ret != -ENOMEM);
- goto error; /* We know ep is not allocated */
+ ep = NULL;
+ goto error;
}
argc -= 2; argv += 2;
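Both eprobe hunks close the same class of error-path hole: on allocation failure the old code either dereferenced the never-allocated 'ep' or jumped to a cleanup label that assumed 'ep' was valid. A self-contained sketch of the defensive pattern (all names are illustrative):

#include <stdlib.h>

struct eprobe { int dummy; };

static void release_eprobe(struct eprobe *ep)
{
	free(ep);			/* free(NULL) is a defined no-op */
}

static int create_eprobe(int fail)
{
	struct eprobe *ep = NULL;	/* never left as garbage */
	int ret = 0;

	ep = fail ? NULL : malloc(sizeof(*ep));
	if (!ep) {
		ret = -1;
		goto error;		/* safe: ep is NULL, not uninitialized */
	}
	/* ... further setup steps may also 'goto error' ... */
error:
	release_eprobe(ep);		/* one exit path serves all cases */
	return ret;
}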
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 1349b6de5eeb..830b3b9940f4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -181,6 +181,7 @@ static int trace_define_common_fields(void)
__common_field(unsigned short, type);
__common_field(unsigned char, flags);
+ /* Holds both preempt_count and migrate_disable */
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 9d91b1c06957..a6061a69aa84 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -508,7 +508,8 @@ struct track_data {
struct hist_elt_data {
char *comm;
u64 *var_ref_vals;
- char *field_var_str[SYNTH_FIELDS_MAX];
+ char **field_var_str;
+ int n_field_var_str;
};
struct snapshot_context {
@@ -1401,9 +1402,11 @@ static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
unsigned int i;
- for (i = 0; i < SYNTH_FIELDS_MAX; i++)
+ for (i = 0; i < elt_data->n_field_var_str; i++)
kfree(elt_data->field_var_str[i]);
+ kfree(elt_data->field_var_str);
+
kfree(elt_data->comm);
kfree(elt_data);
}
@@ -1451,6 +1454,13 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
size = STR_VAR_LEN_MAX;
+ elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
+ if (!elt_data->field_var_str) {
+ hist_elt_data_free(elt_data);
+ return -EINVAL;
+ }
+ elt_data->n_field_var_str = n_str;
+
for (i = 0; i < n_str; i++) {
elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
if (!elt_data->field_var_str[i]) {
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 65b08b8e5bf8..ce053619f289 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1548,7 +1548,7 @@ static int start_kthread(unsigned int cpu)
static int start_per_cpu_kthreads(struct trace_array *tr)
{
struct cpumask *current_mask = &save_cpumask;
- int retval;
+ int retval = 0;
int cpu;
cpus_read_lock();
@@ -1568,13 +1568,13 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
retval = start_kthread(cpu);
if (retval) {
stop_per_cpu_kthreads();
- return retval;
+ break;
}
}
cpus_read_unlock();
- return 0;
+ return retval;
}
#ifdef CONFIG_HOTPLUG_CPU
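The start_per_cpu_kthreads() change is a lock-balance fix: the removed 'return retval' left the function while cpus_read_lock() was still held, whereas breaking out of the loop guarantees cpus_read_unlock() runs on every path. A user-space sketch of the idiom, with a pthread rwlock standing in for the CPU hotplug lock:

#include <pthread.h>

static pthread_rwlock_t cpus_lock = PTHREAD_RWLOCK_INITIALIZER;

static int start_one(int cpu)
{
	return cpu == 3 ? -1 : 0;	/* pretend CPU 3 fails */
}

static int start_all(int ncpus)
{
	int retval = 0;
	int cpu;

	pthread_rwlock_rdlock(&cpus_lock);	/* cpus_read_lock() */
	for (cpu = 0; cpu < ncpus; cpu++) {
		retval = start_one(cpu);
		if (retval)
			break;		/* NOT return: the unlock must run */
	}
	pthread_rwlock_unlock(&cpus_lock);	/* cpus_read_unlock() */

	return retval;
}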
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a0bf446bb034..c2ca40e8595b 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -492,8 +492,13 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
trace_seq_printf(s, "%c%c%c",
irqs_off, need_resched, hardsoft_irq);
- if (entry->preempt_count)
- trace_seq_printf(s, "%x", entry->preempt_count);
+ if (entry->preempt_count & 0xf)
+ trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
+ else
+ trace_seq_putc(s, '.');
+
+ if (entry->preempt_count & 0xf0)
+ trace_seq_printf(s, "%x", entry->preempt_count >> 4);
else
trace_seq_putc(s, '.');
@@ -656,7 +661,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
trace_seq_printf(
s, "%16s %7d %3d %d %08x %08lx ",
comm, entry->pid, iter->cpu, entry->flags,
- entry->preempt_count, iter->idx);
+ entry->preempt_count & 0xf, iter->idx);
} else {
lat_print_generic(s, entry, iter->cpu);
}
diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h
index 4007fe95cf42..b29595fe3ac5 100644
--- a/kernel/trace/trace_synth.h
+++ b/kernel/trace/trace_synth.h
@@ -5,7 +5,7 @@
#include "trace_dynevent.h"
#define SYNTH_SYSTEM "synthetic"
-#define SYNTH_FIELDS_MAX 32
+#define SYNTH_FIELDS_MAX 64
#define STR_VAR_LEN_MAX MAX_FILTER_STR_VAL /* must be multiple of sizeof(u64) */
diff --git a/kernel/user.c b/kernel/user.c
index c82399c1618a..e2cf8c22b539 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -129,6 +129,22 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
return NULL;
}
+static int user_epoll_alloc(struct user_struct *up)
+{
+#ifdef CONFIG_EPOLL
+ return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
+#else
+ return 0;
+#endif
+}
+
+static void user_epoll_free(struct user_struct *up)
+{
+#ifdef CONFIG_EPOLL
+ percpu_counter_destroy(&up->epoll_watches);
+#endif
+}
+
/* IRQs are disabled and uidhash_lock is held upon function entry.
* IRQ state (as stored in flags) is restored and uidhash_lock released
* upon function exit.
@@ -138,6 +154,7 @@ static void free_user(struct user_struct *up, unsigned long flags)
{
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
+ user_epoll_free(up);
kmem_cache_free(uid_cachep, up);
}
@@ -185,6 +202,10 @@ struct user_struct *alloc_uid(kuid_t uid)
new->uid = uid;
refcount_set(&new->__count, 1);
+ if (user_epoll_alloc(new)) {
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
ratelimit_state_init(&new->ratelimit, HZ, 100);
ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
@@ -195,6 +216,7 @@ struct user_struct *alloc_uid(kuid_t uid)
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+ user_epoll_free(new);
kmem_cache_free(uid_cachep, new);
} else {
uid_hash_insert(new, hashent);
@@ -216,6 +238,9 @@ static int __init uid_cache_init(void)
for(n = 0; n < UIDHASH_SZ; ++n)
INIT_HLIST_HEAD(uidhash_table + n);
+ if (user_epoll_alloc(&root_user))
+ panic("root_user epoll percpu counter alloc failed");
+
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
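The per-user epoll watch count becomes a percpu_counter here, and unlike an atomic_t a percpu_counter owns per-CPU memory that must be explicitly allocated and freed; that is why every path above (the duplicate-uid race in alloc_uid(), free_user(), and the statically allocated root_user at boot) gains a matching call. A hedged sketch of the counter's lifetime, assuming <linux/percpu_counter.h>:

static struct percpu_counter watches;

static int example_init(void)
{
	/* allocates per-CPU storage, so it can fail -- hence the
	 * new error handling in alloc_uid() and uid_cache_init() */
	return percpu_counter_init(&watches, 0, GFP_KERNEL);
}

static void example_hot_path(void)
{
	percpu_counter_add(&watches, 1);	/* cheap, mostly per-CPU */
}

static s64 example_total(void)
{
	return percpu_counter_sum(&watches);	/* exact, but slower */
}

static void example_exit(void)
{
	percpu_counter_destroy(&watches);	/* must pair with init */
}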
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0f61b1ec385d..d566f601780f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -295,7 +295,7 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
- depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
depends on !DEBUG_INFO_BTF
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
@@ -1064,7 +1064,6 @@ config HARDLOCKUP_DETECTOR
depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH
select LOCKUP_DETECTOR
select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF
- select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH
help
Say Y here to enable the kernel to act as a watchdog to detect
hard lockups.
@@ -2061,8 +2060,9 @@ config TEST_MIN_HEAP
If unsure, say N.
config TEST_SORT
- tristate "Array-based sort test"
- depends on DEBUG_KERNEL || m
+ tristate "Array-based sort test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This option enables the self-test function of 'sort()' at boot,
or at module load time.
@@ -2443,8 +2443,7 @@ config SLUB_KUNIT_TEST
config RATIONAL_KUNIT_TEST
tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
- depends on KUNIT
- select RATIONAL
+ depends on KUNIT && RATIONAL
default KUNIT_ALL_TESTS
help
This builds the rational math unit test.
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 927017431fb6..5ae248b29373 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -142,16 +142,16 @@ xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
}
/**
- * xbc_node_find_child() - Find a child node which matches given key
+ * xbc_node_find_subkey() - Find a subkey node which matches given key
* @parent: An XBC node.
* @key: A key string.
*
- * Search a node under @parent which matches @key. The @key can contain
+ * Search a key node under @parent which matches @key. The @key can contain
* several words jointed with '.'. If @parent is NULL, this searches the
* node from whole tree. Return NULL if no node is matched.
*/
struct xbc_node * __init
-xbc_node_find_child(struct xbc_node *parent, const char *key)
+xbc_node_find_subkey(struct xbc_node *parent, const char *key)
{
struct xbc_node *node;
@@ -191,7 +191,7 @@ const char * __init
xbc_node_find_value(struct xbc_node *parent, const char *key,
struct xbc_node **vnode)
{
- struct xbc_node *node = xbc_node_find_child(parent, key);
+ struct xbc_node *node = xbc_node_find_subkey(parent, key);
if (!node || !xbc_node_is_key(node))
return NULL;
@@ -792,7 +792,7 @@ void __init xbc_destroy_all(void)
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
- memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+ memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_nodes = NULL;
brace_index = 0;
}
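The rename separates the two kinds of lookup an XBC tree supports: xbc_node_find_subkey() returns an interior key node whose children can be iterated, while xbc_node_find_value() resolves a key to its value string. A hedged usage sketch using only functions that appear in this patch ('event' and 'instance.name' are illustrative keys):

static void __init example(struct xbc_node *parent)
{
	struct xbc_node *branch, *child, *vnode;
	const char *val;

	/* interior key node: walk its subkeys */
	branch = xbc_node_find_subkey(parent, "event");
	if (branch) {
		xbc_node_for_each_subkey(branch, child)
			pr_info("subkey: %s\n", xbc_node_get_data(child));
	}

	/* leaf lookup: returns the value string directly */
	val = xbc_node_find_value(parent, "instance.name", &vnode);
	if (val)
		pr_info("value: %s\n", val);
}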
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index cd3387bb34e5..6b7f1bf6715d 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -89,7 +89,8 @@ static void __dump_stack(const char *log_lvl)
}
/**
- * dump_stack - dump the current task information and its stack trace
+ * dump_stack_lvl - dump the current task information and its stack trace
+ * @log_lvl: log level
*
* Architectures can override this implementation by implementing its own.
*/
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e23123ae3a13..f2d50d69a6c3 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -672,7 +672,7 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
* _copy_mc_to_iter - copy to iter with source memory error exception handling
* @addr: source kernel address
* @bytes: total transfer length
- * @iter: destination iterator
+ * @i: destination iterator
*
* The pmem driver deploys this for the dax operation
* (dax_copy_to_iter()) for dax reads (bypass page-cache and the
@@ -690,6 +690,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
* * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
* Compare to copy_to_iter() where only ITER_IOVEC attempts might return
* a short copy.
+ *
+ * Return: number of bytes copied (may be %0)
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
@@ -744,7 +746,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
* @bytes: total transfer length
- * @iter: source iterator
+ * @i: source iterator
*
* The pmem driver arranges for filesystem-dax to use this facility via
* dax_copy_from_iter() for ensuring that writes to persistent memory
@@ -753,6 +755,8 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
* all iterator types. The _copy_from_iter_nocache() only attempts to
* bypass the cache for the ITER_IOVEC case, and on some archs may use
* instructions that strand dirty-data in the cache.
+ *
+ * Return: number of bytes copied (may be %0)
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
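The three hunks above are kernel-doc repairs: every '@' line must name a parameter exactly as it appears in the signature ('@i', not '@iter'), and a 'Return:' section documents the result. A minimal sketch of the expected shape, for a hypothetical function:

/**
 * example_copy - copy bytes into an iterator (hypothetical)
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator; the name must match the parameter below
 *
 * Longer description goes here.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t example_copy(const void *addr, size_t bytes, struct iov_iter *i);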
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
index b76b92dd0f1f..9bdfde0c0f86 100644
--- a/lib/logic_iomem.c
+++ b/lib/logic_iomem.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/logic_iomem.h>
+#include <asm/io.h>
struct logic_iomem_region {
const struct resource *res;
@@ -78,7 +79,7 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
static void real_iounmap(void __iomem *addr)
{
WARN(1, "invalid iounmap for addr 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
}
#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
@@ -172,14 +173,15 @@ EXPORT_SYMBOL(iounmap);
static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
{ \
WARN(1, "Invalid read" #op " at address %llx\n", \
- (unsigned long long)addr); \
+ (unsigned long long __force)addr); \
return (u ## sz)~0ULL; \
} \
\
-void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+static void real_raw_write ## op(u ## sz val, \
+ volatile void __iomem *addr) \
{ \
WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
- (unsigned long long)val, (unsigned long long)addr); \
+ (unsigned long long)val, (unsigned long long __force)addr);\
} \
MAKE_FALLBACK(b, 8);
@@ -192,14 +194,14 @@ MAKE_FALLBACK(q, 64);
static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
{
WARN(1, "Invalid memset_io at address 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
}
static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
size_t size)
{
WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
memset(buffer, 0xff, size);
}
@@ -208,7 +210,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
size_t size)
{
WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
- (unsigned long long)addr);
+ (unsigned long long __force)addr);
}
#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
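The '__force' additions are for sparse, which places 'void __iomem *' in a separate address space and warns when such a pointer is cast to a plain integer; '__force' marks the conversion as intentional. A minimal sketch (under sparse, __iomem expands to an address_space attribute; in normal builds it is empty):

/* casting an __iomem pointer so it can be printed */
static void report_bad_access(const volatile void __iomem *addr)
{
	/* without __force, sparse flags the address-space cast */
	pr_warn("bad MMIO access at 0x%llx\n",
		(unsigned long long __force)addr);
}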
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
index f19bc9734fa7..0634b428d0cb 100644
--- a/lib/math/Kconfig
+++ b/lib/math/Kconfig
@@ -14,4 +14,4 @@ config PRIME_NUMBERS
If unsure, say N.
config RATIONAL
- bool
+ tristate
diff --git a/lib/math/rational.c b/lib/math/rational.c
index c0ab51d8fbb9..ec59d426ea63 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/limits.h>
+#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
@@ -106,3 +107,5 @@ void rational_best_approximation(
}
EXPORT_SYMBOL(rational_best_approximation);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 8a48b61c3763..55082432f37e 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -614,7 +614,7 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
bool append = false;
int i;
- flags &= BIT(NR_PAGEFLAGS) - 1;
+ flags &= PAGEFLAGS_MASK;
if (flags) {
page_flags |= flags;
snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
diff --git a/lib/test_sort.c b/lib/test_sort.c
index 52edbe10f2e5..be02e3a098cf 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -7,18 +10,17 @@
#define TEST_LEN 1000
-static int __init cmpint(const void *a, const void *b)
+static int cmpint(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
-static int __init test_sort_init(void)
+static void test_sort(struct kunit *test)
{
- int *a, i, r = 1, err = -ENOMEM;
+ int *a, i, r = 1;
- a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
- if (!a)
- return err;
+ a = kunit_kmalloc_array(test, TEST_LEN, sizeof(*a), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a);
for (i = 0; i < TEST_LEN; i++) {
r = (r * 725861) % 6599;
@@ -27,24 +29,20 @@ static int __init test_sort_init(void)
sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
- err = -EINVAL;
for (i = 0; i < TEST_LEN-1; i++)
- if (a[i] > a[i+1]) {
- pr_err("test has failed\n");
- goto exit;
- }
- err = 0;
- pr_info("test passed\n");
-exit:
- kfree(a);
- return err;
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
}
-static void __exit test_sort_exit(void)
-{
-}
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE(test_sort),
+ {}
+};
+
+static struct kunit_suite sort_test_suite = {
+ .name = "lib_sort",
+ .test_cases = sort_test_cases,
+};
-module_init(test_sort_init);
-module_exit(test_sort_exit);
+kunit_test_suites(&sort_test_suite);
MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3bcb7be03f93..d7ad44f2c8f5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2019,7 +2019,7 @@ static const struct page_flags_fields pff[] = {
static
char *format_page_flags(char *buf, char *end, unsigned long flags)
{
- unsigned long main_flags = flags & (BIT(NR_PAGEFLAGS) - 1);
+ unsigned long main_flags = flags & PAGEFLAGS_MASK;
bool append = false;
int i;
diff --git a/mm/Kconfig b/mm/Kconfig
index 40a9bfcd5062..d16ba9249bc5 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -96,9 +96,6 @@ config HAVE_FAST_GUP
depends on MMU
bool
-config HOLES_IN_ZONE
- bool
-
# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
@@ -742,10 +739,18 @@ config DEFERRED_STRUCT_PAGE_INIT
lifetime of the system until these kthreads finish the
initialisation.
+config PAGE_IDLE_FLAG
+ bool
+ select PAGE_EXTENSION if !64BIT
+ help
+	  This adds PG_idle and PG_young flags to 'struct page'. Writers of
+	  the PTE Accessed bit can mirror the bit's state in these flags so
+	  that readers of the bit are not disturbed.
+
config IDLE_PAGE_TRACKING
bool "Enable idle page tracking"
depends on SYSFS && MMU
- select PAGE_EXTENSION if !64BIT
+ select PAGE_IDLE_FLAG
help
This feature allows to estimate the amount of user pages that have
not been touched during a given period of time. This information can
@@ -889,4 +894,6 @@ config IO_MAPPING
config SECRETMEM
def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED
+source "mm/damon/Kconfig"
+
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index e3436741d539..fc60a40ce954 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -38,7 +38,7 @@ mmu-y := nommu.o
mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
msync.o page_vma_mapped.o pagewalk.o \
- pgtable-generic.o rmap.o vmalloc.o ioremap.o
+ pgtable-generic.o rmap.o vmalloc.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
@@ -118,6 +118,7 @@ obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_DAMON) += damon/
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
@@ -128,3 +129,4 @@ obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
obj-$(CONFIG_IO_MAPPING) += io-mapping.o
obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
+obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o
diff --git a/mm/compaction.c b/mm/compaction.c
index fa9b2b598eab..bfc93da1c2c7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -306,16 +306,14 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
* is necessary for the block to be a migration source/target.
*/
do {
- if (pfn_valid_within(pfn)) {
- if (check_source && PageLRU(page)) {
- clear_pageblock_skip(page);
- return true;
- }
+ if (check_source && PageLRU(page)) {
+ clear_pageblock_skip(page);
+ return true;
+ }
- if (check_target && PageBuddy(page)) {
- clear_pageblock_skip(page);
- return true;
- }
+ if (check_target && PageBuddy(page)) {
+ clear_pageblock_skip(page);
+ return true;
}
page += (1 << PAGE_ALLOC_COSTLY_ORDER);
@@ -585,8 +583,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
break;
nr_scanned++;
- if (!pfn_valid_within(blockpfn))
- goto isolate_fail;
/*
* For compound pages such as THP and hugetlbfs, we can save
@@ -885,8 +881,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
cond_resched();
}
- if (!pfn_valid_within(low_pfn))
- goto isolate_fail;
nr_scanned++;
page = pfn_to_page(low_pfn);
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
new file mode 100644
index 000000000000..37024798a97c
--- /dev/null
+++ b/mm/damon/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Data Access Monitoring"
+
+config DAMON
+ bool "DAMON: Data Access Monitoring Framework"
+ help
+ This builds a framework that allows kernel subsystems to monitor
+ access frequency of each memory region. The information can be useful
+ for performance-centric DRAM level memory management.
+
+ See https://damonitor.github.io/doc/html/latest-damon/index.html for
+ more information.
+
+config DAMON_KUNIT_TEST
+ bool "Test for damon" if !KUNIT_ALL_TESTS
+ depends on DAMON && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the DAMON Kunit test suite.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation.
+
+ If unsure, say N.
+
+config DAMON_VADDR
+ bool "Data access monitoring primitives for virtual address spaces"
+ depends on DAMON && MMU
+ select PAGE_IDLE_FLAG
+ help
+	  This builds the default data access monitoring primitives for DAMON
+	  that work for virtual address spaces.
+
+config DAMON_VADDR_KUNIT_TEST
+ bool "Test for DAMON primitives" if !KUNIT_ALL_TESTS
+ depends on DAMON_VADDR && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the DAMON virtual addresses primitives Kunit test suite.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation.
+
+ If unsure, say N.
+
+config DAMON_DBGFS
+ bool "DAMON debugfs interface"
+ depends on DAMON_VADDR && DEBUG_FS
+ help
+ This builds the debugfs interface for DAMON. The user space admins
+ can use the interface for arbitrary data access monitoring.
+
+ If unsure, say N.
+
+config DAMON_DBGFS_KUNIT_TEST
+ bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
+ depends on DAMON_DBGFS && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the DAMON debugfs interface Kunit test suite.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation.
+
+ If unsure, say N.
+
+endmenu
diff --git a/mm/damon/Makefile b/mm/damon/Makefile
new file mode 100644
index 000000000000..fed4be3bace3
--- /dev/null
+++ b/mm/damon/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DAMON) := core.o
+obj-$(CONFIG_DAMON_VADDR) += vaddr.o
+obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
new file mode 100644
index 000000000000..c938a9c34e6c
--- /dev/null
+++ b/mm/damon/core-test.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifdef CONFIG_DAMON_KUNIT_TEST
+
+#ifndef _DAMON_CORE_TEST_H
+#define _DAMON_CORE_TEST_H
+
+#include <kunit/test.h>
+
+static void damon_test_regions(struct kunit *test)
+{
+ struct damon_region *r;
+ struct damon_target *t;
+
+ r = damon_new_region(1, 2);
+ KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
+ KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
+ KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
+
+ t = damon_new_target(42);
+ KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
+
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
+
+ damon_del_region(r, t);
+ KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
+
+ damon_free_target(t);
+}
+
+static unsigned int nr_damon_targets(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ unsigned int nr_targets = 0;
+
+ damon_for_each_target(t, ctx)
+ nr_targets++;
+
+ return nr_targets;
+}
+
+static void damon_test_target(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+
+ t = damon_new_target(42);
+ KUNIT_EXPECT_EQ(test, 42ul, t->id);
+ KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
+
+ damon_add_target(c, t);
+ KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
+
+ damon_destroy_target(t);
+ KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
+
+ damon_destroy_ctx(c);
+}
+
+/*
+ * Test kdamond_reset_aggregated()
+ *
+ * DAMON checks accesses to each region and aggregates this information as the
+ * access frequency of each region. In detail, it increases '->nr_accesses' of
+ * each region in which an access has been confirmed. 'kdamond_reset_aggregated()'
+ * flushes the aggregated information ('->nr_accesses' of each region) to the
+ * result buffer. As a result of the flushing, the '->nr_accesses' of all
+ * regions are reset to zero.
+ */
+static void damon_test_aggregate(struct kunit *test)
+{
+ struct damon_ctx *ctx = damon_new_ctx();
+ unsigned long target_ids[] = {1, 2, 3};
+ unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
+ unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
+ unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
+ struct damon_target *t;
+ struct damon_region *r;
+ int it, ir;
+
+ damon_set_targets(ctx, target_ids, 3);
+
+ it = 0;
+ damon_for_each_target(t, ctx) {
+ for (ir = 0; ir < 3; ir++) {
+ r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
+ r->nr_accesses = accesses[it][ir];
+ damon_add_region(r, t);
+ }
+ it++;
+ }
+ kdamond_reset_aggregated(ctx);
+ it = 0;
+ damon_for_each_target(t, ctx) {
+ ir = 0;
+ /* '->nr_accesses' should be zeroed */
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
+ ir++;
+ }
+ /* regions should be preserved */
+ KUNIT_EXPECT_EQ(test, 3, ir);
+ it++;
+ }
+ /* targets also should be preserved */
+ KUNIT_EXPECT_EQ(test, 3, it);
+
+ damon_destroy_ctx(ctx);
+}
+
+static void damon_test_split_at(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+ struct damon_region *r;
+
+ t = damon_new_target(42);
+ r = damon_new_region(0, 100);
+ damon_add_region(r, t);
+ damon_split_region_at(c, t, r, 25);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
+
+ r = damon_next_region(r);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 25ul);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 100ul);
+
+ damon_free_target(t);
+ damon_destroy_ctx(c);
+}
+
+static void damon_test_merge_two(struct kunit *test)
+{
+ struct damon_target *t;
+ struct damon_region *r, *r2, *r3;
+ int i;
+
+ t = damon_new_target(42);
+ r = damon_new_region(0, 100);
+ r->nr_accesses = 10;
+ damon_add_region(r, t);
+ r2 = damon_new_region(100, 300);
+ r2->nr_accesses = 20;
+ damon_add_region(r2, t);
+
+ damon_merge_two_regions(t, r, r2);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
+
+ i = 0;
+ damon_for_each_region(r3, t) {
+ KUNIT_EXPECT_PTR_EQ(test, r, r3);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 1);
+
+ damon_free_target(t);
+}
+
+static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
+{
+ struct damon_region *r;
+ unsigned int i = 0;
+
+ damon_for_each_region(r, t) {
+ if (i++ == idx)
+ return r;
+ }
+
+ return NULL;
+}
+
+static void damon_test_merge_regions_of(struct kunit *test)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
+ unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
+ unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};
+
+ unsigned long saddrs[] = {0, 114, 130, 156, 170};
+ unsigned long eaddrs[] = {112, 130, 156, 170, 230};
+ int i;
+
+ t = damon_new_target(42);
+ for (i = 0; i < ARRAY_SIZE(sa); i++) {
+ r = damon_new_region(sa[i], ea[i]);
+ r->nr_accesses = nrs[i];
+ damon_add_region(r, t);
+ }
+
+ damon_merge_regions_of(t, 9, 9999);
+	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+ for (i = 0; i < 5; i++) {
+ r = __nth_region_of(t, i);
+ KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
+ }
+ damon_free_target(t);
+}
+
+static void damon_test_split_regions_of(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+ struct damon_region *r;
+
+ t = damon_new_target(42);
+ r = damon_new_region(0, 22);
+ damon_add_region(r, t);
+ damon_split_regions_of(c, t, 2);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2u);
+ damon_free_target(t);
+
+ t = damon_new_target(42);
+ r = damon_new_region(0, 220);
+ damon_add_region(r, t);
+ damon_split_regions_of(c, t, 4);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 4u);
+ damon_free_target(t);
+ damon_destroy_ctx(c);
+}
+
+static struct kunit_case damon_test_cases[] = {
+ KUNIT_CASE(damon_test_target),
+ KUNIT_CASE(damon_test_regions),
+ KUNIT_CASE(damon_test_aggregate),
+ KUNIT_CASE(damon_test_split_at),
+ KUNIT_CASE(damon_test_merge_two),
+ KUNIT_CASE(damon_test_merge_regions_of),
+ KUNIT_CASE(damon_test_split_regions_of),
+ {},
+};
+
+static struct kunit_suite damon_test_suite = {
+ .name = "damon",
+ .test_cases = damon_test_cases,
+};
+kunit_test_suite(damon_test_suite);
+
+#endif /* _DAMON_CORE_TEST_H */
+
+#endif /* CONFIG_DAMON_KUNIT_TEST */
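The '16u' expected by damon_test_merge_two() above is the size-weighted average that damon_merge_two_regions() (in core.c below) computes when folding region [100, 300) into [0, 100):

/* region 1: [0, 100),   size 100, nr_accesses 10
 * region 2: [100, 300), size 200, nr_accesses 20 */
unsigned int merged = (10 * 100 + 20 * 200) / (100 + 200);	/* = 16 */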
diff --git a/mm/damon/core.c b/mm/damon/core.c
new file mode 100644
index 000000000000..30e9211f494a
--- /dev/null
+++ b/mm/damon/core.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data Access Monitor
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#define pr_fmt(fmt) "damon: " fmt
+
+#include <linux/damon.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/damon.h>
+
+#ifdef CONFIG_DAMON_KUNIT_TEST
+#undef DAMON_MIN_REGION
+#define DAMON_MIN_REGION 1
+#endif
+
+/* Get a random number in [l, r) */
+#define damon_rand(l, r) ((l) + prandom_u32_max((r) - (l)))
+
+static DEFINE_MUTEX(damon_lock);
+static int nr_running_ctxs;
+
+/*
+ * Construct a damon_region struct
+ *
+ * Returns the pointer to the new struct on success, or NULL otherwise
+ */
+struct damon_region *damon_new_region(unsigned long start, unsigned long end)
+{
+ struct damon_region *region;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ region->ar.start = start;
+ region->ar.end = end;
+ region->nr_accesses = 0;
+ INIT_LIST_HEAD(&region->list);
+
+ return region;
+}
+
+/*
+ * Add a region between two other regions
+ */
+inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t)
+{
+ list_add_tail(&r->list, &t->regions_list);
+ t->nr_regions++;
+}
+
+static void damon_del_region(struct damon_region *r, struct damon_target *t)
+{
+ list_del(&r->list);
+ t->nr_regions--;
+}
+
+static void damon_free_region(struct damon_region *r)
+{
+ kfree(r);
+}
+
+void damon_destroy_region(struct damon_region *r, struct damon_target *t)
+{
+ damon_del_region(r, t);
+ damon_free_region(r);
+}
+
+/*
+ * Construct a damon_target struct
+ *
+ * Returns the pointer to the new struct on success, or NULL otherwise
+ */
+struct damon_target *damon_new_target(unsigned long id)
+{
+ struct damon_target *t;
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return NULL;
+
+ t->id = id;
+ t->nr_regions = 0;
+ INIT_LIST_HEAD(&t->regions_list);
+
+ return t;
+}
+
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
+{
+ list_add_tail(&t->list, &ctx->adaptive_targets);
+}
+
+static void damon_del_target(struct damon_target *t)
+{
+ list_del(&t->list);
+}
+
+void damon_free_target(struct damon_target *t)
+{
+ struct damon_region *r, *next;
+
+ damon_for_each_region_safe(r, next, t)
+ damon_free_region(r);
+ kfree(t);
+}
+
+void damon_destroy_target(struct damon_target *t)
+{
+ damon_del_target(t);
+ damon_free_target(t);
+}
+
+unsigned int damon_nr_regions(struct damon_target *t)
+{
+ return t->nr_regions;
+}
+
+struct damon_ctx *damon_new_ctx(void)
+{
+ struct damon_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->sample_interval = 5 * 1000;
+ ctx->aggr_interval = 100 * 1000;
+ ctx->primitive_update_interval = 60 * 1000 * 1000;
+
+ ktime_get_coarse_ts64(&ctx->last_aggregation);
+ ctx->last_primitive_update = ctx->last_aggregation;
+
+ mutex_init(&ctx->kdamond_lock);
+
+ ctx->min_nr_regions = 10;
+ ctx->max_nr_regions = 1000;
+
+ INIT_LIST_HEAD(&ctx->adaptive_targets);
+
+ return ctx;
+}
+
+static void damon_destroy_targets(struct damon_ctx *ctx)
+{
+ struct damon_target *t, *next_t;
+
+ if (ctx->primitive.cleanup) {
+ ctx->primitive.cleanup(ctx);
+ return;
+ }
+
+ damon_for_each_target_safe(t, next_t, ctx)
+ damon_destroy_target(t);
+}
+
+void damon_destroy_ctx(struct damon_ctx *ctx)
+{
+ damon_destroy_targets(ctx);
+ kfree(ctx);
+}
+
+/**
+ * damon_set_targets() - Set monitoring targets.
+ * @ctx: monitoring context
+ * @ids: array of target ids
+ * @nr_ids: number of entries in @ids
+ *
+ * This function should not be called while the kdamond is running.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_set_targets(struct damon_ctx *ctx,
+ unsigned long *ids, ssize_t nr_ids)
+{
+ ssize_t i;
+ struct damon_target *t, *next;
+
+ damon_destroy_targets(ctx);
+
+ for (i = 0; i < nr_ids; i++) {
+ t = damon_new_target(ids[i]);
+ if (!t) {
+ pr_err("Failed to alloc damon_target\n");
+ /* The caller should do cleanup of the ids itself */
+ damon_for_each_target_safe(t, next, ctx)
+ damon_destroy_target(t);
+ return -ENOMEM;
+ }
+ damon_add_target(ctx, t);
+ }
+
+ return 0;
+}
+
+/**
+ * damon_set_attrs() - Set attributes for the monitoring.
+ * @ctx: monitoring context
+ * @sample_int: time interval between samplings
+ * @aggr_int: time interval between aggregations
+ * @primitive_upd_int: time interval between monitoring primitive updates
+ * @min_nr_reg: minimal number of regions
+ * @max_nr_reg: maximum number of regions
+ *
+ * This function should not be called while the kdamond is running.
+ * Every time interval is in microseconds.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
+ unsigned long aggr_int, unsigned long primitive_upd_int,
+ unsigned long min_nr_reg, unsigned long max_nr_reg)
+{
+ if (min_nr_reg < 3) {
+ pr_err("min_nr_regions (%lu) must be at least 3\n",
+ min_nr_reg);
+ return -EINVAL;
+ }
+ if (min_nr_reg > max_nr_reg) {
+ pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
+ min_nr_reg, max_nr_reg);
+ return -EINVAL;
+ }
+
+ ctx->sample_interval = sample_int;
+ ctx->aggr_interval = aggr_int;
+ ctx->primitive_update_interval = primitive_upd_int;
+ ctx->min_nr_regions = min_nr_reg;
+ ctx->max_nr_regions = max_nr_reg;
+
+ return 0;
+}
+
+/**
+ * damon_nr_running_ctxs() - Return number of currently running contexts.
+ */
+int damon_nr_running_ctxs(void)
+{
+ int nr_ctxs;
+
+ mutex_lock(&damon_lock);
+ nr_ctxs = nr_running_ctxs;
+ mutex_unlock(&damon_lock);
+
+ return nr_ctxs;
+}
+
+/* Returns the size upper limit for each monitoring region */
+static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ unsigned long sz = 0;
+
+ damon_for_each_target(t, ctx) {
+ damon_for_each_region(r, t)
+ sz += r->ar.end - r->ar.start;
+ }
+
+ if (ctx->min_nr_regions)
+ sz /= ctx->min_nr_regions;
+ if (sz < DAMON_MIN_REGION)
+ sz = DAMON_MIN_REGION;
+
+ return sz;
+}
+
+static bool damon_kdamond_running(struct damon_ctx *ctx)
+{
+ bool running;
+
+ mutex_lock(&ctx->kdamond_lock);
+ running = ctx->kdamond != NULL;
+ mutex_unlock(&ctx->kdamond_lock);
+
+ return running;
+}
+
+static int kdamond_fn(void *data);
+
+/*
+ * __damon_start() - Starts monitoring with a given context.
+ * @ctx: monitoring context
+ *
+ * This function should be called while damon_lock is held.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int __damon_start(struct damon_ctx *ctx)
+{
+ int err = -EBUSY;
+
+ mutex_lock(&ctx->kdamond_lock);
+ if (!ctx->kdamond) {
+ err = 0;
+ ctx->kdamond_stop = false;
+ ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
+ nr_running_ctxs);
+ if (IS_ERR(ctx->kdamond)) {
+ err = PTR_ERR(ctx->kdamond);
+ ctx->kdamond = 0;
+ }
+ }
+ mutex_unlock(&ctx->kdamond_lock);
+
+ return err;
+}
+
+/**
+ * damon_start() - Starts monitoring for a given group of contexts.
+ * @ctxs: an array of the pointers for contexts to start monitoring
+ * @nr_ctxs: size of @ctxs
+ *
+ * This function starts a group of monitoring threads for a group of monitoring
+ * contexts. One thread per context is created and run in parallel. The
+ * caller should handle synchronization between the threads by itself. If a
+ * group of threads created by another 'damon_start()' call is currently
+ * running, this function does nothing but return -EBUSY.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
+{
+ int i;
+ int err = 0;
+
+ mutex_lock(&damon_lock);
+ if (nr_running_ctxs) {
+ mutex_unlock(&damon_lock);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < nr_ctxs; i++) {
+ err = __damon_start(ctxs[i]);
+ if (err)
+ break;
+ nr_running_ctxs++;
+ }
+ mutex_unlock(&damon_lock);
+
+ return err;
+}
+
+/*
+ * __damon_stop() - Stops monitoring of a given context.
+ * @ctx: monitoring context
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int __damon_stop(struct damon_ctx *ctx)
+{
+ mutex_lock(&ctx->kdamond_lock);
+ if (ctx->kdamond) {
+ ctx->kdamond_stop = true;
+ mutex_unlock(&ctx->kdamond_lock);
+ while (damon_kdamond_running(ctx))
+ usleep_range(ctx->sample_interval,
+ ctx->sample_interval * 2);
+ return 0;
+ }
+ mutex_unlock(&ctx->kdamond_lock);
+
+ return -EPERM;
+}
+
+/**
+ * damon_stop() - Stops monitoring for a given group of contexts.
+ * @ctxs: an array of the pointers for contexts to stop monitoring
+ * @nr_ctxs: size of @ctxs
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
+{
+ int i, err = 0;
+
+ for (i = 0; i < nr_ctxs; i++) {
+ /* nr_running_ctxs is decremented in kdamond_fn */
+ err = __damon_stop(ctxs[i]);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/*
+ * damon_check_reset_time_interval() - Check if a time interval has elapsed.
+ * @baseline: the time to check whether the interval has elapsed since
+ * @interval: the time interval (microseconds)
+ *
+ * See whether the given time interval has passed since the given baseline
+ * time. If so, it also updates the baseline to the current time for the next check.
+ *
+ * Return: true if the time interval has passed, or false otherwise.
+ */
+static bool damon_check_reset_time_interval(struct timespec64 *baseline,
+ unsigned long interval)
+{
+ struct timespec64 now;
+
+ ktime_get_coarse_ts64(&now);
+ if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
+ interval * 1000)
+ return false;
+ *baseline = now;
+ return true;
+}
+
+/*
+ * Check whether it is time to flush the aggregated information
+ */
+static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
+{
+ return damon_check_reset_time_interval(&ctx->last_aggregation,
+ ctx->aggr_interval);
+}
+
+/*
+ * Reset the aggregated monitoring results ('nr_accesses' of each region).
+ */
+static void kdamond_reset_aggregated(struct damon_ctx *c)
+{
+ struct damon_target *t;
+
+ damon_for_each_target(t, c) {
+ struct damon_region *r;
+
+ damon_for_each_region(r, t) {
+ trace_damon_aggregated(t, r, damon_nr_regions(t));
+ r->nr_accesses = 0;
+ }
+ }
+}
+
+#define sz_damon_region(r) (r->ar.end - r->ar.start)
+
+/*
+ * Merge two adjacent regions into one region
+ */
+static void damon_merge_two_regions(struct damon_target *t,
+ struct damon_region *l, struct damon_region *r)
+{
+ unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
+
+ l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
+ (sz_l + sz_r);
+ l->ar.end = r->ar.end;
+ damon_destroy_region(r, t);
+}
+
+#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
+
+/*
+ * Merge adjacent regions having similar access frequencies
+ *
+ * t target affected by this merge operation
+ * thres '->nr_accesses' diff threshold for the merge
+ * sz_limit size upper limit of each region
+ */
+static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
+ unsigned long sz_limit)
+{
+ struct damon_region *r, *prev = NULL, *next;
+
+ damon_for_each_region_safe(r, next, t) {
+ if (prev && prev->ar.end == r->ar.start &&
+ diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
+ sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
+ damon_merge_two_regions(t, prev, r);
+ else
+ prev = r;
+ }
+}
+
+/*
+ * Merge adjacent regions having similar access frequencies
+ *
+ * threshold '->nr_accesses' diff threshold for the merge
+ * sz_limit size upper limit of each region
+ *
+ * This function merges monitoring target regions which are adjacent and have
+ * similar access frequencies. This minimizes the monitoring overhead under a
+ * dynamically changing access pattern. If a merge was made unnecessarily,
+ * 'kdamond_split_regions()' will later revert it.
+ */
+static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
+ unsigned long sz_limit)
+{
+ struct damon_target *t;
+
+ damon_for_each_target(t, c)
+ damon_merge_regions_of(t, threshold, sz_limit);
+}
+
+/*
+ * Split a region in two
+ *
+ * r the region to be split
+ * sz_r size of the first sub-region that will be made
+ */
+static void damon_split_region_at(struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ unsigned long sz_r)
+{
+ struct damon_region *new;
+
+ new = damon_new_region(r->ar.start + sz_r, r->ar.end);
+ if (!new)
+ return;
+
+ r->ar.end = new->ar.start;
+
+ damon_insert_region(new, r, damon_next_region(r), t);
+}
+
+/* Split every region in the given target into 'nr_subs' regions */
+static void damon_split_regions_of(struct damon_ctx *ctx,
+ struct damon_target *t, int nr_subs)
+{
+ struct damon_region *r, *next;
+ unsigned long sz_region, sz_sub = 0;
+ int i;
+
+ damon_for_each_region_safe(r, next, t) {
+ sz_region = r->ar.end - r->ar.start;
+
+ for (i = 0; i < nr_subs - 1 &&
+ sz_region > 2 * DAMON_MIN_REGION; i++) {
+ /*
+			 * Randomly select the size of the left sub-region to be
+			 * at least 10% and at most 90% of the original region
+ */
+ sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
+ sz_region / 10, DAMON_MIN_REGION);
+ /* Do not allow blank region */
+ if (sz_sub == 0 || sz_sub >= sz_region)
+ continue;
+
+ damon_split_region_at(ctx, t, r, sz_sub);
+ sz_region = sz_sub;
+ }
+ }
+}
+
+/*
+ * Split every target region into randomly-sized small regions
+ *
+ * This function splits every target region into randomly-sized small regions
+ * if the current total number of regions is equal to or smaller than half of
+ * the user-specified maximum number of regions. This maximizes the monitoring
+ * accuracy under dynamically changing access patterns. If a split was made
+ * unnecessarily, 'kdamond_merge_regions()' will later revert it.
+ */
+static void kdamond_split_regions(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ unsigned int nr_regions = 0;
+ static unsigned int last_nr_regions;
+ int nr_subregions = 2;
+
+ damon_for_each_target(t, ctx)
+ nr_regions += damon_nr_regions(t);
+
+ if (nr_regions > ctx->max_nr_regions / 2)
+ return;
+
+ /* Maybe the middle of the region has different access frequency */
+ if (last_nr_regions == nr_regions &&
+ nr_regions < ctx->max_nr_regions / 3)
+ nr_subregions = 3;
+
+ damon_for_each_target(t, ctx)
+ damon_split_regions_of(ctx, t, nr_subregions);
+
+ last_nr_regions = nr_regions;
+}
+
+/*
+ * Check whether it is time to update the monitoring primitives
+ *
+ * Returns true if it is.
+ */
+static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
+{
+ return damon_check_reset_time_interval(&ctx->last_primitive_update,
+ ctx->primitive_update_interval);
+}
+
+/*
+ * Check whether current monitoring should be stopped
+ *
+ * The monitoring is stopped when either the user requested to stop, or all
+ * monitoring targets are invalid.
+ *
+ * Returns true if the current monitoring needs to stop.
+ */
+static bool kdamond_need_stop(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ bool stop;
+
+ mutex_lock(&ctx->kdamond_lock);
+ stop = ctx->kdamond_stop;
+ mutex_unlock(&ctx->kdamond_lock);
+ if (stop)
+ return true;
+
+ if (!ctx->primitive.target_valid)
+ return false;
+
+ damon_for_each_target(t, ctx) {
+ if (ctx->primitive.target_valid(t))
+ return false;
+ }
+
+ return true;
+}
+
+static void set_kdamond_stop(struct damon_ctx *ctx)
+{
+ mutex_lock(&ctx->kdamond_lock);
+ ctx->kdamond_stop = true;
+ mutex_unlock(&ctx->kdamond_lock);
+}
+
+/*
+ * The monitoring daemon that runs as a kernel thread
+ */
+static int kdamond_fn(void *data)
+{
+ struct damon_ctx *ctx = (struct damon_ctx *)data;
+ struct damon_target *t;
+ struct damon_region *r, *next;
+ unsigned int max_nr_accesses = 0;
+ unsigned long sz_limit = 0;
+
+ mutex_lock(&ctx->kdamond_lock);
+ pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
+ mutex_unlock(&ctx->kdamond_lock);
+
+ if (ctx->primitive.init)
+ ctx->primitive.init(ctx);
+ if (ctx->callback.before_start && ctx->callback.before_start(ctx))
+ set_kdamond_stop(ctx);
+
+ sz_limit = damon_region_sz_limit(ctx);
+
+ while (!kdamond_need_stop(ctx)) {
+ if (ctx->primitive.prepare_access_checks)
+ ctx->primitive.prepare_access_checks(ctx);
+ if (ctx->callback.after_sampling &&
+ ctx->callback.after_sampling(ctx))
+ set_kdamond_stop(ctx);
+
+ usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+
+ if (ctx->primitive.check_accesses)
+ max_nr_accesses = ctx->primitive.check_accesses(ctx);
+
+ if (kdamond_aggregate_interval_passed(ctx)) {
+ kdamond_merge_regions(ctx,
+ max_nr_accesses / 10,
+ sz_limit);
+ if (ctx->callback.after_aggregation &&
+ ctx->callback.after_aggregation(ctx))
+ set_kdamond_stop(ctx);
+ kdamond_reset_aggregated(ctx);
+ kdamond_split_regions(ctx);
+ if (ctx->primitive.reset_aggregated)
+ ctx->primitive.reset_aggregated(ctx);
+ }
+
+ if (kdamond_need_update_primitive(ctx)) {
+ if (ctx->primitive.update)
+ ctx->primitive.update(ctx);
+ sz_limit = damon_region_sz_limit(ctx);
+ }
+ }
+ damon_for_each_target(t, ctx) {
+ damon_for_each_region_safe(r, next, t)
+ damon_destroy_region(r, t);
+ }
+
+ if (ctx->callback.before_terminate &&
+ ctx->callback.before_terminate(ctx))
+ set_kdamond_stop(ctx);
+ if (ctx->primitive.cleanup)
+ ctx->primitive.cleanup(ctx);
+
+ pr_debug("kdamond (%d) finishes\n", ctx->kdamond->pid);
+ mutex_lock(&ctx->kdamond_lock);
+ ctx->kdamond = NULL;
+ mutex_unlock(&ctx->kdamond_lock);
+
+ mutex_lock(&damon_lock);
+ nr_running_ctxs--;
+ mutex_unlock(&damon_lock);
+
+ do_exit(0);
+}
+
+#include "core-test.h"
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h
new file mode 100644
index 000000000000..930e83bceef0
--- /dev/null
+++ b/mm/damon/dbgfs-test.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON Debugfs Interface Unit Tests
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST
+
+#ifndef _DAMON_DBGFS_TEST_H
+#define _DAMON_DBGFS_TEST_H
+
+#include <kunit/test.h>
+
+static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
+{
+ char *question;
+ unsigned long *answers;
+ unsigned long expected[] = {12, 35, 46};
+ ssize_t nr_integers = 0, i;
+
+ question = "123";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
+ KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
+ kfree(answers);
+
+ question = "123abc";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
+ KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
+ kfree(answers);
+
+ question = "a123";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
+ kfree(answers);
+
+ question = "12 35";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
+ for (i = 0; i < nr_integers; i++)
+ KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
+ kfree(answers);
+
+ question = "12 35 46";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
+ for (i = 0; i < nr_integers; i++)
+ KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
+ kfree(answers);
+
+ question = "12 35 abc 46";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
+ for (i = 0; i < 2; i++)
+ KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
+ kfree(answers);
+
+ question = "";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
+ kfree(answers);
+
+ question = "\n";
+ answers = str_to_target_ids(question, strnlen(question, 128),
+ &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
+ kfree(answers);
+}
+
+static void damon_dbgfs_test_set_targets(struct kunit *test)
+{
+ struct damon_ctx *ctx = dbgfs_new_ctx();
+ unsigned long ids[] = {1, 2, 3};
+ char buf[64];
+
+ /* Make DAMON consider target ids as plain numbers */
+ ctx->primitive.target_valid = NULL;
+ ctx->primitive.cleanup = NULL;
+
+ damon_set_targets(ctx, ids, 3);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2 3\n");
+
+ damon_set_targets(ctx, NULL, 0);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
+
+ damon_set_targets(ctx, (unsigned long []){1, 2}, 2);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "1 2\n");
+
+ damon_set_targets(ctx, (unsigned long []){2}, 1);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "2\n");
+
+ damon_set_targets(ctx, NULL, 0);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
+
+ dbgfs_destroy_ctx(ctx);
+}
+
+static struct kunit_case damon_test_cases[] = {
+ KUNIT_CASE(damon_dbgfs_test_str_to_target_ids),
+ KUNIT_CASE(damon_dbgfs_test_set_targets),
+ {},
+};
+
+static struct kunit_suite damon_test_suite = {
+ .name = "damon-dbgfs",
+ .test_cases = damon_test_cases,
+};
+kunit_test_suite(damon_test_suite);
+
+#endif /* _DAMON_DBGFS_TEST_H */
+
+#endif /* CONFIG_DAMON_DBGFS_KUNIT_TEST */
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
new file mode 100644
index 000000000000..faee070977d8
--- /dev/null
+++ b/mm/damon/dbgfs.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DAMON Debugfs Interface
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#define pr_fmt(fmt) "damon-dbgfs: " fmt
+
+#include <linux/damon.h>
+#include <linux/debugfs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/page_idle.h>
+#include <linux/slab.h>
+
+static struct damon_ctx **dbgfs_ctxs;
+static int dbgfs_nr_ctxs;
+static struct dentry **dbgfs_dirs;
+static DEFINE_MUTEX(damon_dbgfs_lock);
+
+/*
+ * Returns a NUL-terminated copy of the user input on success, or an
+ * ERR_PTR()-encoded error code otherwise.
+ */
+static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
+{
+ char *kbuf;
+ ssize_t ret;
+
+ /* We do not accept continuous writes */
+ if (*ppos)
+ return ERR_PTR(-EINVAL);
+
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
+ if (ret != count) {
+ kfree(kbuf);
+ return ERR_PTR(-EIO);
+ }
+ kbuf[ret] = '\0';
+
+ return kbuf;
+}
+
+static ssize_t dbgfs_attrs_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct damon_ctx *ctx = file->private_data;
+ char kbuf[128];
+ int ret;
+
+ mutex_lock(&ctx->kdamond_lock);
+ ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
+ ctx->sample_interval, ctx->aggr_interval,
+ ctx->primitive_update_interval, ctx->min_nr_regions,
+ ctx->max_nr_regions);
+ mutex_unlock(&ctx->kdamond_lock);
+
+ return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
+}
+
+static ssize_t dbgfs_attrs_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct damon_ctx *ctx = file->private_data;
+ unsigned long s, a, r, minr, maxr;
+ char *kbuf;
+ ssize_t ret = count;
+ int err;
+
+ kbuf = user_input_str(buf, count, ppos);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
+ &s, &a, &r, &minr, &maxr) != 5) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&ctx->kdamond_lock);
+ if (ctx->kdamond) {
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ err = damon_set_attrs(ctx, s, a, r, minr, maxr);
+ if (err)
+ ret = err;
+unlock_out:
+ mutex_unlock(&ctx->kdamond_lock);
+out:
+ kfree(kbuf);
+ return ret;
+}
+
+static inline bool targetid_is_pid(const struct damon_ctx *ctx)
+{
+ return ctx->primitive.target_valid == damon_va_target_valid;
+}
+
+static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
+{
+ struct damon_target *t;
+ unsigned long id;
+ int written = 0;
+ int rc;
+
+ damon_for_each_target(t, ctx) {
+ id = t->id;
+ if (targetid_is_pid(ctx))
+ /* Show pid numbers to debugfs users */
+ id = (unsigned long)pid_vnr((struct pid *)id);
+
+ rc = scnprintf(&buf[written], len - written, "%lu ", id);
+ if (!rc)
+ return -ENOMEM;
+ written += rc;
+ }
+ if (written)
+ written -= 1;
+ written += scnprintf(&buf[written], len - written, "\n");
+ return written;
+}
+
+static ssize_t dbgfs_target_ids_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct damon_ctx *ctx = file->private_data;
+ ssize_t len;
+ char ids_buf[320];
+
+ mutex_lock(&ctx->kdamond_lock);
+ len = sprint_target_ids(ctx, ids_buf, 320);
+ mutex_unlock(&ctx->kdamond_lock);
+ if (len < 0)
+ return len;
+
+ return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
+}
+
+/*
+ * Converts a string into an array of unsigned long integers
+ *
+ * Returns an array of unsigned long integers if the conversion succeeds, or
+ * NULL otherwise.
+ */
+static unsigned long *str_to_target_ids(const char *str, ssize_t len,
+ ssize_t *nr_ids)
+{
+ unsigned long *ids;
+ const int max_nr_ids = 32;
+ unsigned long id;
+ int pos = 0, parsed, ret;
+
+ *nr_ids = 0;
+ ids = kmalloc_array(max_nr_ids, sizeof(id), GFP_KERNEL);
+ if (!ids)
+ return NULL;
+ while (*nr_ids < max_nr_ids && pos < len) {
+ ret = sscanf(&str[pos], "%lu%n", &id, &parsed);
+ pos += parsed;
+ if (ret != 1)
+ break;
+ ids[*nr_ids] = id;
+ *nr_ids += 1;
+ }
+
+ return ids;
+}
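
str_to_target_ids() relies on the '%lu%n' sscanf idiom: '%n' records how many
characters a successful conversion consumed, so the scan position can advance
past each parsed token and the loop stops at the first non-numeric one. A
minimal userspace illustration of the same idiom (a standalone sketch, not
kernel code):

	#include <stdio.h>

	int main(void)
	{
		const char *str = "12 35 abc 46";
		unsigned long id;
		int pos = 0, parsed;

		/* Mirrors the kernel loop: stops at the first non-numeric token. */
		while (sscanf(&str[pos], "%lu%n", &id, &parsed) == 1) {
			pos += parsed;
			printf("%lu\n", id);	/* prints 12, then 35 */
		}
		return 0;
	}

This matches the "12 35 abc 46" case in dbgfs-test.h above, which expects
exactly two parsed integers.
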
+
+static void dbgfs_put_pids(unsigned long *ids, int nr_ids)
+{
+ int i;
+
+ for (i = 0; i < nr_ids; i++)
+ put_pid((struct pid *)ids[i]);
+}
+
+static ssize_t dbgfs_target_ids_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct damon_ctx *ctx = file->private_data;
+ char *kbuf, *nrs;
+ unsigned long *targets;
+ ssize_t nr_targets;
+ ssize_t ret = count;
+ int i;
+ int err;
+
+ kbuf = user_input_str(buf, count, ppos);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ nrs = kbuf;
+
+ targets = str_to_target_ids(nrs, ret, &nr_targets);
+ if (!targets) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (targetid_is_pid(ctx)) {
+ for (i = 0; i < nr_targets; i++) {
+ targets[i] = (unsigned long)find_get_pid(
+ (int)targets[i]);
+ if (!targets[i]) {
+ dbgfs_put_pids(targets, i);
+ ret = -EINVAL;
+ goto free_targets_out;
+ }
+ }
+ }
+
+ mutex_lock(&ctx->kdamond_lock);
+ if (ctx->kdamond) {
+ if (targetid_is_pid(ctx))
+ dbgfs_put_pids(targets, nr_targets);
+ ret = -EBUSY;
+ goto unlock_out;
+ }
+
+ err = damon_set_targets(ctx, targets, nr_targets);
+ if (err) {
+ if (targetid_is_pid(ctx))
+ dbgfs_put_pids(targets, nr_targets);
+ ret = err;
+ }
+
+unlock_out:
+ mutex_unlock(&ctx->kdamond_lock);
+free_targets_out:
+ kfree(targets);
+out:
+ kfree(kbuf);
+ return ret;
+}
+
+static ssize_t dbgfs_kdamond_pid_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct damon_ctx *ctx = file->private_data;
+ char *kbuf;
+ ssize_t len;
+
+ kbuf = kmalloc(count, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ mutex_lock(&ctx->kdamond_lock);
+ if (ctx->kdamond)
+ len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
+ else
+ len = scnprintf(kbuf, count, "none\n");
+ mutex_unlock(&ctx->kdamond_lock);
+ if (!len)
+ goto out;
+ len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
+
+out:
+ kfree(kbuf);
+ return len;
+}
+
+static int damon_dbgfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static const struct file_operations attrs_fops = {
+ .open = damon_dbgfs_open,
+ .read = dbgfs_attrs_read,
+ .write = dbgfs_attrs_write,
+};
+
+static const struct file_operations target_ids_fops = {
+ .open = damon_dbgfs_open,
+ .read = dbgfs_target_ids_read,
+ .write = dbgfs_target_ids_write,
+};
+
+static const struct file_operations kdamond_pid_fops = {
+ .open = damon_dbgfs_open,
+ .read = dbgfs_kdamond_pid_read,
+};
+
+static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
+{
+ const char * const file_names[] = {"attrs", "target_ids",
+ "kdamond_pid"};
+ const struct file_operations *fops[] = {&attrs_fops, &target_ids_fops,
+ &kdamond_pid_fops};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(file_names); i++)
+ debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
+}
+
+static int dbgfs_before_terminate(struct damon_ctx *ctx)
+{
+ struct damon_target *t, *next;
+
+ if (!targetid_is_pid(ctx))
+ return 0;
+
+ damon_for_each_target_safe(t, next, ctx) {
+ put_pid((struct pid *)t->id);
+ damon_destroy_target(t);
+ }
+ return 0;
+}
+
+static struct damon_ctx *dbgfs_new_ctx(void)
+{
+ struct damon_ctx *ctx;
+
+ ctx = damon_new_ctx();
+ if (!ctx)
+ return NULL;
+
+ damon_va_set_primitives(ctx);
+ ctx->callback.before_terminate = dbgfs_before_terminate;
+ return ctx;
+}
+
+static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
+{
+ damon_destroy_ctx(ctx);
+}
+
+/*
+ * Make a context of @name and create a debugfs directory for it.
+ *
+ * This function should be called while holding damon_dbgfs_lock.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int dbgfs_mk_context(char *name)
+{
+ struct dentry *root, **new_dirs, *new_dir;
+ struct damon_ctx **new_ctxs, *new_ctx;
+
+ if (damon_nr_running_ctxs())
+ return -EBUSY;
+
+ new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
+ (dbgfs_nr_ctxs + 1), GFP_KERNEL);
+ if (!new_ctxs)
+ return -ENOMEM;
+ dbgfs_ctxs = new_ctxs;
+
+ new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
+ (dbgfs_nr_ctxs + 1), GFP_KERNEL);
+ if (!new_dirs)
+ return -ENOMEM;
+ dbgfs_dirs = new_dirs;
+
+ root = dbgfs_dirs[0];
+ if (!root)
+ return -ENOENT;
+
+ new_dir = debugfs_create_dir(name, root);
+ dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
+
+ new_ctx = dbgfs_new_ctx();
+ if (!new_ctx) {
+ debugfs_remove(new_dir);
+ dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
+ return -ENOMEM;
+ }
+
+ dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
+ dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
+ dbgfs_ctxs[dbgfs_nr_ctxs]);
+ dbgfs_nr_ctxs++;
+
+ return 0;
+}
+
+static ssize_t dbgfs_mk_context_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ char *kbuf;
+ char *ctx_name;
+ ssize_t ret = count;
+ int err;
+
+ kbuf = user_input_str(buf, count, ppos);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+ ctx_name = kmalloc(count + 1, GFP_KERNEL);
+ if (!ctx_name) {
+ kfree(kbuf);
+ return -ENOMEM;
+ }
+
+ /* Trim white space */
+ if (sscanf(kbuf, "%s", ctx_name) != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&damon_dbgfs_lock);
+ err = dbgfs_mk_context(ctx_name);
+ if (err)
+ ret = err;
+ mutex_unlock(&damon_dbgfs_lock);
+
+out:
+ kfree(kbuf);
+ kfree(ctx_name);
+ return ret;
+}
+
+/*
+ * Remove a context of @name and its debugfs directory.
+ *
+ * This function should be called while holding damon_dbgfs_lock.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int dbgfs_rm_context(char *name)
+{
+ struct dentry *root, *dir, **new_dirs;
+ struct damon_ctx **new_ctxs;
+ int i, j;
+
+ if (damon_nr_running_ctxs())
+ return -EBUSY;
+
+ root = dbgfs_dirs[0];
+ if (!root)
+ return -ENOENT;
+
+ dir = debugfs_lookup(name, root);
+ if (!dir)
+ return -ENOENT;
+
+ new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
+ GFP_KERNEL);
+ if (!new_dirs)
+ return -ENOMEM;
+
+ new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
+ GFP_KERNEL);
+ if (!new_ctxs) {
+ kfree(new_dirs);
+ return -ENOMEM;
+ }
+
+ for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
+ if (dbgfs_dirs[i] == dir) {
+ debugfs_remove(dbgfs_dirs[i]);
+ dbgfs_destroy_ctx(dbgfs_ctxs[i]);
+ continue;
+ }
+ new_dirs[j] = dbgfs_dirs[i];
+ new_ctxs[j++] = dbgfs_ctxs[i];
+ }
+
+ kfree(dbgfs_dirs);
+ kfree(dbgfs_ctxs);
+
+ dbgfs_dirs = new_dirs;
+ dbgfs_ctxs = new_ctxs;
+ dbgfs_nr_ctxs--;
+
+ return 0;
+}
+
+static ssize_t dbgfs_rm_context_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ char *kbuf;
+ ssize_t ret = count;
+ int err;
+ char *ctx_name;
+
+ kbuf = user_input_str(buf, count, ppos);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+ ctx_name = kmalloc(count + 1, GFP_KERNEL);
+ if (!ctx_name) {
+ kfree(kbuf);
+ return -ENOMEM;
+ }
+
+ /* Trim white space */
+ if (sscanf(kbuf, "%s", ctx_name) != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&damon_dbgfs_lock);
+ err = dbgfs_rm_context(ctx_name);
+ if (err)
+ ret = err;
+ mutex_unlock(&damon_dbgfs_lock);
+
+out:
+ kfree(kbuf);
+ kfree(ctx_name);
+ return ret;
+}
+
+static ssize_t dbgfs_monitor_on_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ char monitor_on_buf[5];
+ bool monitor_on = damon_nr_running_ctxs() != 0;
+ int len;
+
+ len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
+
+ return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
+}
+
+static ssize_t dbgfs_monitor_on_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ ssize_t ret = count;
+ char *kbuf;
+ int err;
+
+ kbuf = user_input_str(buf, count, ppos);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ /* Remove white space */
+ if (sscanf(kbuf, "%s", kbuf) != 1) {
+ kfree(kbuf);
+ return -EINVAL;
+ }
+
+ if (!strncmp(kbuf, "on", count))
+ err = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs);
+ else if (!strncmp(kbuf, "off", count))
+ err = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
+ else
+ err = -EINVAL;
+
+ if (err)
+ ret = err;
+ kfree(kbuf);
+ return ret;
+}
+
+static const struct file_operations mk_contexts_fops = {
+ .write = dbgfs_mk_context_write,
+};
+
+static const struct file_operations rm_contexts_fops = {
+ .write = dbgfs_rm_context_write,
+};
+
+static const struct file_operations monitor_on_fops = {
+ .read = dbgfs_monitor_on_read,
+ .write = dbgfs_monitor_on_write,
+};
+
+static int __init __damon_dbgfs_init(void)
+{
+ struct dentry *dbgfs_root;
+ const char * const file_names[] = {"mk_contexts", "rm_contexts",
+ "monitor_on"};
+ const struct file_operations *fops[] = {&mk_contexts_fops,
+ &rm_contexts_fops, &monitor_on_fops};
+ int i;
+
+ dbgfs_root = debugfs_create_dir("damon", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(file_names); i++)
+ debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
+ fops[i]);
+ dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);
+
+ dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL);
+ if (!dbgfs_dirs) {
+ debugfs_remove(dbgfs_root);
+ return -ENOMEM;
+ }
+ dbgfs_dirs[0] = dbgfs_root;
+
+ return 0;
+}
+
+/*
+ * Functions for the initialization
+ */
+
+static int __init damon_dbgfs_init(void)
+{
+ int rc;
+
+ dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
+ if (!dbgfs_ctxs)
+ return -ENOMEM;
+ dbgfs_ctxs[0] = dbgfs_new_ctx();
+ if (!dbgfs_ctxs[0]) {
+ kfree(dbgfs_ctxs);
+ return -ENOMEM;
+ }
+ dbgfs_nr_ctxs = 1;
+
+ rc = __damon_dbgfs_init();
+ if (rc) {
+ kfree(dbgfs_ctxs[0]);
+ kfree(dbgfs_ctxs);
+ pr_err("%s: dbgfs init failed\n", __func__);
+ }
+
+ return rc;
+}
+
+module_init(damon_dbgfs_init);
+
+#include "dbgfs-test.h"
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
new file mode 100644
index 000000000000..1f5c13257dba
--- /dev/null
+++ b/mm/damon/vaddr-test.h
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
+
+#ifndef _DAMON_VADDR_TEST_H
+#define _DAMON_VADDR_TEST_H
+
+#include <kunit/test.h>
+
+static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas)
+{
+ int i, j;
+ unsigned long largest_gap, gap;
+
+ if (!nr_vmas)
+ return;
+
+ for (i = 0; i < nr_vmas - 1; i++) {
+ vmas[i].vm_next = &vmas[i + 1];
+
+ vmas[i].vm_rb.rb_left = NULL;
+ vmas[i].vm_rb.rb_right = &vmas[i + 1].vm_rb;
+
+ largest_gap = 0;
+ for (j = i; j < nr_vmas; j++) {
+ if (j == 0)
+ continue;
+ gap = vmas[j].vm_start - vmas[j - 1].vm_end;
+ if (gap > largest_gap)
+ largest_gap = gap;
+ }
+ vmas[i].rb_subtree_gap = largest_gap;
+ }
+ vmas[i].vm_next = NULL;
+ vmas[i].vm_rb.rb_right = NULL;
+ vmas[i].rb_subtree_gap = 0;
+}
+
+/*
+ * Test __damon_va_three_regions() function
+ *
+ * In the case of virtual address space monitoring, DAMON converts the
+ * complex and dynamic memory mappings of each target task to three
+ * discontiguous regions which cover every mapped area. However, the three
+ * regions should not include the two biggest unmapped areas in the original
+ * mapping, because those are normally the areas between 1) the heap and the
+ * mmap()-ed regions, and 2) the mmap()-ed regions and the stack. Because
+ * these two unmapped areas are very huge but obviously never accessed,
+ * covering them would be just a waste.
+ *
+ * __damon_va_three_regions() receives the address space of a process. It
+ * first identifies the start of mappings, end of mappings, and the two biggest
+ * unmapped areas. After that, based on the information, it constructs the
+ * three regions and returns. For more detail, refer to the comment of the
+ * '__damon_va_init_regions()' function definition in the 'mm/damon/vaddr.c' file.
+ *
+ * For example, suppose virtual address ranges of 10-20, 20-25, 200-210,
+ * 210-220, 300-305, and 307-330 (other comments represent these mappings in
+ * shorter form: 10-20-25, 200-210-220, 300-305, 307-330) of a process are
+ * mapped. To cover every mapping, the three regions should start with 10 and
+ * end with 330. The process also has three unmapped areas: 25-200, 220-300,
+ * and 305-307. Among those, 25-200 and 220-300 are the two biggest unmapped
+ * areas, and thus the mappings should be converted to three regions of
+ * 10-25, 200-220, and 300-330.
+ */
+static void damon_test_three_regions_in_vmas(struct kunit *test)
+{
+ struct damon_addr_range regions[3] = {0,};
+ /* 10-20-25, 200-210-220, 300-305, 307-330 */
+ struct vm_area_struct vmas[] = {
+ (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},
+ (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},
+ (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},
+ (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},
+ (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},
+ (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
+ };
+
+ __link_vmas(vmas, 6);
+
+ __damon_va_three_regions(&vmas[0], regions);
+
+ KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);
+ KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);
+ KUNIT_EXPECT_EQ(test, 200ul, regions[1].start);
+ KUNIT_EXPECT_EQ(test, 220ul, regions[1].end);
+ KUNIT_EXPECT_EQ(test, 300ul, regions[2].start);
+ KUNIT_EXPECT_EQ(test, 330ul, regions[2].end);
+}
+
+static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
+{
+ struct damon_region *r;
+ unsigned int i = 0;
+
+ damon_for_each_region(r, t) {
+ if (i++ == idx)
+ return r;
+ }
+
+ return NULL;
+}
+
+/*
+ * Test 'damon_va_apply_three_regions()'
+ *
+ * test kunit object
+ * regions an array containing start/end addresses of current
+ * monitoring target regions
+ * nr_regions the number of the addresses in 'regions'
+ * three_regions The three regions that need to be applied now
+ * expected start/end addresses of monitoring target regions that
+ * 'three_regions' are applied
+ * nr_expected the number of addresses in 'expected'
+ *
+ * The memory mapping of the target processes changes dynamically. To follow
+ * the change, DAMON periodically reads the mappings, simplifies it to the
+ * three regions, and updates the monitoring target regions to fit in the three
+ * regions. The update of current target regions is the role of
+ * 'damon_va_apply_three_regions()'.
+ *
+ * This test passes the given target regions and the new three regions that
+ * need to be applied to the function, and checks whether it updates the
+ * regions as expected.
+ */
+static void damon_do_test_apply_three_regions(struct kunit *test,
+ unsigned long *regions, int nr_regions,
+ struct damon_addr_range *three_regions,
+ unsigned long *expected, int nr_expected)
+{
+ struct damon_ctx *ctx = damon_new_ctx();
+ struct damon_target *t;
+ struct damon_region *r;
+ int i;
+
+ t = damon_new_target(42);
+ for (i = 0; i < nr_regions / 2; i++) {
+ r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
+ damon_add_region(r, t);
+ }
+ damon_add_target(ctx, t);
+
+ damon_va_apply_three_regions(t, three_regions);
+
+ for (i = 0; i < nr_expected / 2; i++) {
+ r = __nth_region_of(t, i);
+ KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
+ }
+
+ damon_destroy_ctx(ctx);
+}
+
+/*
+ * This function tests the most common case, where the three big regions are
+ * only slightly changed. Target regions should adjust their boundaries
+ * (10-20-30, 50-55, 70-80, 90-100) to fit the new big regions, or be removed
+ * (55-57, 57-59) if they are now out of the three regions.
+ */
+static void damon_test_apply_three_regions1(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-27, 45-55, 73-104 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 27},
+ (struct damon_addr_range){.start = 45, .end = 55},
+ (struct damon_addr_range){.start = 73, .end = 104} };
+ /* 5-20-27, 45-55, 73-80-90-104 */
+ unsigned long expected[] = {5, 20, 20, 27, 45, 55,
+ 73, 80, 80, 90, 90, 104};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+/*
+ * Test a slightly bigger change. Similar to above, but the second big region
+ * now requires two target regions (50-55, 57-59) to be removed.
+ */
+static void damon_test_apply_three_regions2(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-27, 56-57, 65-104 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 27},
+ (struct damon_addr_range){.start = 56, .end = 57},
+ (struct damon_addr_range){.start = 65, .end = 104} };
+ /* 5-20-27, 56-57, 65-80-90-104 */
+ unsigned long expected[] = {5, 20, 20, 27, 56, 57,
+ 65, 80, 80, 90, 90, 104};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+/*
+ * Test a big change. The second big region has been totally freed and mapped
+ * to a different area (50-59 -> 61-63). The target regions which were in the
+ * old second big region (50-55-57-59) should be removed, and a new target
+ * region covering the new second big region (61-63) should be created.
+ */
+static void damon_test_apply_three_regions3(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-27, 61-63, 65-104 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 27},
+ (struct damon_addr_range){.start = 61, .end = 63},
+ (struct damon_addr_range){.start = 65, .end = 104} };
+ /* 5-20-27, 61-63, 65-80-90-104 */
+ unsigned long expected[] = {5, 20, 20, 27, 61, 63,
+ 65, 80, 80, 90, 90, 104};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+/*
+ * Test another big change. Both the second and third big regions (50-59
+ * and 70-100) have been totally freed and mapped to different areas (30-32
+ * and 65-68). The target regions which were in the old second and third big
+ * regions should now be removed, and new target regions covering the new
+ * second and third big regions should be created.
+ */
+static void damon_test_apply_three_regions4(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-7, 30-32, 65-68 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 7},
+ (struct damon_addr_range){.start = 30, .end = 32},
+ (struct damon_addr_range){.start = 65, .end = 68} };
+ /* expect 5-7, 30-32, 65-68 */
+ unsigned long expected[] = {5, 7, 30, 32, 65, 68};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+static void damon_test_split_evenly(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+ struct damon_region *r;
+ unsigned long i;
+
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+ -EINVAL);
+
+ t = damon_new_target(42);
+ r = damon_new_region(0, 100);
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+
+ i = 0;
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
+ KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+ }
+ damon_free_target(t);
+
+ t = damon_new_target(42);
+ r = damon_new_region(5, 59);
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+
+ i = 0;
+ damon_for_each_region(r, t) {
+ if (i == 4)
+ break;
+ KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+ }
+ KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+ damon_free_target(t);
+
+ t = damon_new_target(42);
+ r = damon_new_region(5, 6);
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
+ }
+ damon_free_target(t);
+ damon_destroy_ctx(c);
+}
+
+static struct kunit_case damon_test_cases[] = {
+ KUNIT_CASE(damon_test_three_regions_in_vmas),
+ KUNIT_CASE(damon_test_apply_three_regions1),
+ KUNIT_CASE(damon_test_apply_three_regions2),
+ KUNIT_CASE(damon_test_apply_three_regions3),
+ KUNIT_CASE(damon_test_apply_three_regions4),
+ KUNIT_CASE(damon_test_split_evenly),
+ {},
+};
+
+static struct kunit_suite damon_test_suite = {
+ .name = "damon-primitives",
+ .test_cases = damon_test_cases,
+};
+kunit_test_suite(damon_test_suite);
+
+#endif /* _DAMON_VADDR_TEST_H */
+
+#endif /* CONFIG_DAMON_VADDR_KUNIT_TEST */
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
new file mode 100644
index 000000000000..58c1fb2aafa9
--- /dev/null
+++ b/mm/damon/vaddr.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DAMON Primitives for Virtual Address Spaces
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#define pr_fmt(fmt) "damon-va: " fmt
+
+#include <linux/damon.h>
+#include <linux/hugetlb.h>
+#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
+#include <linux/highmem.h>
+#include <linux/page_idle.h>
+#include <linux/pagewalk.h>
+#include <linux/random.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
+#undef DAMON_MIN_REGION
+#define DAMON_MIN_REGION 1
+#endif
+
+/* Get a random number in [l, r) */
+#define damon_rand(l, r) (l + prandom_u32_max(r - l))
+
+/*
+ * 't->id' should be the pointer to the relevant 'struct pid' having reference
+ * count. Caller must put the returned task, unless it is NULL.
+ */
+#define damon_get_task_struct(t) \
+ (get_pid_task((struct pid *)t->id, PIDTYPE_PID))
+
+/*
+ * Get the mm_struct of the given target
+ *
+ * Caller _must_ put the mm_struct after use, unless it is NULL.
+ *
+ * Returns the mm_struct of the target on success, NULL on failure
+ */
+static struct mm_struct *damon_get_mm(struct damon_target *t)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+
+ task = damon_get_task_struct(t);
+ if (!task)
+ return NULL;
+
+ mm = get_task_mm(task);
+ put_task_struct(task);
+ return mm;
+}
+
+/*
+ * Functions for the initial monitoring target regions construction
+ */
+
+/*
+ * Size-evenly split a region into 'nr_pieces' small regions
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
+static int damon_va_evenly_split_region(struct damon_target *t,
+ struct damon_region *r, unsigned int nr_pieces)
+{
+ unsigned long sz_orig, sz_piece, orig_end;
+ struct damon_region *n = NULL, *next;
+ unsigned long start;
+
+ if (!r || !nr_pieces)
+ return -EINVAL;
+
+ orig_end = r->ar.end;
+ sz_orig = r->ar.end - r->ar.start;
+ sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);
+
+ if (!sz_piece)
+ return -EINVAL;
+
+ r->ar.end = r->ar.start + sz_piece;
+ next = damon_next_region(r);
+ for (start = r->ar.end; start + sz_piece <= orig_end;
+ start += sz_piece) {
+ n = damon_new_region(start, start + sz_piece);
+ if (!n)
+ return -ENOMEM;
+ damon_insert_region(n, r, next, t);
+ r = n;
+ }
+ /* extend the last region to absorb any rounding remainder */
+ if (n)
+ n->ar.end = orig_end;
+
+ return 0;
+}
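
For instance, evenly splitting the region [5, 59) into five pieces with a
DAMON_MIN_REGION of 1 gives sz_piece = ALIGN_DOWN(54 / 5, 1) = 10, producing
[5, 15), [15, 25), [25, 35), [35, 45), and a last piece extended to [45, 59).
A standalone sketch of the arithmetic (values match the
damon_test_split_evenly case in vaddr-test.h above):

	#include <stdio.h>

	#define DAMON_MIN_REGION 1
	#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

	int main(void)
	{
		unsigned long start = 5, end = 59, nr_pieces = 5;
		unsigned long sz_piece = ALIGN_DOWN((end - start) / nr_pieces,
						    DAMON_MIN_REGION);
		unsigned long s, piece_end;

		for (s = start; s + sz_piece <= end; s += sz_piece) {
			/* The kernel extends the last piece to absorb rounding. */
			piece_end = (s + 2 * sz_piece > end) ? end : s + sz_piece;
			printf("[%lu, %lu)\n", s, piece_end);
		}
		return 0;
	}
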
+
+static unsigned long sz_range(struct damon_addr_range *r)
+{
+ return r->end - r->start;
+}
+
+static void swap_ranges(struct damon_addr_range *r1,
+ struct damon_addr_range *r2)
+{
+ struct damon_addr_range tmp;
+
+ tmp = *r1;
+ *r1 = *r2;
+ *r2 = tmp;
+}
+
+/*
+ * Find three regions separated by two biggest unmapped regions
+ *
+ * vma the head vma of the target address space
+ * regions an array of three address ranges that results will be saved
+ *
+ * This function receives an address space and finds three regions in it which
+ * are separated by the two biggest unmapped regions in the space. Please
+ * refer to the comments of the '__damon_va_init_regions()' function below to
+ * know why this is necessary.
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
+static int __damon_va_three_regions(struct vm_area_struct *vma,
+ struct damon_addr_range regions[3])
+{
+ struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
+ struct vm_area_struct *last_vma = NULL;
+ unsigned long start = 0;
+ struct rb_root rbroot;
+
+ /* Find two biggest gaps so that first_gap > second_gap > others */
+ for (; vma; vma = vma->vm_next) {
+ if (!last_vma) {
+ start = vma->vm_start;
+ goto next;
+ }
+
+ if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
+ rbroot.rb_node = &vma->vm_rb;
+ vma = rb_entry(rb_last(&rbroot),
+ struct vm_area_struct, vm_rb);
+ goto next;
+ }
+
+ gap.start = last_vma->vm_end;
+ gap.end = vma->vm_start;
+ if (sz_range(&gap) > sz_range(&second_gap)) {
+ swap_ranges(&gap, &second_gap);
+ if (sz_range(&second_gap) > sz_range(&first_gap))
+ swap_ranges(&second_gap, &first_gap);
+ }
+next:
+ last_vma = vma;
+ }
+
+ if (!sz_range(&second_gap) || !sz_range(&first_gap))
+ return -EINVAL;
+
+ /* Sort the two biggest gaps by address */
+ if (first_gap.start > second_gap.start)
+ swap_ranges(&first_gap, &second_gap);
+
+ /* Store the result */
+ regions[0].start = ALIGN(start, DAMON_MIN_REGION);
+ regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
+ regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
+ regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
+ regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
+ regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);
+
+ return 0;
+}
+
+/*
+ * Get the three regions in the given target (task)
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int damon_va_three_regions(struct damon_target *t,
+ struct damon_addr_range regions[3])
+{
+ struct mm_struct *mm;
+ int rc;
+
+ mm = damon_get_mm(t);
+ if (!mm)
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ rc = __damon_va_three_regions(mm->mmap, regions);
+ mmap_read_unlock(mm);
+
+ mmput(mm);
+ return rc;
+}
+
+/*
+ * Initialize the monitoring target regions for the given target (task)
+ *
+ * t the given target
+ *
+ * Because only a small number of portions of the entire address space is
+ * actually mapped to memory and accessed, monitoring the unmapped regions
+ * is wasteful. Besides, because we can deal with small amounts of noise,
+ * tracking every mapping is not strictly required, and could even incur a
+ * high overhead if the mappings frequently change or the number of mappings
+ * is high. The adaptive regions adjustment mechanism will further help to
+ * deal with the noise by simply identifying the unmapped areas as regions
+ * that have no access. Moreover, applying the real mappings, which would
+ * have many unmapped areas inside, would make the adaptive mechanism quite
+ * complex. For these reasons, overly huge unmapped areas inside the
+ * monitoring targets should be removed so that the adaptive mechanism does
+ * not waste time on them.
+ *
+ * For the reason, we convert the complex mappings to three distinct regions
+ * that cover every mapped area of the address space. Also the two gaps
+ * between the three regions are the two biggest unmapped areas in the given
+ * address space. In detail, this function first identifies the start and the
+ * end of the mappings and the two biggest unmapped areas of the address space.
+ * Then, it constructs the three regions as below:
+ *
+ * [mappings[0]->start, big_two_unmapped_areas[0]->start)
+ * [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
+ * [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
+ *
+ * As the usual memory map of a process is as below, the gap between the heap
+ * and the uppermost mmap()-ed region, and the gap between the lowermost
+ * mmap()-ed region and the stack, will be the two biggest unmapped regions.
+ * Because these gaps are exceptionally huge in a usual address space,
+ * excluding these two biggest unmapped regions will be a sufficient trade-off.
+ *
+ * <heap>
+ * <BIG UNMAPPED REGION 1>
+ * <uppermost mmap()-ed region>
+ * (other mmap()-ed regions and small unmapped regions)
+ * <lowermost mmap()-ed region>
+ * <BIG UNMAPPED REGION 2>
+ * <stack>
+ */
+static void __damon_va_init_regions(struct damon_ctx *ctx,
+ struct damon_target *t)
+{
+ struct damon_region *r;
+ struct damon_addr_range regions[3];
+ unsigned long sz = 0, nr_pieces;
+ int i;
+
+ if (damon_va_three_regions(t, regions)) {
+ pr_err("Failed to get three regions of target %lu\n", t->id);
+ return;
+ }
+
+ for (i = 0; i < 3; i++)
+ sz += regions[i].end - regions[i].start;
+ if (ctx->min_nr_regions)
+ sz /= ctx->min_nr_regions;
+ if (sz < DAMON_MIN_REGION)
+ sz = DAMON_MIN_REGION;
+
+ /* Set the initial three regions of the target */
+ for (i = 0; i < 3; i++) {
+ r = damon_new_region(regions[i].start, regions[i].end);
+ if (!r) {
+ pr_err("%d'th init region creation failed\n", i);
+ return;
+ }
+ damon_add_region(r, t);
+
+ nr_pieces = (regions[i].end - regions[i].start) / sz;
+ damon_va_evenly_split_region(t, r, nr_pieces);
+ }
+}
+
+/* Initialize '->regions_list' of every target (task) */
+void damon_va_init(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+
+ damon_for_each_target(t, ctx) {
+ /* the user may set the target regions as they want */
+ if (!damon_nr_regions(t))
+ __damon_va_init_regions(ctx, t);
+ }
+}
+
+/*
+ * Functions for the dynamic monitoring target regions update
+ */
+
+/*
+ * Check whether a region is intersecting an address range
+ *
+ * Returns true if it is.
+ */
+static bool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
+{
+ return !(r->ar.end <= re->start || re->end <= r->ar.start);
+}
+
+/*
+ * Update damon regions for the three big regions of the given target
+ *
+ * t the given target
+ * bregions the three big regions of the target
+ */
+static void damon_va_apply_three_regions(struct damon_target *t,
+ struct damon_addr_range bregions[3])
+{
+ struct damon_region *r, *next;
+ unsigned int i = 0;
+
+ /* Remove regions which are not in the three big regions now */
+ damon_for_each_region_safe(r, next, t) {
+ for (i = 0; i < 3; i++) {
+ if (damon_intersect(r, &bregions[i]))
+ break;
+ }
+ if (i == 3)
+ damon_destroy_region(r, t);
+ }
+
+ /* Adjust intersecting regions to fit with the three big regions */
+ for (i = 0; i < 3; i++) {
+ struct damon_region *first = NULL, *last;
+ struct damon_region *newr;
+ struct damon_addr_range *br;
+
+ br = &bregions[i];
+ /* Get the first and last regions which intersects with br */
+ damon_for_each_region(r, t) {
+ if (damon_intersect(r, br)) {
+ if (!first)
+ first = r;
+ last = r;
+ }
+ if (r->ar.start >= br->end)
+ break;
+ }
+ if (!first) {
+ /* no damon_region intersects with this big region */
+ newr = damon_new_region(
+ ALIGN_DOWN(br->start,
+ DAMON_MIN_REGION),
+ ALIGN(br->end, DAMON_MIN_REGION));
+ if (!newr)
+ continue;
+ damon_insert_region(newr, damon_prev_region(r), r, t);
+ } else {
+ first->ar.start = ALIGN_DOWN(br->start,
+ DAMON_MIN_REGION);
+ last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
+ }
+ }
+}
+
+/*
+ * Update regions for current memory mappings
+ */
+void damon_va_update(struct damon_ctx *ctx)
+{
+ struct damon_addr_range three_regions[3];
+ struct damon_target *t;
+
+ damon_for_each_target(t, ctx) {
+ if (damon_va_three_regions(t, three_regions))
+ continue;
+ damon_va_apply_three_regions(t, three_regions);
+ }
+}
+
+/*
+ * Get an online page for a pfn if it's in the LRU list. Otherwise, returns
+ * NULL.
+ *
+ * The body of this function is stolen from 'page_idle_get_page()'. We steal
+ * rather than reuse it because the code is quite simple.
+ */
+static struct page *damon_get_page(unsigned long pfn)
+{
+ struct page *page = pfn_to_online_page(pfn);
+
+ if (!page || !PageLRU(page) || !get_page_unless_zero(page))
+ return NULL;
+
+ if (unlikely(!PageLRU(page))) {
+ put_page(page);
+ page = NULL;
+ }
+ return page;
+}
+
+static void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm,
+ unsigned long addr)
+{
+ bool referenced = false;
+ struct page *page = damon_get_page(pte_pfn(*pte));
+
+ if (!page)
+ return;
+
+ if (pte_young(*pte)) {
+ referenced = true;
+ *pte = pte_mkold(*pte);
+ }
+
+#ifdef CONFIG_MMU_NOTIFIER
+ if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
+ referenced = true;
+#endif /* CONFIG_MMU_NOTIFIER */
+
+ if (referenced)
+ set_page_young(page);
+
+ set_page_idle(page);
+ put_page(page);
+}
+
+static void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm,
+ unsigned long addr)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bool referenced = false;
+ struct page *page = damon_get_page(pmd_pfn(*pmd));
+
+ if (!page)
+ return;
+
+ if (pmd_young(*pmd)) {
+ referenced = true;
+ *pmd = pmd_mkold(*pmd);
+ }
+
+#ifdef CONFIG_MMU_NOTIFIER
+ if (mmu_notifier_clear_young(mm, addr,
+ addr + ((1UL) << HPAGE_PMD_SHIFT)))
+ referenced = true;
+#endif /* CONFIG_MMU_NOTIFIER */
+
+ if (referenced)
+ set_page_young(page);
+
+ set_page_idle(page);
+ put_page(page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+
+static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ if (pmd_huge(*pmd)) {
+ ptl = pmd_lock(walk->mm, pmd);
+ if (pmd_huge(*pmd)) {
+ damon_pmdp_mkold(pmd, walk->mm, addr);
+ spin_unlock(ptl);
+ return 0;
+ }
+ spin_unlock(ptl);
+ }
+
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+ return 0;
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte_present(*pte))
+ goto out;
+ damon_ptep_mkold(pte, walk->mm, addr);
+out:
+ pte_unmap_unlock(pte, ptl);
+ return 0;
+}
+
+static const struct mm_walk_ops damon_mkold_ops = {
+ .pmd_entry = damon_mkold_pmd_entry,
+};
+
+static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
+{
+ mmap_read_lock(mm);
+ walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
+ mmap_read_unlock(mm);
+}
+
+/*
+ * Functions for the access checking of the regions
+ */
+
+static void damon_va_prepare_access_check(struct damon_ctx *ctx,
+ struct mm_struct *mm, struct damon_region *r)
+{
+ r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
+
+ damon_va_mkold(mm, r->sampling_addr);
+}
+
+void damon_va_prepare_access_checks(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ struct mm_struct *mm;
+ struct damon_region *r;
+
+ damon_for_each_target(t, ctx) {
+ mm = damon_get_mm(t);
+ if (!mm)
+ continue;
+ damon_for_each_region(r, t)
+ damon_va_prepare_access_check(ctx, mm, r);
+ mmput(mm);
+ }
+}
+
+struct damon_young_walk_private {
+ unsigned long *page_sz;
+ bool young;
+};
+
+static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t *pte;
+ spinlock_t *ptl;
+ struct page *page;
+ struct damon_young_walk_private *priv = walk->private;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_huge(*pmd)) {
+ ptl = pmd_lock(walk->mm, pmd);
+ if (!pmd_huge(*pmd)) {
+ spin_unlock(ptl);
+ goto regular_page;
+ }
+ page = damon_get_page(pmd_pfn(*pmd));
+ if (!page)
+ goto huge_out;
+ if (pmd_young(*pmd) || !page_is_idle(page) ||
+ mmu_notifier_test_young(walk->mm,
+ addr)) {
+ *priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
+ priv->young = true;
+ }
+ put_page(page);
+huge_out:
+ spin_unlock(ptl);
+ return 0;
+ }
+
+regular_page:
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+ return -EINVAL;
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte_present(*pte))
+ goto out;
+ page = damon_get_page(pte_pfn(*pte));
+ if (!page)
+ goto out;
+ if (pte_young(*pte) || !page_is_idle(page) ||
+ mmu_notifier_test_young(walk->mm, addr)) {
+ *priv->page_sz = PAGE_SIZE;
+ priv->young = true;
+ }
+ put_page(page);
+out:
+ pte_unmap_unlock(pte, ptl);
+ return 0;
+}
+
+static const struct mm_walk_ops damon_young_ops = {
+ .pmd_entry = damon_young_pmd_entry,
+};
+
+static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
+ unsigned long *page_sz)
+{
+ struct damon_young_walk_private arg = {
+ .page_sz = page_sz,
+ .young = false,
+ };
+
+ mmap_read_lock(mm);
+ walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
+ mmap_read_unlock(mm);
+ return arg.young;
+}
+
+/*
+ * Check whether the region was accessed after the last preparation
+ *
+ * mm 'mm_struct' for the given virtual address space
+ * r the region to be checked
+ */
+static void damon_va_check_access(struct damon_ctx *ctx,
+ struct mm_struct *mm, struct damon_region *r)
+{
+ static struct mm_struct *last_mm;
+ static unsigned long last_addr;
+ static unsigned long last_page_sz = PAGE_SIZE;
+ static bool last_accessed;
+
+ /* If the region is in the last checked page, reuse the result */
+ if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+ if (last_accessed)
+ r->nr_accesses++;
+ return;
+ }
+
+ last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+ if (last_accessed)
+ r->nr_accesses++;
+
+ last_mm = mm;
+ last_addr = r->sampling_addr;
+}
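
The function-local statics above cache the last check so that consecutive
regions whose sampling addresses fall into the same (possibly huge) page of
the same mm skip the page table walk. A standalone sketch of the
page-equality test (the addresses are hypothetical):

	#include <stdio.h>

	#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

	int main(void)
	{
		unsigned long last_addr = 0x201340, addr = 0x201ff8;
		unsigned long page_sz = 0x1000;	/* 4 KiB base page */

		/* Both addresses fall into page 0x201000, so the cached
		 * access result would be reused. */
		printf("same page: %d\n",
		       ALIGN_DOWN(last_addr, page_sz) ==
		       ALIGN_DOWN(addr, page_sz));
		return 0;
	}
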
+
+unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ struct mm_struct *mm;
+ struct damon_region *r;
+ unsigned int max_nr_accesses = 0;
+
+ damon_for_each_target(t, ctx) {
+ mm = damon_get_mm(t);
+ if (!mm)
+ continue;
+ damon_for_each_region(r, t) {
+ damon_va_check_access(ctx, mm, r);
+ max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
+ }
+ mmput(mm);
+ }
+
+ return max_nr_accesses;
+}
+
+/*
+ * Functions for the target validity check and cleanup
+ */
+
+bool damon_va_target_valid(void *target)
+{
+ struct damon_target *t = target;
+ struct task_struct *task;
+
+ task = damon_get_task_struct(t);
+ if (task) {
+ put_task_struct(task);
+ return true;
+ }
+
+ return false;
+}
+
+void damon_va_set_primitives(struct damon_ctx *ctx)
+{
+ ctx->primitive.init = damon_va_init;
+ ctx->primitive.update = damon_va_update;
+ ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
+ ctx->primitive.check_accesses = damon_va_check_accesses;
+ ctx->primitive.reset_aggregated = NULL;
+ ctx->primitive.target_valid = damon_va_target_valid;
+ ctx->primitive.cleanup = NULL;
+}
+
+#include "vaddr-test.h"
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index 164607c7cdf1..74984c23a87e 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -38,13 +38,8 @@ pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
return prot;
}
-void __init __weak early_ioremap_shutdown(void)
-{
-}
-
void __init early_ioremap_reset(void)
{
- early_ioremap_shutdown();
after_paging_init = 1;
}
diff --git a/mm/highmem.c b/mm/highmem.c
index 4fb51d735aa6..4212ad0e4a19 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -436,7 +436,7 @@ EXPORT_SYMBOL(zero_user_segments);
static inline int kmap_local_idx_push(void)
{
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
current->kmap_ctrl.idx += KM_INCR;
BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
return current->kmap_ctrl.idx - 1;
diff --git a/mm/hmm.c b/mm/hmm.c
index fad6be2bf072..842e26599238 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -295,10 +295,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
goto fault;
/*
+ * Bypass devmap pte such as DAX page when all pfn requested
+ * flags (pfn_req_flags) are fulfilled.
* Since each architecture defines a struct page for the zero page, just
* fall through and treat it like a normal page.
*/
- if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
+ if (pte_special(pte) && !pte_devmap(pte) &&
+ !is_zero_pfn(pte_pfn(pte))) {
if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
pte_unmap(ptep);
return -EFAULT;
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 8ee0136f8cb0..5fe598ecd9b7 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -8,33 +8,9 @@
*/
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
-#include <asm/cacheflush.h>
-#include "pgalloc-track.h"
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;
-
-static int __init set_nohugeiomap(char *str)
-{
- iomap_max_page_shift = PAGE_SHIFT;
- return 0;
-}
-early_param("nohugeiomap", set_nohugeiomap);
-#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-int ioremap_page_range(unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
-}
-
-#ifdef CONFIG_GENERIC_IOREMAP
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
unsigned long offset, vaddr;
@@ -71,4 +47,3 @@ void iounmap(volatile void __iomem *addr)
vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
-#endif /* CONFIG_GENERIC_IOREMAP */
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 575c685aa642..7a97db8bc8e7 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -20,6 +20,7 @@
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
+#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -196,6 +197,8 @@ static noinline void metadata_update_state(struct kfence_metadata *meta,
*/
track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
track->pid = task_pid_nr(current);
+ track->cpu = raw_smp_processor_id();
+ track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
/*
* Pairs with READ_ONCE() in
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 24065321ff8a..c1f23c61e5f9 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -36,6 +36,8 @@ enum kfence_object_state {
/* Alloc/free tracking information. */
struct kfence_track {
pid_t pid;
+ int cpu;
+ u64 ts_nsec;
int num_stack_entries;
unsigned long stack_entries[KFENCE_STACK_DEPTH];
};
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index eb6307c199ea..f1690cf54199 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -800,6 +800,9 @@ static int test_init(struct kunit *test)
unsigned long flags;
int i;
+ if (!__kfence_pool)
+ return -EINVAL;
+
spin_lock_irqsave(&observed.lock, flags);
for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
observed.lines[i][0] = '\0';
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 4b891dd75650..f93a7b2a338b 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/lockdep.h>
+#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
@@ -100,6 +101,13 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat
bool show_alloc)
{
const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
+ u64 ts_sec = track->ts_nsec;
+ unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
+
+ /* Timestamp matches printk timestamp format. */
+ seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
+ show_alloc ? "allocated" : "freed", track->pid,
+ track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);
if (track->num_stack_entries) {
/* Skip allocation/free internals stack. */
@@ -126,15 +134,14 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met
return;
}
- seq_con_printf(seq,
- "kfence-#%td [0x%p-0x%p"
- ", size=%d, cache=%s] allocated by task %d:\n",
- meta - kfence_metadata, (void *)start, (void *)(start + size - 1), size,
- (cache && cache->name) ? cache->name : "<destroyed>", meta->alloc_track.pid);
+ seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
+ meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
+ size, (cache && cache->name) ? cache->name : "<destroyed>");
+
kfence_print_stack(seq, meta, true);
if (meta->state == KFENCE_OBJECT_FREED) {
- seq_con_printf(seq, "\nfreed by task %d:\n", meta->free_track.pid);
+ seq_con_printf(seq, "\n");
kfence_print_stack(seq, meta, false);
}
}
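
do_div() divides the 64-bit timestamp in place and returns the remainder,
which is how the report derives the printk-style seconds.microseconds value.
A standalone userspace sketch of the same split using plain 64-bit arithmetic
(the timestamp value is made up):

	#include <inttypes.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t ts_nsec = 123456789012ULL; /* hypothetical clock value */
		uint64_t ts_sec = ts_nsec / NSEC_PER_SEC;
		unsigned long rem_nsec = ts_nsec % NSEC_PER_SEC;

		/* Same format as the report: prints "123.456789s" */
		printf("%" PRIu64 ".%06lus\n", ts_sec, rem_nsec / 1000);
		return 0;
	}
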
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 73d46d16d575..b57383c17cf6 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -113,7 +113,8 @@
#define BYTES_PER_POINTER sizeof(void *)
/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+ __GFP_NOLOCKDEP)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
__GFP_NOWARN)
@@ -598,7 +599,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
object->checksum = 0;
/* task information */
- if (in_irq()) {
+ if (in_hardirq()) {
object->pid = 0;
strncpy(object->comm, "hardirq", sizeof(object->comm));
} else if (in_serving_softirq()) {
diff --git a/mm/ksm.c b/mm/ksm.c
index 025338128cd9..a5716fdec1aa 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -651,10 +651,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
* from &migrate_nodes. This will verify that future list.h changes
* don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
*/
-#if defined(GCC_VERSION) && GCC_VERSION >= 40903
BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
-#endif
if (stable_node->head == &migrate_nodes)
list_del(&stable_node->list);
diff --git a/mm/maccess.c b/mm/maccess.c
index 3bd70405f2d8..d3f1a1f0b1c1 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -24,13 +24,21 @@ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
+ unsigned long align = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ align = (unsigned long)dst | (unsigned long)src;
+
if (!copy_from_kernel_nofault_allowed(src, size))
return -ERANGE;
pagefault_disable();
- copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
- copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
- copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
+ if (!(align & 7))
+ copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
+ if (!(align & 3))
+ copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
+ if (!(align & 1))
+ copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
pagefault_enable();
return 0;
@@ -50,10 +58,18 @@ EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
+ unsigned long align = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ align = (unsigned long)dst | (unsigned long)src;
+
pagefault_disable();
- copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
- copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
- copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
+ if (!(align & 7))
+ copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
+ if (!(align & 3))
+ copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
+ if (!(align & 1))
+ copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
pagefault_enable();
return 0;
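
The align mask ORs the source and destination addresses, so any set low bit
disables the copy loops that would require stricter alignment. A standalone
sketch of the gating (the addresses are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uintptr_t dst = 0x1004, src = 0x2002;
		unsigned long align = dst | src;	/* 0x3006 */

		/* Bit 0 is clear but bit 1 is set: u16 copies are allowed,
		 * u32 and u64 copies are not. */
		printf("u64 ok: %d\n", !(align & 7));	/* 0 */
		printf("u32 ok: %d\n", !(align & 3));	/* 0 */
		printf("u16 ok: %d\n", !(align & 1));	/* 1 */
		return 0;
	}
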
diff --git a/mm/memblock.c b/mm/memblock.c
index 0ab5a749bfa6..184dcd2e5d99 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -472,7 +472,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
kfree(old_array);
else if (old_array != memblock_memory_init_regions &&
old_array != memblock_reserved_init_regions)
- memblock_free(__pa(old_array), old_alloc_size);
+ memblock_free_ptr(old_array, old_alloc_size);
/*
* Reserve the new array if that comes from the memblock. Otherwise, we
@@ -796,6 +796,20 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
}
/**
+ * memblock_free_ptr - free boot memory allocation
+ * @ptr: starting address of the boot memory allocation
+ * @size: size of the boot memory block in bytes
+ *
+ * Free boot memory block previously allocated by memblock_alloc_xx() API.
+ * The freed memory will not be released to the buddy allocator.
+ */
+void __init_memblock memblock_free_ptr(void *ptr, size_t size)
+{
+ if (ptr)
+ memblock_free(__pa(ptr), size);
+}
+
+/**
* memblock_free - free boot memory block
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
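
memblock_free_ptr() saves callers the __pa() conversion when they hold the
virtual address returned by the allocation API. A minimal early-boot sketch
(the function and its buffer are hypothetical):

	#include <linux/memblock.h>

	static void __init example_boot_scratch(void)
	{
		size_t size = 4096;
		void *buf = memblock_alloc(size, SMP_CACHE_BYTES);

		if (!buf)
			return;

		/* ... use buf during early boot ... */

		/* Frees by virtual address; the memory never reaches the
		 * buddy allocator. */
		memblock_free_ptr(buf, size);
	}
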
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4c527a80b6c9..9fd0be32a281 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -52,6 +52,73 @@ module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
#endif
+enum {
+ ONLINE_POLICY_CONTIG_ZONES = 0,
+ ONLINE_POLICY_AUTO_MOVABLE,
+};
+
+static const char * const online_policy_to_str[] = {
+ [ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
+ [ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
+};
+
+static int set_online_policy(const char *val, const struct kernel_param *kp)
+{
+ int ret = sysfs_match_string(online_policy_to_str, val);
+
+ if (ret < 0)
+ return ret;
+ *((int *)kp->arg) = ret;
+ return 0;
+}
+
+static int get_online_policy(char *buffer, const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
+}
+
+/*
+ * memory_hotplug.online_policy: configure online behavior when onlining without
+ * specifying a zone (MMOP_ONLINE)
+ *
+ * "contig-zones": keep zone contiguous
+ * "auto-movable": online memory to ZONE_MOVABLE if the configuration
+ * (auto_movable_ratio, auto_movable_numa_aware) allows for it
+ */
+static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
+static const struct kernel_param_ops online_policy_ops = {
+ .set = set_online_policy,
+ .get = get_online_policy,
+};
+module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
+MODULE_PARM_DESC(online_policy,
+ "Set the online policy (\"contig-zones\", \"auto-movable\") "
+ "Default: \"contig-zones\"");
+
+/*
+ * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
+ *
+ * The ratio represents an upper limit and the kernel might decide to not
+ * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
+ * doesn't allow for more MOVABLE memory.
+ */
+static unsigned int auto_movable_ratio __read_mostly = 301;
+module_param(auto_movable_ratio, uint, 0644);
+MODULE_PARM_DESC(auto_movable_ratio,
+ "Set the maximum ratio of MOVABLE:KERNEL memory in the system "
+ "in percent for \"auto-movable\" online policy. Default: 301");
+
+/*
+ * memory_hotplug.auto_movable_numa_aware: consider numa node stats
+ */
+#ifdef CONFIG_NUMA
+static bool auto_movable_numa_aware __read_mostly = true;
+module_param(auto_movable_numa_aware, bool, 0644);
+MODULE_PARM_DESC(auto_movable_numa_aware,
+ "Consider numa node stats in addition to global stats in "
+ "\"auto-movable\" online policy. Default: true");
+#endif /* CONFIG_NUMA */
+
/*
* online_page_callback contains pointer to current page onlining function.
* Initially it is generic_online_page(). If it is required it could be
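
The memory_hotplug parameters added above belong to built-in code, so they can be set on the kernel command line or changed at runtime via sysfs. A usage sketch (the values shown are the defaults documented in the hunk):

	memory_hotplug.online_policy=contig-zones memory_hotplug.auto_movable_ratio=301

	# switch the policy at runtime (the parameters are 0644)
	echo auto-movable > /sys/module/memory_hotplug/parameters/online_policy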
@@ -410,15 +477,13 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
sizeof(struct page) * cur_nr_pages);
}
-#ifdef CONFIG_ZONE_DEVICE
/*
* Zone shrinking code cannot properly deal with ZONE_DEVICE. So
* we will not try to shrink the zones - which is okay as
* set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
*/
- if (zone_idx(zone) == ZONE_DEVICE)
+ if (zone_is_zone_device(zone))
return;
-#endif
clear_zone_contiguous(zone);
@@ -663,6 +728,109 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
set_zone_contiguous(zone);
}
+struct auto_movable_stats {
+ unsigned long kernel_early_pages;
+ unsigned long movable_pages;
+};
+
+static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
+ struct zone *zone)
+{
+ if (zone_idx(zone) == ZONE_MOVABLE) {
+ stats->movable_pages += zone->present_pages;
+ } else {
+ stats->kernel_early_pages += zone->present_early_pages;
+#ifdef CONFIG_CMA
+ /*
+ * CMA pages (never on hotplugged memory) behave like
+ * ZONE_MOVABLE.
+ */
+ stats->movable_pages += zone->cma_pages;
+ stats->kernel_early_pages -= zone->cma_pages;
+#endif /* CONFIG_CMA */
+ }
+}
+struct auto_movable_group_stats {
+ unsigned long movable_pages;
+ unsigned long req_kernel_early_pages;
+};
+
+static int auto_movable_stats_account_group(struct memory_group *group,
+ void *arg)
+{
+ const int ratio = READ_ONCE(auto_movable_ratio);
+ struct auto_movable_group_stats *stats = arg;
+ long pages;
+
+ /*
+ * We don't support modifying the config while the auto-movable online
+ * policy is already enabled. Just avoid the division by zero below.
+ */
+ if (!ratio)
+ return 0;
+
+ /*
+ * Calculate how many early kernel pages this group requires to
+ * satisfy the configured zone ratio.
+ */
+ pages = group->present_movable_pages * 100 / ratio;
+ pages -= group->present_kernel_pages;
+
+ if (pages > 0)
+ stats->req_kernel_early_pages += pages;
+ stats->movable_pages += group->present_movable_pages;
+ return 0;
+}
+
+static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
+ unsigned long nr_pages)
+{
+ unsigned long kernel_early_pages, movable_pages;
+ struct auto_movable_group_stats group_stats = {};
+ struct auto_movable_stats stats = {};
+ pg_data_t *pgdat = NODE_DATA(nid);
+ struct zone *zone;
+ int i;
+
+ /* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
+ if (nid == NUMA_NO_NODE) {
+ /* TODO: cache values */
+ for_each_populated_zone(zone)
+ auto_movable_stats_account_zone(&stats, zone);
+ } else {
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (populated_zone(zone))
+ auto_movable_stats_account_zone(&stats, zone);
+ }
+ }
+
+ kernel_early_pages = stats.kernel_early_pages;
+ movable_pages = stats.movable_pages;
+
+ /*
+ * Kernel memory inside a dynamic memory group allows for more MOVABLE
+ * memory within the same group. Remove the effect of all but the
+ * current group from the stats.
+ */
+ walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
+ group, &group_stats);
+ if (kernel_early_pages <= group_stats.req_kernel_early_pages)
+ return false;
+ kernel_early_pages -= group_stats.req_kernel_early_pages;
+ movable_pages -= group_stats.movable_pages;
+
+ if (group && group->is_dynamic)
+ kernel_early_pages += group->present_kernel_pages;
+
+ /*
+ * Test if we could online the given number of pages to ZONE_MOVABLE
+ * and still stay in the configured ratio.
+ */
+ movable_pages += nr_pages;
+ return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
+}
+
/*
* Returns a default kernel memory zone for the given pfn range.
* If no kernel zone covers this pfn range it will automatically go
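
To make the ratio check in auto_movable_can_online_movable() concrete: with the default auto_movable_ratio of 301 and 4 GiB of early KERNEL memory, onlining is allowed while MOVABLE memory (including the pages being onlined) stays within 301 * 4 GiB / 100, i.e. just over 12 GiB, roughly a 3:1 MOVABLE:KERNEL split; beyond that, auto_movable_zone_for_pfn() falls back to a kernel zone.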
@@ -684,6 +852,117 @@ static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn
return &pgdat->node_zones[ZONE_NORMAL];
}
+/*
+ * Determine to which zone to online memory dynamically based on user
+ * configuration and system stats. We care about the following ratio:
+ *
+ * MOVABLE : KERNEL
+ *
+ * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
+ * one of the kernel zones. CMA pages inside one of the kernel zones really
+ * behaves like ZONE_MOVABLE, so we treat them accordingly.
+ *
+ * We don't allow for hotplugged memory in a KERNEL zone to increase the
+ * amount of MOVABLE memory we can have, so we end up with:
+ *
+ * MOVABLE : KERNEL_EARLY
+ *
+ * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
+ * boot. We base our calculation on KERNEL_EARLY internally, because:
+ *
+ * a) Hotplugged memory in one of the kernel zones can sometimes still get
+ * hotunplugged, especially when hot(un)plugging individual memory blocks.
+ * There is no coordination across memory devices, therefore "automatic"
+ * hotunplugging, as implemented in hypervisors, could result in zone
+ * imbalances.
+ * b) Early/boot memory in one of the kernel zones can usually not get
+ * hotunplugged again (e.g., no firmware interface to unplug, fragmented
+ * with unmovable allocations). While there are corner cases where it might
+ * still work, it is barely relevant in practice.
+ *
+ * Exceptions are dynamic memory groups, which allow for more MOVABLE
+ * memory within the same memory group -- because in that case, there is
+ * coordination within the single memory device managed by a single driver.
+ *
+ * We rely on "present pages" instead of "managed pages", as the latter is
+ * highly unreliable and dynamic in virtualized environments, and does not
+ * consider boot time allocations. For example, memory ballooning adjusts the
+ * managed pages when inflating/deflating the balloon, and balloon compaction
+ * can even migrate inflated pages between zones.
+ *
+ * Using "present pages" is better but some things to keep in mind are:
+ *
+ * a) Some memblock allocations, such as for the crashkernel area, are
+ * effectively unused by the kernel, yet they count toward "present pages".
+ * Fortunately, these allocations are comparatively small in relevant setups
+ * (e.g., fraction of system memory).
+ * b) Some hotplugged memory blocks in virtualized environments, especially
+ * those hotplugged by virtio-mem, look like they are completely present;
+ * however, only parts of the memory block are actually currently usable.
+ * "present pages" is an upper limit that can get reached at runtime. As
+ * we base our calculations on KERNEL_EARLY, this is not an issue.
+ */
+static struct zone *auto_movable_zone_for_pfn(int nid,
+ struct memory_group *group,
+ unsigned long pfn,
+ unsigned long nr_pages)
+{
+ unsigned long online_pages = 0, max_pages, end_pfn;
+ struct page *page;
+
+ if (!auto_movable_ratio)
+ goto kernel_zone;
+
+ if (group && !group->is_dynamic) {
+ max_pages = group->s.max_pages;
+ online_pages = group->present_movable_pages;
+
+ /* If anything is !MOVABLE online the rest !MOVABLE. */
+ if (group->present_kernel_pages)
+ goto kernel_zone;
+ } else if (!group || group->d.unit_pages == nr_pages) {
+ max_pages = nr_pages;
+ } else {
+ max_pages = group->d.unit_pages;
+ /*
+ * Take a look at all online sections in the current unit.
+ * We can safely assume that all pages within a section belong
+ * to the same zone, because dynamic memory groups only deal
+ * with hotplugged memory.
+ */
+ pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
+ end_pfn = pfn + group->d.unit_pages;
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+ /* If anything is !MOVABLE online the rest !MOVABLE. */
+ if (page_zonenum(page) != ZONE_MOVABLE)
+ goto kernel_zone;
+ online_pages += PAGES_PER_SECTION;
+ }
+ }
+
+ /*
+ * Online MOVABLE if we could *currently* online all remaining parts
+ * MOVABLE. We expect to (add+) online them immediately next, so if
+ * nobody interferes, all will be MOVABLE if possible.
+ */
+ nr_pages = max_pages - online_pages;
+ if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
+ goto kernel_zone;
+
+#ifdef CONFIG_NUMA
+ if (auto_movable_numa_aware &&
+ !auto_movable_can_online_movable(nid, group, nr_pages))
+ goto kernel_zone;
+#endif /* CONFIG_NUMA */
+
+ return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
+kernel_zone:
+ return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
+}
+
static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
unsigned long nr_pages)
{
@@ -708,7 +987,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
return movable_node_enabled ? movable_zone : kernel_zone;
}
-struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages)
{
if (online_type == MMOP_ONLINE_KERNEL)
@@ -717,6 +997,9 @@ struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
if (online_type == MMOP_ONLINE_MOVABLE)
return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
+ if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
+ return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
+
return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
@@ -724,10 +1007,25 @@ struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
* This function should only be called by memory_block_{online,offline},
* and {online,offline}_pages.
*/
-void adjust_present_page_count(struct zone *zone, long nr_pages)
+void adjust_present_page_count(struct page *page, struct memory_group *group,
+ long nr_pages)
{
+ struct zone *zone = page_zone(page);
+ const bool movable = zone_idx(zone) == ZONE_MOVABLE;
+
+ /*
+ * We only support onlining/offlining/adding/removing of complete
+ * memory blocks; therefore, all of it is either early or hotplugged.
+ */
+ if (early_section(__pfn_to_section(page_to_pfn(page))))
+ zone->present_early_pages += nr_pages;
zone->present_pages += nr_pages;
zone->zone_pgdat->node_present_pages += nr_pages;
+
+ if (group && movable)
+ group->present_movable_pages += nr_pages;
+ else if (group && !movable)
+ group->present_kernel_pages += nr_pages;
}
int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
@@ -773,7 +1071,8 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}
-int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
+int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
unsigned long flags;
int need_zonelists_rebuild = 0;
@@ -826,7 +1125,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
}
online_pages_range(pfn, nr_pages);
- adjust_present_page_count(zone, nr_pages);
+ adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
node_states_set_node(nid, &arg);
if (need_zonelists_rebuild)
@@ -1059,6 +1358,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
struct vmem_altmap mhp_altmap = {};
+ struct memory_group *group = NULL;
u64 start, size;
bool new_node = false;
int ret;
@@ -1070,6 +1370,13 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
if (ret)
return ret;
+ if (mhp_flags & MHP_NID_IS_MGID) {
+ group = memory_group_find_by_id(nid);
+ if (!group)
+ return -EINVAL;
+ nid = group->nid;
+ }
+
if (!node_possible(nid)) {
WARN(1, "node %d was absent from the node_possible_map\n", nid);
return -EINVAL;
@@ -1104,9 +1411,10 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
goto error;
/* create memory block devices after memory was added */
- ret = create_memory_block_devices(start, size, mhp_altmap.alloc);
+ ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
+ group);
if (ret) {
- arch_remove_memory(nid, start, size, NULL);
+ arch_remove_memory(start, size, NULL);
goto error;
}
@@ -1298,7 +1606,7 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
unsigned long pfn, sec_end_pfn;
struct zone *zone = NULL;
struct page *page;
- int i;
+
for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
pfn < end_pfn;
pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
@@ -1307,17 +1615,10 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
continue;
for (; pfn < sec_end_pfn && pfn < end_pfn;
pfn += MAX_ORDER_NR_PAGES) {
- i = 0;
- /* This is just a CONFIG_HOLES_IN_ZONE check.*/
- while ((i < MAX_ORDER_NR_PAGES) &&
- !pfn_valid_within(pfn + i))
- i++;
- if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
- continue;
/* Check if we got outside of the zone */
- if (zone && !zone_spans_pfn(zone, pfn + i))
+ if (zone && !zone_spans_pfn(zone, pfn))
return NULL;
- page = pfn_to_page(pfn + i);
+ page = pfn_to_page(pfn);
if (zone && page_zone(page) != zone)
return NULL;
zone = page_zone(page);
@@ -1568,7 +1869,8 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
return 0;
}
-int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct memory_group *group)
{
const unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn, system_ram_pages = 0;
@@ -1704,7 +2006,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
/* removal success */
adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
- adjust_present_page_count(zone, -nr_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
/* reinitialise watermarks and update pcp limits */
init_per_zone_wmark_min();
@@ -1746,7 +2048,9 @@ failed_removal:
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
int ret = !is_memblock_offlined(mem);
+ int *nid = arg;
+ *nid = mem->nid;
if (unlikely(ret)) {
phys_addr_t beginpa, endpa;
@@ -1839,12 +2143,12 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
-static int __ref try_remove_memory(int nid, u64 start, u64 size)
+static int __ref try_remove_memory(u64 start, u64 size)
{
- int rc = 0;
struct vmem_altmap mhp_altmap = {};
struct vmem_altmap *altmap = NULL;
unsigned long nr_vmemmap_pages;
+ int rc = 0, nid = NUMA_NO_NODE;
BUG_ON(check_hotplug_memory_range(start, size));
@@ -1852,8 +2156,12 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
* All memory blocks must be offlined before removing memory. Check
* whether all memory blocks in question are offline and return error
* if this is not the case.
+ *
+ * While at it, determine the nid. Note that if we'd have mixed nodes,
+ * we'd only try to offline the last determined one -- which is good
+ * enough for the cases we care about.
*/
- rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
+ rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
if (rc)
return rc;
@@ -1893,7 +2201,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
mem_hotplug_begin();
- arch_remove_memory(nid, start, size, altmap);
+ arch_remove_memory(start, size, altmap);
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
memblock_free(start, size);
@@ -1902,7 +2210,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
release_mem_region_adjustable(start, size);
- try_offline_node(nid);
+ if (nid != NUMA_NO_NODE)
+ try_offline_node(nid);
mem_hotplug_done();
return 0;
@@ -1910,7 +2219,6 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
/**
* __remove_memory - Remove memory if every memory block is offline
- * @nid: the node ID
* @start: physical address of the region to remove
* @size: size of the region to remove
*
@@ -1918,14 +2226,14 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
* and online/offline operations before this call, as required by
* try_offline_node().
*/
-void __remove_memory(int nid, u64 start, u64 size)
+void __remove_memory(u64 start, u64 size)
{
/*
* trigger BUG() if some memory is not offlined prior to calling this
* function
*/
- if (try_remove_memory(nid, start, size))
+ if (try_remove_memory(start, size))
BUG();
}
@@ -1933,12 +2241,12 @@ void __remove_memory(int nid, u64 start, u64 size)
 * Remove memory if every memory block is offline, otherwise return -EBUSY if
* some memory is not offline
*/
-int remove_memory(int nid, u64 start, u64 size)
+int remove_memory(u64 start, u64 size)
{
int rc;
lock_device_hotplug();
- rc = try_remove_memory(nid, start, size);
+ rc = try_remove_memory(start, size);
unlock_device_hotplug();
return rc;
@@ -1998,7 +2306,7 @@ static int try_reonline_memory_block(struct memory_block *mem, void *arg)
* unplugged all memory (so it's no longer in use) and want to offline + remove
* that memory.
*/
-int offline_and_remove_memory(int nid, u64 start, u64 size)
+int offline_and_remove_memory(u64 start, u64 size)
{
const unsigned long mb_count = size / memory_block_size_bytes();
uint8_t *online_types, *tmp;
@@ -2034,7 +2342,7 @@ int offline_and_remove_memory(int nid, u64 start, u64 size)
* This cannot fail as it cannot get onlined in the meantime.
*/
if (!rc) {
- rc = try_remove_memory(nid, start, size);
+ rc = try_remove_memory(start, size);
if (rc)
pr_err("%s: Failed to remove memory: %d", __func__, rc);
}
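
The visible net effect on the removal API is a simpler signature everywhere; a before/after sketch (callers no longer need to track a node id purely for removal, since try_remove_memory() now rediscovers it from the memory blocks being removed):

	/* before */ remove_memory(nid, start, size);
	/* after  */ remove_memory(start, size);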
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5e90b3fb7794..1592b081c58e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1362,16 +1362,33 @@ mpol_out:
/*
* User space interface with variable sized bitmaps for nodelists.
*/
+static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
+ unsigned long maxnode)
+{
+ unsigned long nlongs = BITS_TO_LONGS(maxnode);
+ int ret;
+
+ if (in_compat_syscall())
+ ret = compat_get_bitmap(mask,
+ (const compat_ulong_t __user *)nmask,
+ maxnode);
+ else
+ ret = copy_from_user(mask, nmask,
+ nlongs * sizeof(unsigned long));
+
+ if (ret)
+ return -EFAULT;
+
+ if (maxnode % BITS_PER_LONG)
+ mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
+
+ return 0;
+}
/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long maxnode)
{
- unsigned long k;
- unsigned long t;
- unsigned long nlongs;
- unsigned long endmask;
-
--maxnode;
nodes_clear(*nodes);
if (maxnode == 0 || !nmask)
@@ -1379,49 +1396,29 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
return -EINVAL;
- nlongs = BITS_TO_LONGS(maxnode);
- if ((maxnode % BITS_PER_LONG) == 0)
- endmask = ~0UL;
- else
- endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
-
/*
* When the user specified more nodes than supported just check
- * if the non supported part is all zero.
- *
- * If maxnode have more longs than MAX_NUMNODES, check
- * the bits in that area first. And then go through to
- * check the rest bits which equal or bigger than MAX_NUMNODES.
- * Otherwise, just check bits [MAX_NUMNODES, maxnode).
+ * if the non supported part is all zero, one word at a time,
+ * starting at the end.
*/
- if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
- for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
- if (get_user(t, nmask + k))
- return -EFAULT;
- if (k == nlongs - 1) {
- if (t & endmask)
- return -EINVAL;
- } else if (t)
- return -EINVAL;
- }
- nlongs = BITS_TO_LONGS(MAX_NUMNODES);
- endmask = ~0UL;
- }
+ while (maxnode > MAX_NUMNODES) {
+ unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
+ unsigned long t;
- if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
- unsigned long valid_mask = endmask;
-
- valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
- if (get_user(t, nmask + nlongs - 1))
+ if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
return -EFAULT;
- if (t & valid_mask)
+
+ if (maxnode - bits >= MAX_NUMNODES) {
+ maxnode -= bits;
+ } else {
+ maxnode = MAX_NUMNODES;
+ t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
+ }
+ if (t)
return -EINVAL;
}
- if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
- return -EFAULT;
- nodes_addr(*nodes)[nlongs-1] &= endmask;
- return 0;
+ return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
}
/* Copy a kernel node mask to user space */
@@ -1430,6 +1427,10 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
+ bool compat = in_compat_syscall();
+
+ if (compat)
+ nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
@@ -1437,7 +1438,13 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
if (clear_user((char __user *)mask + nbytes, copy - nbytes))
return -EFAULT;
copy = nbytes;
+ maxnode = nr_node_ids;
}
+
+ if (compat)
+ return compat_put_bitmap((compat_ulong_t __user *)mask,
+ nodes_addr(*nodes), maxnode);
+
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
@@ -1642,116 +1649,6 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}
-#ifdef CONFIG_COMPAT
-
-COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
- compat_ulong_t __user *, nmask,
- compat_ulong_t, maxnode,
- compat_ulong_t, addr, compat_ulong_t, flags)
-{
- long err;
- unsigned long __user *nm = NULL;
- unsigned long nr_bits, alloc_size;
- DECLARE_BITMAP(bm, MAX_NUMNODES);
-
- nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
- alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
-
- if (nmask)
- nm = compat_alloc_user_space(alloc_size);
-
- err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
-
- if (!err && nmask) {
- unsigned long copy_size;
- copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
- err = copy_from_user(bm, nm, copy_size);
- /* ensure entire bitmap is zeroed */
- err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
- err |= compat_put_bitmap(nmask, bm, nr_bits);
- }
-
- return err;
-}
-
-COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
- compat_ulong_t, maxnode)
-{
- unsigned long __user *nm = NULL;
- unsigned long nr_bits, alloc_size;
- DECLARE_BITMAP(bm, MAX_NUMNODES);
-
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
- alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
-
- if (nmask) {
- if (compat_get_bitmap(bm, nmask, nr_bits))
- return -EFAULT;
- nm = compat_alloc_user_space(alloc_size);
- if (copy_to_user(nm, bm, alloc_size))
- return -EFAULT;
- }
-
- return kernel_set_mempolicy(mode, nm, nr_bits+1);
-}
-
-COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
- compat_ulong_t, mode, compat_ulong_t __user *, nmask,
- compat_ulong_t, maxnode, compat_ulong_t, flags)
-{
- unsigned long __user *nm = NULL;
- unsigned long nr_bits, alloc_size;
- nodemask_t bm;
-
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
- alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
-
- if (nmask) {
- if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
- return -EFAULT;
- nm = compat_alloc_user_space(alloc_size);
- if (copy_to_user(nm, nodes_addr(bm), alloc_size))
- return -EFAULT;
- }
-
- return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
-}
-
-COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
- compat_ulong_t, maxnode,
- const compat_ulong_t __user *, old_nodes,
- const compat_ulong_t __user *, new_nodes)
-{
- unsigned long __user *old = NULL;
- unsigned long __user *new = NULL;
- nodemask_t tmp_mask;
- unsigned long nr_bits;
- unsigned long size;
-
- nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
- size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
- if (old_nodes) {
- if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
- return -EFAULT;
- old = compat_alloc_user_space(new_nodes ? size * 2 : size);
- if (new_nodes)
- new = old + size / sizeof(unsigned long);
- if (copy_to_user(old, nodes_addr(tmp_mask), size))
- return -EFAULT;
- }
- if (new_nodes) {
- if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
- return -EFAULT;
- if (new == NULL)
- new = compat_alloc_user_space(size);
- if (copy_to_user(new, nodes_addr(tmp_mask), size))
- return -EFAULT;
- }
- return kernel_migrate_pages(pid, nr_bits + 1, old, new);
-}
-
-#endif /* CONFIG_COMPAT */
-
bool vma_migratable(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
@@ -1979,17 +1876,26 @@ unsigned int mempolicy_slab_node(void)
*/
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
- unsigned nnodes = nodes_weight(pol->nodes);
- unsigned target;
+ nodemask_t nodemask = pol->nodes;
+ unsigned int target, nnodes;
int i;
int nid;
+ /*
+ * The barrier will stabilize the nodemask in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * Between first_node() and next_node(), pol->nodes could be changed
+ * by other threads. So we copy pol->nodes to a local variable on the stack.
+ */
+ barrier();
+ nnodes = nodes_weight(nodemask);
if (!nnodes)
return numa_node_id();
target = (unsigned int)n % nnodes;
- nid = first_node(pol->nodes);
+ nid = first_node(nodemask);
for (i = 0; i < target; i++)
- nid = next_node(nid, pol->nodes);
+ nid = next_node(nid, nodemask);
return nid;
}
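
get_bitmap() above must discard the bits at and above maxnode in the last word it copies. A small userspace sketch of that final-word masking, assuming a 64-bit unsigned long:

	#include <stddef.h>

	#define BITS_PER_LONG 64

	/* Keep only the low `maxnode` bits of a bitmap, as get_bitmap() does. */
	static void trim_bitmap(unsigned long *mask, unsigned long maxnode)
	{
		size_t nlongs = (maxnode + BITS_PER_LONG - 1) / BITS_PER_LONG;

		if (maxnode % BITS_PER_LONG)
			mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
	}

For maxnode == 70 this clears bits 70..127, leaving only the six valid bits (64..69) in the second word.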
diff --git a/mm/memremap.c b/mm/memremap.c
index 15a074ffb8d7..ed593bf87109 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -140,14 +140,11 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
struct range *range = &pgmap->ranges[range_id];
struct page *first_page;
- int nid;
/* make sure to access a memmap that was actually initialized */
first_page = pfn_to_page(pfn_first(pgmap, range_id));
/* pages are dead and unused, undo the arch mapping */
- nid = page_to_nid(first_page);
-
mem_hotplug_begin();
remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
PHYS_PFN(range_len(range)));
@@ -155,7 +152,7 @@ static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
__remove_pages(PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), NULL);
} else {
- arch_remove_memory(nid, range->start, range_len(range),
+ arch_remove_memory(range->start, range_len(range),
pgmap_altmap(pgmap));
kasan_remove_zero_shadow(__va(range->start), range_len(range));
}
diff --git a/mm/migrate.c b/mm/migrate.c
index a0aeb3fe46a7..a6a7743ee98f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -960,7 +960,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
int force, enum migrate_mode mode)
{
int rc = -EAGAIN;
- int page_was_mapped = 0;
+ bool page_was_mapped = false;
struct anon_vma *anon_vma = NULL;
bool is_lru = !__PageMovable(page);
@@ -1008,7 +1008,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
/*
- * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+ * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
* we cannot notice that anon_vma is freed while we migrates a page.
* This get_anon_vma() delays freeing anon_vma pointer until the end
* of migration. File cache pages are no problem because of page_lock()
@@ -1063,7 +1063,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
try_to_migrate(page, 0);
- page_was_mapped = 1;
+ page_was_mapped = true;
}
if (!page_mapped(page))
@@ -1900,6 +1900,23 @@ set_status:
mmap_read_unlock(mm);
}
+static int get_compat_pages_array(const void __user *chunk_pages[],
+ const void __user * __user *pages,
+ unsigned long chunk_nr)
+{
+ compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
+ compat_uptr_t p;
+ int i;
+
+ for (i = 0; i < chunk_nr; i++) {
+ if (get_user(p, pages32 + i))
+ return -EFAULT;
+ chunk_pages[i] = compat_ptr(p);
+ }
+
+ return 0;
+}
+
/*
* Determine the nodes of a user array of pages and store it in
* a user array of status.
@@ -1919,8 +1936,15 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
chunk_nr = DO_PAGES_STAT_CHUNK_NR;
- if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
- break;
+ if (in_compat_syscall()) {
+ if (get_compat_pages_array(chunk_pages, pages,
+ chunk_nr))
+ break;
+ } else {
+ if (copy_from_user(chunk_pages, pages,
+ chunk_nr * sizeof(*chunk_pages)))
+ break;
+ }
do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
@@ -2023,28 +2047,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
- compat_uptr_t __user *, pages32,
- const int __user *, nodes,
- int __user *, status,
- int, flags)
-{
- const void __user * __user *pages;
- int i;
-
- pages = compat_alloc_user_space(nr_pages * sizeof(void *));
- for (i = 0; i < nr_pages; i++) {
- compat_uptr_t p;
-
- if (get_user(p, pages32 + i) ||
- put_user(compat_ptr(p), pages + i))
- return -EFAULT;
- }
- return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
-}
-#endif /* CONFIG_COMPAT */
-
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns true if this is a safe migration target node for misplaced NUMA
@@ -2107,6 +2109,7 @@ out:
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
+ int nr_pages = thp_nr_pages(page);
VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
@@ -2115,7 +2118,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 0;
/* Avoid migrating to a node that is nearly full */
- if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
+ if (!migrate_balanced_pgdat(pgdat, nr_pages))
return 0;
if (isolate_lru_page(page))
@@ -2123,7 +2126,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
page_lru = page_is_file_lru(page);
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
- thp_nr_pages(page));
+ nr_pages);
/*
* Isolating the page has taken another reference, so the
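
The do_pages_stat() change is needed because a 32-bit caller's pages[] array holds 4-byte compat_uptr_t entries, so copying native 8-byte pointers out of it would fuse two entries into one slot. The per-entry widening performed by get_compat_pages_array(), sketched with the names from the hunk:

	compat_uptr_t p;

	if (get_user(p, pages32 + i))		/* read one 4-byte user slot */
		return -EFAULT;
	chunk_pages[i] = compat_ptr(p);		/* zero-extend to a native pointer */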
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f95e1d2386a1..b37435c274cf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -594,8 +594,6 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
static int page_is_consistent(struct zone *zone, struct page *page)
{
- if (!pfn_valid_within(page_to_pfn(page)))
- return 0;
if (zone != page_zone(page))
return 0;
@@ -1025,16 +1023,12 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
if (order >= MAX_ORDER - 2)
return false;
- if (!pfn_valid_within(buddy_pfn))
- return false;
-
combined_pfn = buddy_pfn & pfn;
higher_page = page + (combined_pfn - pfn);
buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
higher_buddy = higher_page + (buddy_pfn - combined_pfn);
- return pfn_valid_within(buddy_pfn) &&
- page_is_buddy(higher_page, higher_buddy, order + 1);
+ return page_is_buddy(higher_page, higher_buddy, order + 1);
}
/*
@@ -1095,8 +1089,6 @@ continue_merging:
buddy_pfn = __find_buddy_pfn(pfn, order);
buddy = page + (buddy_pfn - pfn);
- if (!pfn_valid_within(buddy_pfn))
- goto done_merging;
if (!page_is_buddy(page, buddy, order))
goto done_merging;
/*
@@ -1754,9 +1746,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
/*
* Check that the whole (or subset of) a pageblock given by the interval of
* [start_pfn, end_pfn) is valid and within the same zone, before scanning it
- * with the migration of free compaction scanner. The scanners then need to
- * use only pfn_valid_within() check for arches that allow holes within
- * pageblocks.
+ * with the migration of free compaction scanner.
*
* Return struct page pointer of start_pfn, or NULL if checks were not passed.
*
@@ -1872,8 +1862,6 @@ static inline void __init pgdat_init_report_one_done(void)
*/
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
- if (!pfn_valid_within(pfn))
- return false;
if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
return false;
return true;
@@ -2520,11 +2508,6 @@ static int move_freepages(struct zone *zone,
int pages_moved = 0;
for (pfn = start_pfn; pfn <= end_pfn;) {
- if (!pfn_valid_within(pfn)) {
- pfn++;
- continue;
- }
-
page = pfn_to_page(pfn);
if (!PageBuddy(page)) {
/*
@@ -3445,8 +3428,10 @@ void free_unref_page_list(struct list_head *list)
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
pfn = page_to_pfn(page);
- if (!free_unref_page_prepare(page, pfn, 0))
+ if (!free_unref_page_prepare(page, pfn, 0)) {
list_del(&page->lru);
+ continue;
+ }
/*
* Free isolated pages directly to the allocator, see
@@ -7271,6 +7256,9 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
zone->zone_start_pfn = 0;
zone->spanned_pages = size;
zone->present_pages = real_size;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+ zone->present_early_pages = real_size;
+#endif
totalpages += size;
realtotalpages += real_size;
@@ -8828,9 +8816,6 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
}
for (; iter < pageblock_nr_pages - offset; iter++) {
- if (!pfn_valid_within(pfn + iter))
- continue;
-
page = pfn_to_page(pfn + iter);
/*
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 293b2685fc48..dfb91653d359 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -58,11 +58,21 @@
* can utilize this callback to initialize the state of it correctly.
*/
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
+static bool need_page_idle(void)
+{
+ return true;
+}
+struct page_ext_operations page_idle_ops = {
+ .need = need_page_idle,
+};
+#endif
+
static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
&page_owner_ops,
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
&page_idle_ops,
#endif
};
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 64e5344a992c..edead6a8a5f9 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -207,16 +207,6 @@ static const struct attribute_group page_idle_attr_group = {
.name = "page_idle",
};
-#ifndef CONFIG_64BIT
-static bool need_page_idle(void)
-{
- return true;
-}
-struct page_ext_operations page_idle_ops = {
- .need = need_page_idle,
-};
-#endif
-
static int __init page_idle_init(void)
{
int err;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index fff55bb830f9..a95c2c6562d0 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -93,8 +93,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
buddy_pfn = __find_buddy_pfn(pfn, order);
buddy = page + (buddy_pfn - pfn);
- if (pfn_valid_within(buddy_pfn) &&
- !is_migrate_isolate_page(buddy)) {
+ if (!is_migrate_isolate_page(buddy)) {
__isolate_free_page(page, order);
isolated_page = true;
}
@@ -250,10 +249,6 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
struct page *page;
while (pfn < end_pfn) {
- if (!pfn_valid_within(pfn)) {
- pfn++;
- continue;
- }
page = pfn_to_page(pfn);
if (PageBuddy(page))
/*
diff --git a/mm/page_owner.c b/mm/page_owner.c
index f51a57e92aa3..62402d22539b 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -276,9 +276,6 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pageblock_mt = get_pageblock_migratetype(page);
for (; pfn < block_end_pfn; pfn++) {
- if (!pfn_valid_within(pfn))
- continue;
-
/* The pageblock is online, no need to recheck. */
page = pfn_to_page(pfn);
@@ -479,10 +476,6 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
continue;
}
- /* Check for holes within a MAX_ORDER area */
- if (!pfn_valid_within(pfn))
- continue;
-
page = pfn_to_page(pfn);
if (PageBuddy(page)) {
unsigned long freepage_order = buddy_order_unsafe(page);
@@ -560,14 +553,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
block_end_pfn = min(block_end_pfn, end_pfn);
for (; pfn < block_end_pfn; pfn++) {
- struct page *page;
+ struct page *page = pfn_to_page(pfn);
struct page_ext *page_ext;
- if (!pfn_valid_within(pfn))
- continue;
-
- page = pfn_to_page(pfn);
-
if (page_zone(page) != zone)
continue;
diff --git a/mm/percpu.c b/mm/percpu.c
index e1c20837a42a..e0a986818903 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -146,7 +146,6 @@ static unsigned int pcpu_high_unit_cpu __ro_after_init;
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
-EXPORT_SYMBOL_GPL(pcpu_base_addr);
static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
diff --git a/mm/rmap.c b/mm/rmap.c
index 2d29a57d29e8..6aebd1747251 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1231,11 +1231,13 @@ void page_add_file_rmap(struct page *page, bool compound)
nr_pages);
} else {
if (PageTransCompound(page) && page_mapping(page)) {
+ struct page *head = compound_head(page);
+
VM_WARN_ON_ONCE(!PageLocked(page));
- SetPageDoubleMap(compound_head(page));
+ SetPageDoubleMap(head);
if (PageMlocked(page))
- clear_page_mlock(compound_head(page));
+ clear_page_mlock(head);
}
if (!atomic_inc_and_test(&page->_mapcount))
goto out;
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 030f02ddc7c1..1fea68b8d5a6 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -18,6 +18,7 @@
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>
+#include <linux/refcount.h>
#include <uapi/linux/magic.h>
@@ -40,11 +41,11 @@ module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
"Enable secretmem and memfd_secret(2) system call");
-static atomic_t secretmem_users;
+static refcount_t secretmem_users;
bool secretmem_active(void)
{
- return !!atomic_read(&secretmem_users);
+ return !!refcount_read(&secretmem_users);
}
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
@@ -103,7 +104,7 @@ static const struct vm_operations_struct secretmem_vm_ops = {
static int secretmem_release(struct inode *inode, struct file *file)
{
- atomic_dec(&secretmem_users);
+ refcount_dec(&secretmem_users);
return 0;
}
@@ -217,7 +218,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
file->f_flags |= O_LARGEFILE;
fd_install(fd, file);
- atomic_inc(&secretmem_users);
+ refcount_inc(&secretmem_users);
return fd;
err_put_fd:
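
The secretmem_users conversion is a hardening change: refcount_t saturates and warns on overflow and underflow instead of wrapping like atomic_t, so an unbalanced increment/decrement on the counter cannot wrap it back through zero.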
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1c673c323baf..ec2bb0beed75 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -502,6 +502,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (unlikely(!s))
return;
+ cpus_read_lock();
mutex_lock(&slab_mutex);
s->refcount--;
@@ -516,6 +517,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
}
out_unlock:
mutex_unlock(&slab_mutex);
+ cpus_read_unlock();
}
EXPORT_SYMBOL(kmem_cache_destroy);
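
The cpus_read_lock() added here establishes the nesting documented in the slub.c lock-order comment below: the cpu hotplug lock is taken outside slab_mutex. Sketched ordering as now performed by kmem_cache_destroy():

	cpus_read_lock();		/* cpu hotplug lock, taken first */
	mutex_lock(&slab_mutex);	/* 1. global slab mutex */
	/* ... cache teardown ... */
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();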
diff --git a/mm/slub.c b/mm/slub.c
index f77d8cd79ef7..3d2025f7163b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -46,13 +46,21 @@
/*
* Lock order:
* 1. slab_mutex (Global Mutex)
- * 2. node->list_lock
- * 3. slab_lock(page) (Only on some arches and for debugging)
+ * 2. node->list_lock (Spinlock)
+ * 3. kmem_cache->cpu_slab->lock (Local lock)
+ * 4. slab_lock(page) (Only on some arches or for debugging)
+ * 5. object_map_lock (Only for debugging)
*
* slab_mutex
*
* The role of the slab_mutex is to protect the list of all the slabs
* and to synchronize major metadata changes to slab cache structures.
+ * Also synchronizes memory hotplug callbacks.
+ *
+ * slab_lock
+ *
+ * The slab_lock is a wrapper around the page lock, thus it is a bit
+ * spinlock.
*
* The slab_lock is only used for debugging and on arches that do not
* have the ability to do a cmpxchg_double. It only protects:
@@ -61,6 +69,8 @@
* C. page->objects -> Number of objects in page
* D. page->frozen -> frozen state
*
+ * Frozen slabs
+ *
* If a slab is frozen then it is exempt from list management. It is not
* on any list except per cpu partial list. The processor that froze the
* slab is the one who can perform list operations on the page. Other
@@ -68,6 +78,8 @@
* froze the slab is the only one that can retrieve the objects from the
* page's freelist.
*
+ * list_lock
+ *
* The list_lock protects the partial and full list on each node and
* the partial slab counter. If taken then no new slabs may be added or
* removed from the lists nor make the number of partial slabs be modified.
@@ -79,10 +91,36 @@
* slabs, operations can continue without any centralized lock. F.e.
* allocating a long series of objects that fill up slabs does not require
* the list lock.
- * Interrupts are disabled during allocation and deallocation in order to
- * make the slab allocator safe to use in the context of an irq. In addition
- * interrupts are disabled to ensure that the processor does not change
- * while handling per_cpu slabs, due to kernel preemption.
+ *
+ * cpu_slab->lock local lock
+ *
+ * This lock protects slowpath manipulation of all kmem_cache_cpu fields
+ * except the stat counters. This is a percpu structure manipulated only by
+ * the local cpu, so the lock protects against being preempted or interrupted
+ * by an irq. Fast path operations rely on lockless operations instead.
+ * On PREEMPT_RT, the local lock does not actually disable irqs (and thus
+ * prevent the lockless operations), so fastpath operations also need to take
+ * the lock and are no longer lockless.
+ *
+ * lockless fastpaths
+ *
+ * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
+ * are fully lockless when satisfied from the percpu slab (and when
+ * cmpxchg_double is possible to use, otherwise slab_lock is taken).
+ * They also don't disable preemption or migration or irqs. They rely on
+ * the transaction id (tid) field to detect being preempted or moved to
+ * another cpu.
+ *
+ * irq, preemption, migration considerations
+ *
+ * Interrupts are disabled as part of list_lock or local_lock operations, or
+ * around the slab_lock operation, in order to make the slab allocator safe
+ * to use in the context of an irq.
+ *
+ * In addition, preemption (or migration on PREEMPT_RT) is disabled in the
+ * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
+ * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
+ * doesn't have to be revalidated in each section protected by the local lock.
*
* SLUB assigns one slab for allocation to each processor.
* Allocations only occur from these slabs called cpu slabs.
@@ -118,6 +156,26 @@
* the fast path and disables lockless freelists.
*/
+/*
+ * We could simply use migrate_disable()/enable(), but as long as that is a
+ * function call even on !PREEMPT_RT, use the inline preempt_disable() there.
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define slub_get_cpu_ptr(var) get_cpu_ptr(var)
+#define slub_put_cpu_ptr(var) put_cpu_ptr(var)
+#else
+#define slub_get_cpu_ptr(var) \
+({ \
+ migrate_disable(); \
+ this_cpu_ptr(var); \
+})
+#define slub_put_cpu_ptr(var) \
+do { \
+ (void)(var); \
+ migrate_enable(); \
+} while (0)
+#endif
+
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
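
A hypothetical use of the slub_get_cpu_ptr()/slub_put_cpu_ptr() pair defined above, mirroring how the allocation slowpath pins the per-cpu structure (plain get_cpu_ptr() on !PREEMPT_RT; on PREEMPT_RT only migration is disabled, keeping the section preemptible):

	struct kmem_cache_cpu *c;

	c = slub_get_cpu_ptr(s->cpu_slab);	/* cpu pinned or migration disabled */
	/* ... slowpath work; c remains the local cpu's structure ... */
	slub_put_cpu_ptr(s->cpu_slab);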
@@ -359,25 +417,44 @@ static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
/*
* Per slab locking using the pagelock
*/
-static __always_inline void slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
bit_spin_lock(PG_locked, &page->flags);
}
-static __always_inline void slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct page *page)
{
VM_BUG_ON_PAGE(PageTail(page), page);
__bit_spin_unlock(PG_locked, &page->flags);
}
-/* Interrupts must be disabled (for the fallback code to work right) */
+static __always_inline void slab_lock(struct page *page, unsigned long *flags)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_save(*flags);
+ __slab_lock(page);
+}
+
+static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
+{
+ __slab_unlock(page);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_restore(*flags);
+}
+
+/*
+ * Interrupts must be disabled (for the fallback code to work right), typically
+ * by an _irqsave() lock variant, except on PREEMPT_RT where locks are
+ * different and we disable interrupts as part of slab_[un]lock().
+ */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
void *freelist_new, unsigned long counters_new,
const char *n)
{
- VM_BUG_ON(!irqs_disabled());
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ lockdep_assert_irqs_disabled();
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
@@ -388,15 +465,18 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
} else
#endif
{
- slab_lock(page);
+ /* init to 0 to prevent spurious warnings */
+ unsigned long flags = 0;
+
+ slab_lock(page, &flags);
if (page->freelist == freelist_old &&
page->counters == counters_old) {
page->freelist = freelist_new;
page->counters = counters_new;
- slab_unlock(page);
+ slab_unlock(page, &flags);
return true;
}
- slab_unlock(page);
+ slab_unlock(page, &flags);
}
cpu_relax();
@@ -427,16 +507,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
unsigned long flags;
local_irq_save(flags);
- slab_lock(page);
+ __slab_lock(page);
if (page->freelist == freelist_old &&
page->counters == counters_old) {
page->freelist = freelist_new;
page->counters = counters_new;
- slab_unlock(page);
+ __slab_unlock(page);
local_irq_restore(flags);
return true;
}
- slab_unlock(page);
+ __slab_unlock(page);
local_irq_restore(flags);
}
@@ -452,7 +532,19 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_SPINLOCK(object_map_lock);
+static DEFINE_RAW_SPINLOCK(object_map_lock);
+
+static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
+ struct page *page)
+{
+ void *addr = page_address(page);
+ void *p;
+
+ bitmap_zero(obj_map, page->objects);
+
+ for (p = page->freelist; p; p = get_freepointer(s, p))
+ set_bit(__obj_to_index(s, addr, p), obj_map);
+}
#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
@@ -483,17 +575,11 @@ static inline bool slab_add_kunit_errors(void) { return false; }
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
__acquires(&object_map_lock)
{
- void *p;
- void *addr = page_address(page);
-
VM_BUG_ON(!irqs_disabled());
- spin_lock(&object_map_lock);
+ raw_spin_lock(&object_map_lock);
- bitmap_zero(object_map, page->objects);
-
- for (p = page->freelist; p; p = get_freepointer(s, p))
- set_bit(__obj_to_index(s, addr, p), object_map);
+ __fill_map(object_map, s, page);
return object_map;
}
@@ -501,7 +587,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
static void put_map(unsigned long *map) __releases(&object_map_lock)
{
VM_BUG_ON(map != object_map);
- spin_unlock(&object_map_lock);
+ raw_spin_unlock(&object_map_lock);
}
static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -1003,8 +1089,6 @@ static int check_slab(struct kmem_cache *s, struct page *page)
{
int maxobj;
- VM_BUG_ON(!irqs_disabled());
-
if (!PageSlab(page)) {
slab_err(s, page, "Not a valid slab page");
return 0;
@@ -1265,11 +1349,11 @@ static noinline int free_debug_processing(
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
void *object = head;
int cnt = 0;
- unsigned long flags;
+ unsigned long flags, flags2;
int ret = 0;
spin_lock_irqsave(&n->list_lock, flags);
- slab_lock(page);
+ slab_lock(page, &flags2);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
if (!check_slab(s, page))
@@ -1302,7 +1386,7 @@ out:
slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
bulk_cnt, cnt);
- slab_unlock(page);
+ slab_unlock(page, &flags2);
spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
@@ -1585,20 +1669,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
{
kmemleak_free_recursive(x, s->flags);
- /*
- * Trouble is that we may no longer disable interrupts in the fast path
- * So in order to make the debug calls that expect irqs to be
- * disabled we need to disable interrupts temporarily.
- */
-#ifdef CONFIG_LOCKDEP
- {
- unsigned long flags;
+ debug_check_no_locks_freed(x, s->object_size);
- local_irq_save(flags);
- debug_check_no_locks_freed(x, s->object_size);
- local_irq_restore(flags);
- }
-#endif
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(x, s->object_size);
@@ -1815,9 +1887,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
flags &= gfp_allowed_mask;
- if (gfpflags_allow_blocking(flags))
- local_irq_enable();
-
flags |= s->allocflags;
/*
@@ -1876,8 +1945,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->frozen = 1;
out:
- if (gfpflags_allow_blocking(flags))
- local_irq_disable();
if (!page)
return NULL;
@@ -1891,6 +1958,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (unlikely(flags & GFP_SLAB_BUG_MASK))
flags = kmalloc_fix_flags(flags);
+ WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
+
return allocate_slab(s,
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
@@ -2014,18 +2083,24 @@ static inline void *acquire_slab(struct kmem_cache *s,
return freelist;
}
+#ifdef CONFIG_SLUB_CPU_PARTIAL
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+#else
+static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
+ int drain) { }
+#endif
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
/*
* Try to allocate a partial slab from a specific node.
*/
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- struct kmem_cache_cpu *c, gfp_t flags)
+ struct page **ret_page, gfp_t gfpflags)
{
struct page *page, *page2;
void *object = NULL;
unsigned int available = 0;
+ unsigned long flags;
int objects;
/*
@@ -2037,11 +2112,11 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
- spin_lock(&n->list_lock);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
void *t;
- if (!pfmemalloc_match(page, flags))
+ if (!pfmemalloc_match(page, gfpflags))
continue;
t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2050,7 +2125,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
available += objects;
if (!object) {
- c->page = page;
+ *ret_page = page;
stat(s, ALLOC_FROM_PARTIAL);
object = t;
} else {
@@ -2062,7 +2137,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
- spin_unlock(&n->list_lock);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return object;
}
@@ -2070,7 +2145,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
* Get a page from somewhere. Search in increasing NUMA distances.
*/
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
- struct kmem_cache_cpu *c)
+ struct page **ret_page)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
@@ -2112,7 +2187,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
if (n && cpuset_zone_allowed(zone, flags) &&
n->nr_partial > s->min_partial) {
- object = get_partial_node(s, n, c, flags);
+ object = get_partial_node(s, n, ret_page, flags);
if (object) {
/*
* Don't check read_mems_allowed_retry()
@@ -2134,7 +2209,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
* Get a partial page, lock it and return it.
*/
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
- struct kmem_cache_cpu *c)
+ struct page **ret_page)
{
void *object;
int searchnode = node;
@@ -2142,11 +2217,11 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
if (node == NUMA_NO_NODE)
searchnode = numa_mem_id();
- object = get_partial_node(s, get_node(s, searchnode), c, flags);
+ object = get_partial_node(s, get_node(s, searchnode), ret_page, flags);
if (object || node != NUMA_NO_NODE)
return object;
- return get_any_partial(s, flags, c);
+ return get_any_partial(s, flags, ret_page);
}
#ifdef CONFIG_PREEMPTION
@@ -2213,16 +2288,23 @@ static inline void note_cmpxchg_failure(const char *n,
static void init_kmem_cache_cpus(struct kmem_cache *s)
{
int cpu;
+ struct kmem_cache_cpu *c;
- for_each_possible_cpu(cpu)
- per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
+ for_each_possible_cpu(cpu) {
+ c = per_cpu_ptr(s->cpu_slab, cpu);
+ local_lock_init(&c->lock);
+ c->tid = init_tid(cpu);
+ }
}
/*
- * Remove the cpu slab
+ * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * unfreezes the slab and puts it on the proper list.
+ * Assumes the slab has been already safely taken away from kmem_cache_cpu
+ * by the caller.
*/
static void deactivate_slab(struct kmem_cache *s, struct page *page,
- void *freelist, struct kmem_cache_cpu *c)
+ void *freelist)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2230,6 +2312,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
enum slab_modes l = M_NONE, m = M_NONE;
void *nextfree, *freelist_iter, *freelist_tail;
int tail = DEACTIVATE_TO_HEAD;
+ unsigned long flags = 0;
struct page new;
struct page old;
@@ -2305,7 +2388,7 @@ redo:
* that acquire_slab() will see a slab page that
* is frozen
*/
- spin_lock(&n->list_lock);
+ spin_lock_irqsave(&n->list_lock, flags);
}
} else {
m = M_FULL;
@@ -2316,7 +2399,7 @@ redo:
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
- spin_lock(&n->list_lock);
+ spin_lock_irqsave(&n->list_lock, flags);
}
}
@@ -2333,14 +2416,14 @@ redo:
}
l = m;
- if (!__cmpxchg_double_slab(s, page,
+ if (!cmpxchg_double_slab(s, page,
old.freelist, old.counters,
new.freelist, new.counters,
"unfreezing slab"))
goto redo;
if (lock)
- spin_unlock(&n->list_lock);
+ spin_unlock_irqrestore(&n->list_lock, flags);
if (m == M_PARTIAL)
stat(s, tail);
@@ -2351,38 +2434,29 @@ redo:
discard_slab(s, page);
stat(s, FREE_SLAB);
}
-
- c->page = NULL;
- c->freelist = NULL;
}
-/*
- * Unfreeze all the cpu partial slabs.
- *
- * This function must be called with interrupts disabled
- * for the cpu using c (or some other guarantee must be there
- * to guarantee no concurrent accesses).
- */
-static void unfreeze_partials(struct kmem_cache *s,
- struct kmem_cache_cpu *c)
-{
#ifdef CONFIG_SLUB_CPU_PARTIAL
+static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
+{
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
+ unsigned long flags = 0;
- while ((page = slub_percpu_partial(c))) {
+ while (partial_page) {
struct page new;
struct page old;
- slub_set_percpu_partial(c, page);
+ page = partial_page;
+ partial_page = page->next;
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
- spin_unlock(&n->list_lock);
+ spin_unlock_irqrestore(&n->list_lock, flags);
n = n2;
- spin_lock(&n->list_lock);
+ spin_lock_irqsave(&n->list_lock, flags);
}
do {
@@ -2411,7 +2485,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
if (n)
- spin_unlock(&n->list_lock);
+ spin_unlock_irqrestore(&n->list_lock, flags);
while (discard_page) {
page = discard_page;
@@ -2421,7 +2495,35 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
+}
+
+/*
+ * Unfreeze all the cpu partial slabs.
+ */
+static void unfreeze_partials(struct kmem_cache *s)
+{
+ struct page *partial_page;
+ unsigned long flags;
+
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+ partial_page = this_cpu_read(s->cpu_slab->partial);
+ this_cpu_write(s->cpu_slab->partial, NULL);
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+ if (partial_page)
+ __unfreeze_partials(s, partial_page);
+}
+
+static void unfreeze_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c)
+{
+ struct page *partial_page;
+
+ partial_page = slub_percpu_partial(c);
+ c->partial = NULL;
+
+ if (partial_page)
+ __unfreeze_partials(s, partial_page);
}
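
Both unfreeze_partials() variants above funnel into __unfreeze_partials(), illustrating a pattern this series uses repeatedly: detach the per-cpu state inside a short local_lock critical section, then do the expensive list processing with the lock dropped. A rough sketch of that shape, with hypothetical names (my_cpu_data, process_detached_list):

    struct page;
    static void process_detached_list(struct page *list);  /* hypothetical */

    struct my_cpu_data {
        local_lock_t lock;
        struct page *partial;
    };
    static DEFINE_PER_CPU(struct my_cpu_data, my_cpu_data);

    static void my_unfreeze_partials(void)
    {
        struct page *list;
        unsigned long flags;

        /* short critical section: only detach the per-cpu list */
        local_lock_irqsave(&my_cpu_data.lock, flags);
        list = this_cpu_read(my_cpu_data.partial);
        this_cpu_write(my_cpu_data.partial, NULL);
        local_unlock_irqrestore(&my_cpu_data.lock, flags);

        /* the expensive part runs without the local lock held */
        if (list)
            process_detached_list(list);
    }
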
/*
@@ -2433,97 +2535,170 @@ static void unfreeze_partials(struct kmem_cache *s,
*/
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
-#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *oldpage;
- int pages;
- int pobjects;
+ struct page *page_to_unfreeze = NULL;
+ unsigned long flags;
+ int pages = 0;
+ int pobjects = 0;
- preempt_disable();
- do {
- pages = 0;
- pobjects = 0;
- oldpage = this_cpu_read(s->cpu_slab->partial);
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+
+ oldpage = this_cpu_read(s->cpu_slab->partial);
- if (oldpage) {
+ if (oldpage) {
+ if (drain && oldpage->pobjects > slub_cpu_partial(s)) {
+ /*
+ * Partial array is full. Move the existing set to the
+ * per node partial list. Postpone the actual unfreezing
+ * outside of the critical section.
+ */
+ page_to_unfreeze = oldpage;
+ oldpage = NULL;
+ } else {
pobjects = oldpage->pobjects;
pages = oldpage->pages;
- if (drain && pobjects > slub_cpu_partial(s)) {
- unsigned long flags;
- /*
- * partial array is full. Move the existing
- * set to the per node partial list.
- */
- local_irq_save(flags);
- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
- local_irq_restore(flags);
- oldpage = NULL;
- pobjects = 0;
- pages = 0;
- stat(s, CPU_PARTIAL_DRAIN);
- }
}
+ }
- pages++;
- pobjects += page->objects - page->inuse;
+ pages++;
+ pobjects += page->objects - page->inuse;
- page->pages = pages;
- page->pobjects = pobjects;
- page->next = oldpage;
+ page->pages = pages;
+ page->pobjects = pobjects;
+ page->next = oldpage;
- } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
- != oldpage);
- if (unlikely(!slub_cpu_partial(s))) {
- unsigned long flags;
+ this_cpu_write(s->cpu_slab->partial, page);
- local_irq_save(flags);
- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
- local_irq_restore(flags);
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+ if (page_to_unfreeze) {
+ __unfreeze_partials(s, page_to_unfreeze);
+ stat(s, CPU_PARTIAL_DRAIN);
}
- preempt_enable();
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
}
+#else /* CONFIG_SLUB_CPU_PARTIAL */
+
+static inline void unfreeze_partials(struct kmem_cache *s) { }
+static inline void unfreeze_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c) { }
+
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
+
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
- stat(s, CPUSLAB_FLUSH);
- deactivate_slab(s, c->page, c->freelist, c);
+ unsigned long flags;
+ struct page *page;
+ void *freelist;
+
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+
+ page = c->page;
+ freelist = c->freelist;
+
+ c->page = NULL;
+ c->freelist = NULL;
+ c->tid = next_tid(c->tid);
+
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+ if (page) {
+ deactivate_slab(s, page, freelist);
+ stat(s, CPUSLAB_FLUSH);
+ }
+}
+
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+{
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ void *freelist = c->freelist;
+ struct page *page = c->page;
+ c->page = NULL;
+ c->freelist = NULL;
c->tid = next_tid(c->tid);
+
+ if (page) {
+ deactivate_slab(s, page, freelist);
+ stat(s, CPUSLAB_FLUSH);
+ }
+
+ unfreeze_partials_cpu(s, c);
}
+struct slub_flush_work {
+ struct work_struct work;
+ struct kmem_cache *s;
+ bool skip;
+};
+
/*
* Flush cpu slab.
*
- * Called from IPI handler with interrupts disabled.
+ * Called from CPU work handler with migration disabled.
*/
-static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static void flush_cpu_slab(struct work_struct *w)
{
- struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ struct kmem_cache *s;
+ struct kmem_cache_cpu *c;
+ struct slub_flush_work *sfw;
+
+ sfw = container_of(w, struct slub_flush_work, work);
+
+ s = sfw->s;
+ c = this_cpu_ptr(s->cpu_slab);
if (c->page)
flush_slab(s, c);
- unfreeze_partials(s, c);
+ unfreeze_partials(s);
}
-static void flush_cpu_slab(void *d)
+static bool has_cpu_slab(int cpu, struct kmem_cache *s)
{
- struct kmem_cache *s = d;
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- __flush_cpu_slab(s, smp_processor_id());
+ return c->page || slub_percpu_partial(c);
}
-static bool has_cpu_slab(int cpu, void *info)
+static DEFINE_MUTEX(flush_lock);
+static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
+
+static void flush_all_cpus_locked(struct kmem_cache *s)
{
- struct kmem_cache *s = info;
- struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ struct slub_flush_work *sfw;
+ unsigned int cpu;
- return c->page || slub_percpu_partial(c);
+ lockdep_assert_cpus_held();
+ mutex_lock(&flush_lock);
+
+ for_each_online_cpu(cpu) {
+ sfw = &per_cpu(slub_flush, cpu);
+ if (!has_cpu_slab(cpu, s)) {
+ sfw->skip = true;
+ continue;
+ }
+ INIT_WORK(&sfw->work, flush_cpu_slab);
+ sfw->skip = false;
+ sfw->s = s;
+ schedule_work_on(cpu, &sfw->work);
+ }
+
+ for_each_online_cpu(cpu) {
+ sfw = &per_cpu(slub_flush, cpu);
+ if (sfw->skip)
+ continue;
+ flush_work(&sfw->work);
+ }
+
+ mutex_unlock(&flush_lock);
}
static void flush_all(struct kmem_cache *s)
{
- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+ cpus_read_lock();
+ flush_all_cpus_locked(s);
+ cpus_read_unlock();
}
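
flush_all() now drives per-cpu work items instead of IPIs, so the flush can run in a context that may sleep (a requirement on PREEMPT_RT). A simplified sketch of the schedule-then-wait pattern, without the has_cpu_slab() skip optimization (names are made up):

    static DEFINE_MUTEX(my_flush_lock);
    static DEFINE_PER_CPU(struct work_struct, my_flush_work);

    static void my_flush_fn(struct work_struct *w)
    {
        /* runs on the target CPU, in process context */
    }

    static void my_flush_all(void)
    {
        unsigned int cpu;

        cpus_read_lock();               /* keep the online mask stable */
        mutex_lock(&my_flush_lock);     /* serialize concurrent flushers */

        for_each_online_cpu(cpu) {
            INIT_WORK(&per_cpu(my_flush_work, cpu), my_flush_fn);
            schedule_work_on(cpu, &per_cpu(my_flush_work, cpu));
        }
        for_each_online_cpu(cpu)
            flush_work(&per_cpu(my_flush_work, cpu));

        mutex_unlock(&my_flush_lock);
        cpus_read_unlock();
    }
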
/*
@@ -2533,14 +2708,10 @@ static void flush_all(struct kmem_cache *s)
static int slub_cpu_dead(unsigned int cpu)
{
struct kmem_cache *s;
- unsigned long flags;
mutex_lock(&slab_mutex);
- list_for_each_entry(s, &slab_caches, list) {
- local_irq_save(flags);
+ list_for_each_entry(s, &slab_caches, list)
__flush_cpu_slab(s, cpu);
- local_irq_restore(flags);
- }
mutex_unlock(&slab_mutex);
return 0;
}
@@ -2623,44 +2794,22 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
#endif
}
-static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
- int node, struct kmem_cache_cpu **pc)
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
- void *freelist;
- struct kmem_cache_cpu *c = *pc;
- struct page *page;
-
- WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
-
- freelist = get_partial(s, flags, node, c);
-
- if (freelist)
- return freelist;
-
- page = new_slab(s, flags, node);
- if (page) {
- c = raw_cpu_ptr(s->cpu_slab);
- if (c->page)
- flush_slab(s, c);
-
- /*
- * No other reference to the page yet so we can
- * muck around with it freely without cmpxchg
- */
- freelist = page->freelist;
- page->freelist = NULL;
-
- stat(s, ALLOC_SLAB);
- c->page = page;
- *pc = c;
- }
+ if (unlikely(PageSlabPfmemalloc(page)))
+ return gfp_pfmemalloc_allowed(gfpflags);
- return freelist;
+ return true;
}
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+/*
+ * A variant of pfmemalloc_match() that tests page flags without asserting
+ * PageSlab. Intended for opportunistic checks done before taking a lock,
+ * after which the caller rechecks that nobody else freed the page under us.
+ */
+static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
{
- if (unlikely(PageSlabPfmemalloc(page)))
+ if (unlikely(__PageSlabPfmemalloc(page)))
return gfp_pfmemalloc_allowed(gfpflags);
return true;
@@ -2673,8 +2822,6 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
* The page is still frozen if the return value is not NULL.
*
* If this function returns NULL then the page has been unfrozen.
- *
- * This function must be called with interrupt disabled.
*/
static inline void *get_freelist(struct kmem_cache *s, struct page *page)
{
@@ -2682,6 +2829,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
unsigned long counters;
void *freelist;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
+
do {
freelist = page->freelist;
counters = page->counters;
@@ -2716,7 +2865,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
* we need to allocate a new slab. This is the slowest path since it involves
* a call to the page allocator and the setup of a new slab.
*
- * Version of __slab_alloc to use when we know that interrupts are
+ * Version of __slab_alloc to use when we know that preemption is
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -2724,10 +2873,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
{
void *freelist;
struct page *page;
+ unsigned long flags;
stat(s, ALLOC_SLOWPATH);
- page = c->page;
+reread_page:
+
+ page = READ_ONCE(c->page);
if (!page) {
/*
* if the node is not online or has no normal memory, just
@@ -2750,8 +2902,7 @@ redo:
goto redo;
} else {
stat(s, ALLOC_NODE_MISMATCH);
- deactivate_slab(s, page, c->freelist, c);
- goto new_slab;
+ goto deactivate_slab;
}
}
@@ -2760,12 +2911,15 @@ redo:
* PFMEMALLOC but right now, we are losing the pfmemalloc
* information when the page leaves the per-cpu allocator
*/
- if (unlikely(!pfmemalloc_match(page, gfpflags))) {
- deactivate_slab(s, page, c->freelist, c);
- goto new_slab;
+ if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
+ goto deactivate_slab;
+
+ /* must check c->page again in case we got preempted and it changed */
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+ if (unlikely(page != c->page)) {
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ goto reread_page;
}
-
- /* must check again c->freelist in case of cpu migration or IRQ */
freelist = c->freelist;
if (freelist)
goto load_freelist;
@@ -2774,6 +2928,7 @@ redo:
if (!freelist) {
c->page = NULL;
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
}
@@ -2781,6 +2936,9 @@ redo:
stat(s, ALLOC_REFILL);
load_freelist:
+
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
+
/*
* freelist is pointing to the list of objects to be used.
* page is pointing to the page from which the objects are obtained.
@@ -2789,59 +2947,141 @@ load_freelist:
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
return freelist;
+deactivate_slab:
+
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+ if (page != c->page) {
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ goto reread_page;
+ }
+ freelist = c->freelist;
+ c->page = NULL;
+ c->freelist = NULL;
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ deactivate_slab(s, page, freelist);
+
new_slab:
if (slub_percpu_partial(c)) {
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+ if (unlikely(c->page)) {
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ goto reread_page;
+ }
+ if (unlikely(!slub_percpu_partial(c))) {
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+ /* we were preempted and the partial list became empty */
+ goto new_objects;
+ }
+
page = c->page = slub_percpu_partial(c);
slub_set_percpu_partial(c, page);
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
stat(s, CPU_PARTIAL_ALLOC);
goto redo;
}
- freelist = new_slab_objects(s, gfpflags, node, &c);
+new_objects:
+
+ freelist = get_partial(s, gfpflags, node, &page);
+ if (freelist)
+ goto check_new_page;
+
+ slub_put_cpu_ptr(s->cpu_slab);
+ page = new_slab(s, gfpflags, node);
+ c = slub_get_cpu_ptr(s->cpu_slab);
- if (unlikely(!freelist)) {
+ if (unlikely(!page)) {
slab_out_of_memory(s, gfpflags, node);
return NULL;
}
- page = c->page;
- if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
- goto load_freelist;
+ /*
+ * No other reference to the page yet so we can
+ * muck around with it freely without cmpxchg
+ */
+ freelist = page->freelist;
+ page->freelist = NULL;
- /* Only entered in the debug case */
- if (kmem_cache_debug(s) &&
- !alloc_debug_processing(s, page, freelist, addr))
- goto new_slab; /* Slab failed checks. Next slab needed */
+ stat(s, ALLOC_SLAB);
+
+check_new_page:
+
+ if (kmem_cache_debug(s)) {
+ if (!alloc_debug_processing(s, page, freelist, addr)) {
+ /* Slab failed checks. Next slab needed */
+ goto new_slab;
+ } else {
+ /*
+ * For the debug case, we don't load the freelist so that all
+ * allocations go through alloc_debug_processing()
+ */
+ goto return_single;
+ }
+ }
+
+ if (unlikely(!pfmemalloc_match(page, gfpflags)))
+ /*
+ * For the !pfmemalloc_match() case we don't load the freelist so
+ * that we don't make further mismatched allocations easier.
+ */
+ goto return_single;
+
+retry_load_page:
+
+ local_lock_irqsave(&s->cpu_slab->lock, flags);
+ if (unlikely(c->page)) {
+ void *flush_freelist = c->freelist;
+ struct page *flush_page = c->page;
+
+ c->page = NULL;
+ c->freelist = NULL;
+ c->tid = next_tid(c->tid);
+
+ local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- deactivate_slab(s, page, get_freepointer(s, freelist), c);
+ deactivate_slab(s, flush_page, flush_freelist);
+
+ stat(s, CPUSLAB_FLUSH);
+
+ goto retry_load_page;
+ }
+ c->page = page;
+
+ goto load_freelist;
+
+return_single:
+
+ deactivate_slab(s, page, get_freepointer(s, freelist));
return freelist;
}
/*
- * Another one that disabled interrupt and compensates for possible
- * cpu changes by refetching the per cpu area pointer.
+ * A wrapper for ___slab_alloc() for contexts where preemption is not yet
+ * disabled. Compensates for possible cpu changes by refetching the per cpu area
+ * pointer.
*/
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
void *p;
- unsigned long flags;
- local_irq_save(flags);
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT_COUNT
/*
* We may have been preempted and rescheduled on a different
- * cpu before disabling interrupts. Need to reload cpu area
+ * cpu before disabling preemption. Need to reload cpu area
* pointer.
*/
- c = this_cpu_ptr(s->cpu_slab);
+ c = slub_get_cpu_ptr(s->cpu_slab);
#endif
p = ___slab_alloc(s, gfpflags, node, addr, c);
- local_irq_restore(flags);
+#ifdef CONFIG_PREEMPT_COUNT
+ slub_put_cpu_ptr(s->cpu_slab);
+#endif
return p;
}
@@ -2892,15 +3132,14 @@ redo:
* reading from one cpu area. That does not matter as long
* as we end up on the original cpu again when doing the cmpxchg.
*
- * We should guarantee that tid and kmem_cache are retrieved on
- * the same cpu. It could be different if CONFIG_PREEMPTION so we need
- * to check if it is matched or not.
+ * We must guarantee that tid and kmem_cache_cpu are retrieved on the
+ * same cpu. We read first the kmem_cache_cpu pointer and use it to read
+ * the tid. If we are preempted and switched to another cpu between the
+ * two reads, it's OK as the two are still associated with the same cpu
+ * and cmpxchg later will validate the cpu.
*/
- do {
- tid = this_cpu_read(s->cpu_slab->tid);
- c = raw_cpu_ptr(s->cpu_slab);
- } while (IS_ENABLED(CONFIG_PREEMPTION) &&
- unlikely(tid != READ_ONCE(c->tid)));
+ c = raw_cpu_ptr(s->cpu_slab);
+ tid = READ_ONCE(c->tid);
/*
* Irqless object alloc/free algorithm used here depends on sequence
@@ -2921,7 +3160,15 @@ redo:
object = c->freelist;
page = c->page;
- if (unlikely(!object || !page || !node_match(page, node))) {
+ /*
+ * We cannot use the lockless fastpath on PREEMPT_RT because if a
+ * slowpath has taken the local_lock_irqsave(), it is not protected
+ * against a fast path operation in an irq handler. So we need to take
+ * the slow path which uses local_lock. It is still relatively fast if
+ * there is a suitable cpu freelist.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
+ unlikely(!object || !page || !node_match(page, node))) {
object = __slab_alloc(s, gfpflags, node, addr, c);
} else {
void *next_object = get_freepointer_safe(s, object);
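
The fastpath kept here relies on the tid/cmpxchg-double scheme the comments describe: read the per-cpu pointer and tid, speculatively pick the first free object, and let a double-word cmpxchg detect any interference. A hedged sketch of that transaction, assuming the freelist/tid pair is suitably aligned for cmpxchg_double and that the next-object pointer lives at the start of each free object (my_cpu_slab and my_fast_alloc are invented names):

    struct my_cpu_slab {
        void **freelist;    /* first free object */
        unsigned long tid;  /* must sit right after freelist, double-word aligned */
    };

    static void *my_fast_alloc(struct my_cpu_slab __percpu *cpu_slab)
    {
        struct my_cpu_slab *c;
        unsigned long tid;
        void *object, *next;

    redo:
        /* c and tid are read without pinning the CPU; the cmpxchg validates them */
        c = raw_cpu_ptr(cpu_slab);
        tid = READ_ONCE(c->tid);
        barrier();

        object = c->freelist;
        if (!object)
            return NULL;            /* caller falls back to the slow path */
        next = *(void **)object;    /* assumed freelist encoding */

        /* succeeds only if nobody on this CPU changed the pair meanwhile */
        if (!this_cpu_cmpxchg_double(cpu_slab->freelist, cpu_slab->tid,
                                     object, tid,
                                     next, tid + 1))
            goto redo;
        return object;
    }
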
@@ -3174,16 +3421,14 @@ redo:
* data is retrieved via this pointer. If we are on the same cpu
* during the cmpxchg then the free will succeed.
*/
- do {
- tid = this_cpu_read(s->cpu_slab->tid);
- c = raw_cpu_ptr(s->cpu_slab);
- } while (IS_ENABLED(CONFIG_PREEMPTION) &&
- unlikely(tid != READ_ONCE(c->tid)));
+ c = raw_cpu_ptr(s->cpu_slab);
+ tid = READ_ONCE(c->tid);
/* Same with comment on barrier() in slab_alloc_node() */
barrier();
if (likely(page == c->page)) {
+#ifndef CONFIG_PREEMPT_RT
void **freelist = READ_ONCE(c->freelist);
set_freepointer(s, tail_obj, freelist);
@@ -3196,6 +3441,31 @@ redo:
note_cmpxchg_failure("slab_free", s, tid);
goto redo;
}
+#else /* CONFIG_PREEMPT_RT */
+ /*
+ * We cannot use the lockless fastpath on PREEMPT_RT because if
+ * a slowpath has taken the local_lock_irqsave(), it is not
+ * protected against a fast path operation in an irq handler. So
+ * we need to take the local_lock. We shouldn't simply defer to
+ * __slab_free() as that wouldn't use the cpu freelist at all.
+ */
+ void **freelist;
+
+ local_lock(&s->cpu_slab->lock);
+ c = this_cpu_ptr(s->cpu_slab);
+ if (unlikely(page != c->page)) {
+ local_unlock(&s->cpu_slab->lock);
+ goto redo;
+ }
+ tid = c->tid;
+ freelist = c->freelist;
+
+ set_freepointer(s, tail_obj, freelist);
+ c->freelist = head;
+ c->tid = next_tid(tid);
+
+ local_unlock(&s->cpu_slab->lock);
+#endif
stat(s, FREE_FASTPATH);
} else
__slab_free(s, page, head, tail_obj, cnt, addr);
@@ -3373,8 +3643,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
* IRQs, which protects against PREEMPT and interrupt
* handlers invoking normal fastpath.
*/
- local_irq_disable();
- c = this_cpu_ptr(s->cpu_slab);
+ c = slub_get_cpu_ptr(s->cpu_slab);
+ local_lock_irq(&s->cpu_slab->lock);
for (i = 0; i < size; i++) {
void *object = kfence_alloc(s, s->object_size, flags);
@@ -3395,6 +3665,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
*/
c->tid = next_tid(c->tid);
+ local_unlock_irq(&s->cpu_slab->lock);
+
/*
* Invoking slow path likely have side-effect
* of re-populating per CPU c->freelist
@@ -3407,6 +3679,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
c = this_cpu_ptr(s->cpu_slab);
maybe_wipe_obj_freeptr(s, p[i]);
+ local_lock_irq(&s->cpu_slab->lock);
+
continue; /* goto for-loop */
}
c->freelist = get_freepointer(s, object);
@@ -3414,7 +3688,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
maybe_wipe_obj_freeptr(s, p[i]);
}
c->tid = next_tid(c->tid);
- local_irq_enable();
+ local_unlock_irq(&s->cpu_slab->lock);
+ slub_put_cpu_ptr(s->cpu_slab);
/*
* memcg and kmem_cache debug support and memory initialization.
@@ -3424,7 +3699,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
slab_want_init_on_alloc(flags, s));
return i;
error:
- local_irq_enable();
+ slub_put_cpu_ptr(s->cpu_slab);
slab_post_alloc_hook(s, objcg, flags, i, p, false);
__kmem_cache_free_bulk(s, i, p);
return 0;
@@ -3938,11 +4213,12 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
+ unsigned long flags;
unsigned long *map;
void *p;
slab_err(s, page, text, s->name);
- slab_lock(page);
+ slab_lock(page, &flags);
map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
@@ -3953,7 +4229,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
}
}
put_map(map);
- slab_unlock(page);
+ slab_unlock(page, &flags);
#endif
}
@@ -4003,7 +4279,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
int node;
struct kmem_cache_node *n;
- flush_all(s);
+ flush_all_cpus_locked(s);
/* Attempt to free all objects */
for_each_kmem_cache_node(s, node, n) {
free_partial(s, n);
@@ -4279,7 +4555,7 @@ EXPORT_SYMBOL(kfree);
* being allocated from last increasing the chance that the last objects
* are freed in them.
*/
-int __kmem_cache_shrink(struct kmem_cache *s)
+static int __kmem_cache_do_shrink(struct kmem_cache *s)
{
int node;
int i;
@@ -4291,7 +4567,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
unsigned long flags;
int ret = 0;
- flush_all(s);
for_each_kmem_cache_node(s, node, n) {
INIT_LIST_HEAD(&discard);
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4341,13 +4616,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
return ret;
}
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+ flush_all(s);
+ return __kmem_cache_do_shrink(s);
+}
+
static int slab_mem_going_offline_callback(void *arg)
{
struct kmem_cache *s;
mutex_lock(&slab_mutex);
- list_for_each_entry(s, &slab_caches, list)
- __kmem_cache_shrink(s);
+ list_for_each_entry(s, &slab_caches, list) {
+ flush_all_cpus_locked(s);
+ __kmem_cache_do_shrink(s);
+ }
mutex_unlock(&slab_mutex);
return 0;
@@ -4673,33 +4956,33 @@ static int count_total(struct page *page)
#endif
#ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab(struct kmem_cache *s, struct page *page,
+ unsigned long *obj_map)
{
void *p;
void *addr = page_address(page);
- unsigned long *map;
+ unsigned long flags;
- slab_lock(page);
+ slab_lock(page, &flags);
if (!check_slab(s, page) || !on_freelist(s, page, NULL))
goto unlock;
/* Now we know that a valid freelist exists */
- map = get_map(s, page);
+ __fill_map(obj_map, s, page);
for_each_object(p, s, addr, page->objects) {
- u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
+ u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
if (!check_object(s, page, p, val))
break;
}
- put_map(map);
unlock:
- slab_unlock(page);
+ slab_unlock(page, &flags);
}
static int validate_slab_node(struct kmem_cache *s,
- struct kmem_cache_node *n)
+ struct kmem_cache_node *n, unsigned long *obj_map)
{
unsigned long count = 0;
struct page *page;
@@ -4708,7 +4991,7 @@ static int validate_slab_node(struct kmem_cache *s,
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list) {
- validate_slab(s, page);
+ validate_slab(s, page, obj_map);
count++;
}
if (count != n->nr_partial) {
@@ -4721,7 +5004,7 @@ static int validate_slab_node(struct kmem_cache *s,
goto out;
list_for_each_entry(page, &n->full, slab_list) {
- validate_slab(s, page);
+ validate_slab(s, page, obj_map);
count++;
}
if (count != atomic_long_read(&n->nr_slabs)) {
@@ -4740,10 +5023,17 @@ long validate_slab_cache(struct kmem_cache *s)
int node;
unsigned long count = 0;
struct kmem_cache_node *n;
+ unsigned long *obj_map;
+
+ obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
+ if (!obj_map)
+ return -ENOMEM;
flush_all(s);
for_each_kmem_cache_node(s, node, n)
- count += validate_slab_node(s, n);
+ count += validate_slab_node(s, n, obj_map);
+
+ bitmap_free(obj_map);
return count;
}
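
validate_slab_cache() now allocates the scratch bitmap once, up front, because bitmap_alloc(GFP_KERNEL) may sleep and therefore cannot run under the node's spinlock. A small sketch of the allocate-outside-the-lock shape (my_node, obj_count and the walk itself are placeholders):

    struct my_node {
        spinlock_t list_lock;
        /* ... lists to walk ... */
    };

    static long my_validate(struct my_node *n, unsigned int obj_count)
    {
        unsigned long *obj_map;
        unsigned long flags;
        long count = 0;

        /* sleeping allocation happens before the lock is taken */
        obj_map = bitmap_alloc(obj_count, GFP_KERNEL);
        if (!obj_map)
            return -ENOMEM;

        spin_lock_irqsave(&n->list_lock, flags);
        /* ... walk the lists, filling and testing obj_map ... */
        spin_unlock_irqrestore(&n->list_lock, flags);

        bitmap_free(obj_map);
        return count;
    }
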
@@ -4879,17 +5169,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
}
static void process_slab(struct loc_track *t, struct kmem_cache *s,
- struct page *page, enum track_item alloc)
+ struct page *page, enum track_item alloc,
+ unsigned long *obj_map)
{
void *addr = page_address(page);
void *p;
- unsigned long *map;
- map = get_map(s, page);
+ __fill_map(obj_map, s, page);
+
for_each_object(p, s, addr, page->objects)
- if (!test_bit(__obj_to_index(s, addr, p), map))
+ if (!test_bit(__obj_to_index(s, addr, p), obj_map))
add_location(t, s, get_track(s, p, alloc));
- put_map(map);
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SLUB_DEBUG */
@@ -5816,17 +6106,21 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
sizeof(struct loc_track));
struct kmem_cache *s = file_inode(filep)->i_private;
+ unsigned long *obj_map;
+
+ obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
+ if (!obj_map)
+ return -ENOMEM;
if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
alloc = TRACK_ALLOC;
else
alloc = TRACK_FREE;
- if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
+ if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
+ bitmap_free(obj_map);
return -ENOMEM;
-
- /* Push back cpu slabs */
- flush_all(s);
+ }
for_each_kmem_cache_node(s, node, n) {
unsigned long flags;
@@ -5837,12 +6131,13 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list)
- process_slab(t, s, page, alloc);
+ process_slab(t, s, page, alloc, obj_map);
list_for_each_entry(page, &n->full, slab_list)
- process_slab(t, s, page, alloc);
+ process_slab(t, s, page, alloc, obj_map);
spin_unlock_irqrestore(&n->list_lock, flags);
}
+ bitmap_free(obj_map);
return 0;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3824dc16ce1c..d77830ff604c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -44,6 +44,19 @@
#include "internal.h"
#include "pgalloc-track.h"
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
+
+static int __init set_nohugeiomap(char *str)
+{
+ ioremap_max_page_shift = PAGE_SHIFT;
+ return 0;
+}
+early_param("nohugeiomap", set_nohugeiomap);
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;
@@ -298,15 +311,14 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
return err;
}
-int vmap_range(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int max_page_shift)
+int ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
{
int err;
- err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
+ err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
+ ioremap_max_page_shift);
flush_cache_vmap(addr, end);
-
return err;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 740d03e6dae2..74296c2d1fed 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2715,7 +2715,7 @@ out:
cgroup_size = max(cgroup_size, protection);
scan = lruvec_size - lruvec_size * protection /
- cgroup_size;
+ (cgroup_size + 1);
/*
* Minimally target SWAP_CLUSTER_MAX pages to keep
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0885a34197b7..8ce2620344b2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -319,6 +319,16 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long x;
long t;
+ /*
+ * Accurate vmstat updates require an RMW. On !PREEMPT_RT kernels,
+ * atomicity is provided by IRQs being disabled -- either explicitly
+ * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
+ * CPU migration, so preemption could corrupt a counter; disable
+ * preemption to prevent that.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -328,6 +338,9 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
EXPORT_SYMBOL(__mod_zone_page_state);
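
The guard added above is a compile-time-conditional pattern: on !PREEMPT_RT the surrounding IRQ-disabled context already makes the per-cpu read-modify-write safe, while on PREEMPT_RT preemption must be disabled explicitly. Reduced to its essentials (my_counter and my_mod_counter are illustrative):

    static DEFINE_PER_CPU(long, my_counter);

    static void my_mod_counter(long delta)
    {
        /* compiles away entirely on !PREEMPT_RT kernels */
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
            preempt_disable();

        __this_cpu_add(my_counter, delta);      /* non-atomic RMW */

        if (IS_ENABLED(CONFIG_PREEMPT_RT))
            preempt_enable();
    }
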
@@ -350,6 +363,10 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
delta >>= PAGE_SHIFT;
}
+ /* See __mod_zone_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -359,6 +376,9 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
EXPORT_SYMBOL(__mod_node_page_state);
@@ -391,6 +411,10 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
+ /* See __mod_zone_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
@@ -399,6 +423,9 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -409,6 +436,10 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+ /* See __mod_zone_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
@@ -417,6 +448,9 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -437,6 +471,10 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
+ /* See __mod_zone_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
@@ -445,6 +483,9 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -455,6 +496,10 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+ /* See __mod_zone_page_state */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
@@ -463,6 +508,9 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
}
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
diff --git a/mm/workingset.c b/mm/workingset.c
index 5ba3e42446fa..d4268d8e9a82 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -249,7 +249,7 @@ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
* @target_memcg: the cgroup that is causing the reclaim
* @page: the page being evicted
*
- * Returns a shadow entry to be stored in @page->mapping->i_pages in place
+ * Return: a shadow entry to be stored in @page->mapping->i_pages in place
* of the evicted @page so that a later refault can be detected.
*/
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
diff --git a/net/9p/client.c b/net/9p/client.c
index b7b958f61faf..213f12ed76cd 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -30,6 +30,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/9p.h>
+#define DEFAULT_MSIZE (128 * 1024)
+
/*
* Client Option Parsing (code inspired by NFS code)
* - a little lazy - parse all client options
@@ -65,7 +67,7 @@ EXPORT_SYMBOL(p9_is_proto_dotu);
int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
{
- if (clnt->msize != 8192)
+ if (clnt->msize != DEFAULT_MSIZE)
seq_printf(m, ",msize=%u", clnt->msize);
seq_printf(m, ",trans=%s", clnt->trans_mod->name);
@@ -139,7 +141,7 @@ static int parse_opts(char *opts, struct p9_client *clnt)
int ret = 0;
clnt->proto_version = p9_proto_2000L;
- clnt->msize = 8192;
+ clnt->msize = DEFAULT_MSIZE;
if (!opts)
return 0;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index f4dd0456beaf..007bbcc68010 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -34,7 +34,7 @@
#include <linux/syscalls.h> /* killme */
#define P9_PORT 564
-#define MAX_SOCK_BUF (64*1024)
+#define MAX_SOCK_BUF (1024*1024)
#define MAXPOLLWADDR 2
static struct p9_trans_module p9_tcp_trans;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2bbd7dce0f1d..490a4c900339 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -610,7 +610,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (!chan->vc_wq) {
err = -ENOMEM;
- goto out_free_tag;
+ goto out_remove_file;
}
init_waitqueue_head(chan->vc_wq);
chan->ring_bufs_avail = 1;
@@ -628,6 +628,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
return 0;
+out_remove_file:
+ sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
out_free_tag:
kfree(tag);
out_free_vq:
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index f4fea28e05da..3ec1a51a6944 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
- struct xen_9pfs_front_priv *priv = NULL;
+ struct xen_9pfs_front_priv *priv;
RING_IDX cons, prod, masked_cons, masked_prod;
unsigned long flags;
u32 size = p9_req->tc.size;
@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
break;
}
read_unlock(&xen_9pfs_lock);
- if (!priv || priv->client != client)
+ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
return -EINVAL;
num = p9_req->tc.tag % priv->num_rings;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 37b67194c0df..414dc5671c45 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -53,20 +53,6 @@ struct chnl_net {
enum caif_states state;
};
-static void robust_list_del(struct list_head *delete_node)
-{
- struct list_head *list_node;
- struct list_head *n;
- ASSERT_RTNL();
- list_for_each_safe(list_node, n, &chnl_net_list) {
- if (list_node == delete_node) {
- list_del(list_node);
- return;
- }
- }
- WARN_ON(1);
-}
-
static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
struct sk_buff *skb;
@@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
ASSERT_RTNL();
priv = netdev_priv(dev);
strncpy(priv->name, dev->name, sizeof(priv->name));
+ INIT_LIST_HEAD(&priv->list_field);
return 0;
}
@@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
struct chnl_net *priv;
ASSERT_RTNL();
priv = netdev_priv(dev);
- robust_list_del(&priv->list_field);
+ list_del_init(&priv->list_field);
}
static const struct net_device_ops netdev_ops = {
@@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, _tmp, &chnl_net_list) {
dev = list_entry(list_node, struct chnl_net, list_field);
- list_del(list_node);
+ list_del_init(list_node);
delete_device(dev);
}
rtnl_unlock();
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index b49c57d35a88..1a6a86693b74 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -71,11 +71,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
struct update_classid_context *ctx = (void *)v;
struct socket *sock = sock_from_file(file);
- if (sock) {
- spin_lock(&cgroup_sk_update_lock);
+ if (sock)
sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
- spin_unlock(&cgroup_sk_update_lock);
- }
if (--ctx->batch == 0) {
ctx->batch = UPDATE_CLASSID_BATCH;
return n + 1;
@@ -121,8 +118,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
struct css_task_iter it;
struct task_struct *p;
- cgroup_sk_alloc_disable();
-
cs->classid = (u32)value;
css_task_iter_start(css, 0, &it);
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 99a431c56f23..8456dfbe2eb4 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
if (!dev)
return -ENODEV;
- cgroup_sk_alloc_disable();
-
rtnl_lock();
ret = netprio_set_prio(of_css(of), dev, prio);
@@ -221,12 +219,10 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
static int update_netprio(const void *v, struct file *file, unsigned n)
{
struct socket *sock = sock_from_file(file);
- if (sock) {
- spin_lock(&cgroup_sk_update_lock);
+
+ if (sock)
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
(unsigned long)v);
- spin_unlock(&cgroup_sk_update_lock);
- }
return 0;
}
@@ -235,8 +231,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
struct task_struct *p;
struct cgroup_subsys_state *css;
- cgroup_sk_alloc_disable();
-
cgroup_taskset_for_each(p, css, tset) {
void *v = (void *)(unsigned long)css->id;
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index c5c74a34d139..91e7a2202697 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
newdp->dccps_role = DCCP_ROLE_SERVER;
newdp->dccps_hc_rx_ackvec = NULL;
newdp->dccps_service_list = NULL;
+ newdp->dccps_hc_rx_ccid = NULL;
+ newdp->dccps_hc_tx_ccid = NULL;
newdp->dccps_service = dreq->dreq_service;
newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 1dc45e40f961..41f36ad8b0ec 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
return queue_work(dsa_owq, work);
}
+void dsa_flush_workqueue(void)
+{
+ flush_workqueue(dsa_owq);
+}
+
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 1b2b25d7bd02..eef13cd20f19 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->setup = false;
}
+/* First tear down the non-shared, then the shared ports. This ensures that
+ * all work items scheduled by our switchdev handlers for user ports have
+ * completed before we destroy the refcounting kept on the shared ports.
+ */
+static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
+ dsa_port_teardown(dp);
+
+ dsa_flush_workqueue();
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
+ dsa_port_teardown(dp);
+}
+
+static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_switch_teardown(dp->ds);
+}
+
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
@@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
return 0;
teardown:
- list_for_each_entry(dp, &dst->ports, list)
- dsa_port_teardown(dp);
+ dsa_tree_teardown_ports(dst);
- list_for_each_entry(dp, &dst->ports, list)
- dsa_switch_teardown(dp->ds);
+ dsa_tree_teardown_switches(dst);
return err;
}
-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
-{
- struct dsa_port *dp;
-
- list_for_each_entry(dp, &dst->ports, list)
- dsa_port_teardown(dp);
-
- list_for_each_entry(dp, &dst->ports, list)
- dsa_switch_teardown(dp->ds);
-}
-
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
@@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_master(dst);
+ dsa_tree_teardown_ports(dst);
+
dsa_tree_teardown_switches(dst);
dsa_tree_teardown_cpu_ports(dst);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 33ab7d7af9eb..a5c9bc7b66c6 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
+void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 662ff531d4e2..a2bf2d8ac65b 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1854,13 +1854,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
* use the switch internal MDIO bus instead
*/
ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
- if (ret) {
- netdev_err(slave_dev,
- "failed to connect to port %d: %d\n",
- dp->index, ret);
- phylink_destroy(dp->pl);
- return ret;
- }
+ }
+ if (ret) {
+ netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+ ERR_PTR(ret));
+ phylink_destroy(dp->pl);
}
return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3f7bd7ae7d7a..141e85e6422b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1346,7 +1346,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
if (dup_sack && (sacked & TCPCB_RETRANS)) {
if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
- tp->undo_retrans--;
+ tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
if ((sacked & TCPCB_SACKED_ACKED) &&
before(start_seq, state->reord))
state->reord = start_seq;
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index 0d122edc368d..b91003538d87 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
{
int err;
- udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
+ udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
if (!udp_tunnel_nic_workqueue)
return -ENOMEM;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1bec5b22f80d..0371d2c14145 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
int err = -ENOMEM;
int allow_create = 1;
int replace_required = 0;
- int sernum = fib6_new_sernum(info->nl_net);
if (info->nlh) {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (!err) {
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
- __fib6_update_sernum_upto_root(rt, sernum);
+ __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
fib6_start_gc(info->nl_net, rt);
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 53486b162f01..93271a2632b8 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
}
if (tunnel->version == L2TP_HDR_VER_3 &&
- l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
+ l2tp_session_dec_refcount(session);
goto invalid;
+ }
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
l2tp_session_dec_refcount(session);
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 5265525011ad..5ca186d53cb0 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -1083,8 +1083,10 @@ static void __net_exit mctp_routes_net_exit(struct net *net)
{
struct mctp_route *rt;
+ rcu_read_lock();
list_for_each_entry_rcu(rt, &net->mctp.routes, list)
mctp_route_release(rt);
+ rcu_read_unlock();
}
static struct pernet_operations mctp_net_ops = {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 543365f58e97..2a2bc64f75cf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -46,6 +46,8 @@
* Copyright (C) 2011, <lokec@ccs.neu.edu>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/mm.h>
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 9de41e7f2d07..3e776e3dff91 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -194,6 +194,8 @@ static void rsi_request(struct cache_detail *cd,
qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
(*bpp)[-1] = '\n';
+ WARN_ONCE(*blen < 0,
+ "RPCSEC/GSS credential too large - please use gssproxy\n");
}
static int rsi_parse(struct cache_detail *cd,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 1a2c1c44bb00..59641803472c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -803,7 +803,7 @@ static int cache_request(struct cache_detail *detail,
detail->cache_request(detail, crq->item, &bp, &len);
if (len < 0)
- return -EAGAIN;
+ return -E2BIG;
return PAGE_SIZE - len;
}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index e1153cba9cc6..6316bd2b8f37 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -663,7 +663,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
struct xdr_buf *arg = &rqstp->rq_arg;
- unsigned long pages, filled;
+ unsigned long pages, filled, ret;
pagevec_init(&rqstp->rq_pvec);
@@ -675,11 +675,12 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
pages = RPCSVC_MAXPAGES;
}
- for (;;) {
- filled = alloc_pages_bulk_array(GFP_KERNEL, pages,
- rqstp->rq_pages);
- if (filled == pages)
- break;
+ for (filled = 0; filled < pages; filled = ret) {
+ ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
+ rqstp->rq_pages);
+ if (ret > filled)
+ /* Made progress, don't sleep yet */
+ continue;
set_current_state(TASK_INTERRUPTIBLE);
if (signalled() || kthread_should_stop()) {
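
The rewritten loop above only sleeps when alloc_pages_bulk_array() fails to add a single page, instead of retrying from scratch on every partial fill. A stripped-down sketch of that progress-tracking shape, omitting the signal and kthread-stop handling the real code keeps (my_fill_pages is invented):

    static void my_fill_pages(struct page **pages, unsigned long want)
    {
        unsigned long filled, ret;

        for (filled = 0; filled < want; filled = ret) {
            ret = alloc_pages_bulk_array(GFP_KERNEL, want, pages);
            if (ret > filled)
                continue;       /* made progress, retry immediately */
            /* no progress at all: back off before trying again */
            schedule_timeout_interruptible(msecs_to_jiffies(500));
        }
    }
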
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a0a27d87f631..ad570c2450be 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2423,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
u32 dport, struct sk_buff_head *xmitq)
{
- unsigned long time_limit = jiffies + 2;
+ unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index eb47b9de2380..92345c9bb60c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -3073,7 +3073,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
other = unix_peer(sk);
if (other && unix_peer(other) != sk &&
- unix_recvq_full(other) &&
+ unix_recvq_full_lockless(other) &&
unix_dgram_peer_wake_me(sk, other))
writable = 0;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 3e02cc3b24f8..e2c0cfb334d2 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -2014,7 +2014,7 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
{
const struct vsock_transport *transport;
struct vsock_sock *vsk;
- ssize_t record_len;
+ ssize_t msg_len;
long timeout;
int err = 0;
DEFINE_WAIT(wait);
@@ -2028,9 +2028,9 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
if (err <= 0)
goto out;
- record_len = transport->seqpacket_dequeue(vsk, msg, flags);
+ msg_len = transport->seqpacket_dequeue(vsk, msg, flags);
- if (record_len < 0) {
+ if (msg_len < 0) {
err = -ENOMEM;
goto out;
}
@@ -2044,14 +2044,14 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
* packet.
*/
if (flags & MSG_TRUNC)
- err = record_len;
+ err = msg_len;
else
err = len - msg_data_left(msg);
/* Always set MSG_TRUNC if real length of packet is
* bigger than user's buffer.
*/
- if (record_len > len)
+ if (msg_len > len)
msg->msg_flags |= MSG_TRUNC;
}
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 081e7ae93cb1..59ee1be5a6dd 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -76,8 +76,12 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
goto out;
if (msg_data_left(info->msg) == 0 &&
- info->type == VIRTIO_VSOCK_TYPE_SEQPACKET)
- pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) {
+ pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+
+ if (info->msg->msg_flags & MSG_EOR)
+ pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ }
}
trace_virtio_transport_alloc_pkt(src_cid, src_port,
@@ -457,9 +461,12 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
dequeued_len += pkt_len;
}
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
msg_ready = true;
vvs->msg_count--;
+
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
+ msg->msg_flags |= MSG_EOR;
}
virtio_transport_dec_rx_pkt(vvs, pkt);
@@ -1029,7 +1036,7 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
goto out;
}
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
vvs->msg_count++;
/* Try to copy small packets into the buffer of last packet queued,
@@ -1044,12 +1051,12 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
/* If there is space in the last packet queued, we copy the
* new packet in its buffer. We avoid this if the last packet
- * queued has VIRTIO_VSOCK_SEQ_EOR set, because this is
- * delimiter of SEQPACKET record, so 'pkt' is the first packet
- * of a new record.
+ * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is
+ * delimiter of SEQPACKET message, so 'pkt' is the first packet
+ * of a new message.
*/
if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
- !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)) {
+ !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
pkt->len);
last_pkt->len += pkt->len;
diff --git a/scripts/check_extable.sh b/scripts/check_extable.sh
index 93af93c7b346..4b380564cf74 100755
--- a/scripts/check_extable.sh
+++ b/scripts/check_extable.sh
@@ -4,7 +4,7 @@
obj=$1
-file ${obj} | grep -q ELF || (echo "${obj} is not and ELF file." 1>&2 ; exit 0)
+file ${obj} | grep -q ELF || (echo "${obj} is not an ELF file." 1>&2 ; exit 0)
# Bail out early if there isn't an __ex_table section in this object file.
objdump -hj __ex_table ${obj} 2> /dev/null > /dev/null
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 461d4221e4a4..c27d2312cfc3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -501,7 +501,7 @@ our $Binary = qr{(?i)0b[01]+$Int_type?};
our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?};
our $Int = qr{[0-9]+$Int_type?};
our $Octal = qr{0[0-7]+$Int_type?};
-our $String = qr{"[X\t]*"};
+our $String = qr{(?:\b[Lu])?"[X\t]*"};
our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
@@ -1181,7 +1181,8 @@ sub git_commit_info {
# git log --format='%H %s' -1 $line |
# echo "commit $(cut -c 1-12,41-)"
# done
- } elsif ($lines[0] =~ /^fatal: ambiguous argument '$commit': unknown revision or path not in the working tree\./) {
+ } elsif ($lines[0] =~ /^fatal: ambiguous argument '$commit': unknown revision or path not in the working tree\./ ||
+ $lines[0] =~ /^fatal: bad object $commit/) {
$id = undef;
} else {
$id = substr($lines[0], 0, 12);
@@ -2587,6 +2588,8 @@ sub process {
my $reported_maintainer_file = 0;
my $non_utf8_charset = 0;
+ my $last_git_commit_id_linenr = -1;
+
my $last_blank_line = 0;
my $last_coalesced_string_linenr = -1;
@@ -2909,10 +2912,10 @@ sub process {
my ($email_name, $email_comment, $email_address, $comment1) = parse_email($ctx);
my ($author_name, $author_comment, $author_address, $comment2) = parse_email($author);
- if ($email_address eq $author_address && $email_name eq $author_name) {
+ if (lc $email_address eq lc $author_address && $email_name eq $author_name) {
$author_sob = $ctx;
$authorsignoff = 2;
- } elsif ($email_address eq $author_address) {
+ } elsif (lc $email_address eq lc $author_address) {
$author_sob = $ctx;
$authorsignoff = 3;
} elsif ($email_name eq $author_name) {
@@ -3170,10 +3173,20 @@ sub process {
}
# Check for git id commit length and improperly formed commit descriptions
- if ($in_commit_log && !$commit_log_possible_stack_dump &&
+# A correctly formed commit description is:
+# commit <SHA-1 hash length 12+ chars> ("Complete commit subject")
+# with the commit subject '("' prefix and '")' suffix
+# This is a fairly complicated block as it tests for what appears to be a
+# bare SHA-1 hash with a minimum length of 5. It also avoids several types of
+# possible SHA-1 matches.
+# A commit match can span multiple lines so this block attempts to find a
+# complete typical commit on a maximum of 3 lines
+ if ($perl_version_ok &&
+ $in_commit_log && !$commit_log_possible_stack_dump &&
$line !~ /^\s*(?:Link|Patchwork|http|https|BugLink|base-commit):/i &&
$line !~ /^This reverts commit [0-9a-f]{7,40}/ &&
- ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
+ (($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
+ ($line =~ /\bcommit\s*$/i && defined($rawlines[$linenr]) && $rawlines[$linenr] =~ /^\s*[0-9a-f]{5,}\b/i)) ||
($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
$line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
$line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) {
@@ -3183,49 +3196,56 @@ sub process {
my $long = 0;
my $case = 1;
my $space = 1;
- my $hasdesc = 0;
- my $hasparens = 0;
my $id = '0123456789ab';
my $orig_desc = "commit description";
my $description = "";
+ my $herectx = $herecurr;
+ my $has_parens = 0;
+ my $has_quotes = 0;
+
+ my $input = $line;
+ if ($line =~ /(?:\bcommit\s+[0-9a-f]{5,}|\bcommit\s*$)/i) {
+ for (my $n = 0; $n < 2; $n++) {
+ if ($input =~ /\bcommit\s+[0-9a-f]{5,}\s*($balanced_parens)/i) {
+ $orig_desc = $1;
+ $has_parens = 1;
+ # Always strip the leading/trailing parens, then the double quotes if present
+ $orig_desc = substr($orig_desc, 1, -1);
+ if ($orig_desc =~ /^".*"$/) {
+ $orig_desc = substr($orig_desc, 1, -1);
+ $has_quotes = 1;
+ }
+ last;
+ }
+ last if ($#lines < $linenr + $n);
+ $input .= " " . trim($rawlines[$linenr + $n]);
+ $herectx .= "$rawlines[$linenr + $n]\n";
+ }
+ $herectx = $herecurr if (!$has_parens);
+ }
- if ($line =~ /\b(c)ommit\s+([0-9a-f]{5,})\b/i) {
+ if ($input =~ /\b(c)ommit\s+([0-9a-f]{5,})\b/i) {
$init_char = $1;
$orig_commit = lc($2);
- } elsif ($line =~ /\b([0-9a-f]{12,40})\b/i) {
+ $short = 0 if ($input =~ /\bcommit\s+[0-9a-f]{12,40}/i);
+ $long = 1 if ($input =~ /\bcommit\s+[0-9a-f]{41,}/i);
+ $space = 0 if ($input =~ /\bcommit [0-9a-f]/i);
+ $case = 0 if ($input =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/);
+ } elsif ($input =~ /\b([0-9a-f]{12,40})\b/i) {
$orig_commit = lc($1);
}
- $short = 0 if ($line =~ /\bcommit\s+[0-9a-f]{12,40}/i);
- $long = 1 if ($line =~ /\bcommit\s+[0-9a-f]{41,}/i);
- $space = 0 if ($line =~ /\bcommit [0-9a-f]/i);
- $case = 0 if ($line =~ /\b[Cc]ommit\s+[0-9a-f]{5,40}[^A-F]/);
- if ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)"\)/i) {
- $orig_desc = $1;
- $hasparens = 1;
- } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s*$/i &&
- defined $rawlines[$linenr] &&
- $rawlines[$linenr] =~ /^\s*\("([^"]+)"\)/) {
- $orig_desc = $1;
- $hasparens = 1;
- } elsif ($line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("[^"]+$/i &&
- defined $rawlines[$linenr] &&
- $rawlines[$linenr] =~ /^\s*[^"]+"\)/) {
- $line =~ /\bcommit\s+[0-9a-f]{5,}\s+\("([^"]+)$/i;
- $orig_desc = $1;
- $rawlines[$linenr] =~ /^\s*([^"]+)"\)/;
- $orig_desc .= " " . $1;
- $hasparens = 1;
- }
-
($id, $description) = git_commit_info($orig_commit,
$id, $orig_desc);
if (defined($id) &&
- ($short || $long || $space || $case || ($orig_desc ne $description) || !$hasparens)) {
+ ($short || $long || $space || $case || ($orig_desc ne $description) || !$has_quotes) &&
+ $last_git_commit_id_linenr != $linenr - 1) {
ERROR("GIT_COMMIT_ID",
- "Please use git commit description style 'commit <12+ chars of sha1> (\"<title line>\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herecurr);
+ "Please use git commit description style 'commit <12+ chars of sha1> (\"<title line>\")' - ie: '${init_char}ommit $id (\"$description\")'\n" . $herectx);
}
+ # Don't report the next line if this line ends in "commit" and the SHA-1 hash is on the next line
+ $last_git_commit_id_linenr = $linenr if ($line =~ /\bcommit\s*$/i);
}
# Check for added, moved or deleted files
@@ -6132,7 +6152,8 @@ sub process {
}
# concatenated string without spaces between elements
- if ($line =~ /$String[A-Za-z0-9_]/ || $line =~ /[A-Za-z0-9_]$String/) {
+ if ($line =~ /$String[A-Z_]/ ||
+ ($line =~ /([A-Za-z0-9_]+)$String/ && $1 !~ /^[Lu]$/)) {
if (CHK("CONCATENATED_STRING",
"Concatenated strings should use spaces between elements\n" . $herecurr) &&
$fix) {
@@ -6145,7 +6166,7 @@ sub process {
}
# uncoalesced string fragments
- if ($line =~ /$String\s*"/) {
+ if ($line =~ /$String\s*[Lu]?"/) {
if (WARN("STRING_FRAGMENTS",
"Consecutive strings are generally better as a single string\n" . $herecurr) &&
$fix) {
diff --git a/scripts/coccinelle/api/kvmalloc.cocci b/scripts/coccinelle/api/kvmalloc.cocci
index c30dab718a49..5ddcb76b76b0 100644
--- a/scripts/coccinelle/api/kvmalloc.cocci
+++ b/scripts/coccinelle/api/kvmalloc.cocci
@@ -79,7 +79,7 @@ position p : script:python() { relevant(p) };
} else {
... when != krealloc(E, ...)
when any
-* \(kfree\|kzfree\)(E)
+* \(kfree\|kfree_sensitive\)(E)
...
}
diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
index 9be48b520879..676edd562eef 100644
--- a/scripts/coccinelle/iterators/use_after_iter.cocci
+++ b/scripts/coccinelle/iterators/use_after_iter.cocci
@@ -123,6 +123,8 @@ hlist_for_each_entry_safe(c,...) S
|
list_remove_head(x,c,...)
|
+list_entry_is_head(c,...)
+|
sizeof(<+...c...+>)
|
&c->member
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index 319f92104f56..4edc708baa63 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -17,13 +17,7 @@ binutils)
echo 2.23.0
;;
gcc)
- # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
- # https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
- if [ "$SRCARCH" = arm64 ]; then
- echo 5.1.0
- else
- echo 4.9.0
- fi
+ echo 5.1.0
;;
icc)
# temporary
diff --git a/scripts/module.lds.S b/scripts/module.lds.S
index 04c5685c25cf..1d0e1e4dc3d2 100644
--- a/scripts/module.lds.S
+++ b/scripts/module.lds.S
@@ -24,6 +24,7 @@ SECTIONS {
__kcrctab 0 : { *(SORT(___kcrctab+*)) }
__kcrctab_gpl 0 : { *(SORT(___kcrctab_gpl+*)) }
+ .ctors 0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) }
.init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }
__jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index 0ef3abfc4a51..f355869c65cd 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -349,6 +349,7 @@ static int do_file(char const *const fname, void *addr)
case EM_ARM:
case EM_MICROBLAZE:
case EM_MIPS:
+ case EM_RISCV:
case EM_XTENSA:
break;
default:
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index ae1e2542ee4a..3b46490271fe 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -87,10 +87,24 @@ static void snd_gus_init_control(struct snd_gus_card *gus)
static int snd_gus_free(struct snd_gus_card *gus)
{
- if (gus->gf1.res_port2) {
- snd_gf1_stop(gus);
- snd_gus_init_dma_irq(gus, 0);
+ if (gus->gf1.res_port2 == NULL)
+ goto __hw_end;
+ snd_gf1_stop(gus);
+ snd_gus_init_dma_irq(gus, 0);
+ __hw_end:
+ release_and_free_resource(gus->gf1.res_port1);
+ release_and_free_resource(gus->gf1.res_port2);
+ if (gus->gf1.irq >= 0)
+ free_irq(gus->gf1.irq, (void *) gus);
+ if (gus->gf1.dma1 >= 0) {
+ disable_dma(gus->gf1.dma1);
+ free_dma(gus->gf1.dma1);
}
+ if (!gus->equal_dma && gus->gf1.dma2 >= 0) {
+ disable_dma(gus->gf1.dma2);
+ free_dma(gus->gf1.dma2);
+ }
+ kfree(gus);
return 0;
}
@@ -116,7 +130,7 @@ int snd_gus_create(struct snd_card *card,
};
*rgus = NULL;
- gus = devm_kzalloc(card->dev, sizeof(*gus), GFP_KERNEL);
+ gus = kzalloc(sizeof(*gus), GFP_KERNEL);
if (gus == NULL)
return -ENOMEM;
spin_lock_init(&gus->reg_lock);
@@ -142,33 +156,35 @@ int snd_gus_create(struct snd_card *card,
gus->gf1.reg_timerctrl = GUSP(gus, TIMERCNTRL);
gus->gf1.reg_timerdata = GUSP(gus, TIMERDATA);
/* allocate resources */
- gus->gf1.res_port1 = devm_request_region(card->dev, port, 16,
- "GUS GF1 (Adlib/SB)");
+ gus->gf1.res_port1 = request_region(port, 16, "GUS GF1 (Adlib/SB)");
if (!gus->gf1.res_port1) {
snd_printk(KERN_ERR "gus: can't grab SB port 0x%lx\n", port);
+ snd_gus_free(gus);
return -EBUSY;
}
- gus->gf1.res_port2 = devm_request_region(card->dev, port + 0x100, 12,
- "GUS GF1 (Synth)");
+ gus->gf1.res_port2 = request_region(port + 0x100, 12, "GUS GF1 (Synth)");
if (!gus->gf1.res_port2) {
snd_printk(KERN_ERR "gus: can't grab synth port 0x%lx\n", port + 0x100);
+ snd_gus_free(gus);
return -EBUSY;
}
- if (irq >= 0 && devm_request_irq(card->dev, irq, snd_gus_interrupt, 0,
- "GUS GF1", (void *) gus)) {
+ if (irq >= 0 && request_irq(irq, snd_gus_interrupt, 0, "GUS GF1", (void *) gus)) {
snd_printk(KERN_ERR "gus: can't grab irq %d\n", irq);
+ snd_gus_free(gus);
return -EBUSY;
}
gus->gf1.irq = irq;
card->sync_irq = irq;
- if (snd_devm_request_dma(card->dev, dma1, "GUS - 1")) {
+ if (request_dma(dma1, "GUS - 1")) {
snd_printk(KERN_ERR "gus: can't grab DMA1 %d\n", dma1);
+ snd_gus_free(gus);
return -EBUSY;
}
gus->gf1.dma1 = dma1;
if (dma2 >= 0 && dma1 != dma2) {
- if (snd_devm_request_dma(card->dev, dma2, "GUS - 2")) {
+ if (request_dma(dma2, "GUS - 2")) {
snd_printk(KERN_ERR "gus: can't grab DMA2 %d\n", dma2);
+ snd_gus_free(gus);
return -EBUSY;
}
gus->gf1.dma2 = dma2;
@@ -193,8 +209,10 @@ int snd_gus_create(struct snd_card *card,
gus->gf1.volume_ramp = 25;
gus->gf1.smooth_pan = 1;
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, gus, &ops);
- if (err < 0)
+ if (err < 0) {
+ snd_gus_free(gus);
return err;
+ }
*rgus = gus;
return 0;
}
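The hunks above drop the devm_-managed allocations, so every error path in snd_gus_create() must now call the destructor by hand, and snd_gus_free() has to tolerate partially constructed state. A userspace-flavoured sketch of that pattern, not the driver code itself:

    #include <stdlib.h>

    struct gadget {
            void *region;   /* NULL until acquired */
            int   irq;      /* -1 until acquired */
    };

    static void gadget_free(struct gadget *g)
    {
            /* Safe on partially constructed objects: each step checks
             * whether its resource was actually acquired. */
            free(g->region);
            if (g->irq >= 0)
                    ;       /* free_irq(g->irq, g) in the real driver */
            free(g);
    }

    static struct gadget *gadget_create(void)
    {
            struct gadget *g = calloc(1, sizeof(*g));

            if (!g)
                    return NULL;
            g->irq = -1;                    /* mark "not yet acquired" */
            g->region = malloc(16);
            if (!g->region) {
                    gadget_free(g);         /* mirrors snd_gus_free() on error */
                    return NULL;
            }
            return g;
    }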
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index 20f490e9d563..a04a9d3253f8 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -618,12 +618,19 @@ static int snd_interwave_card_new(struct device *pdev, int dev,
return 0;
}
-static int snd_interwave_probe(struct snd_card *card, int dev)
+static int snd_interwave_probe_gus(struct snd_card *card, int dev,
+ struct snd_gus_card **gusp)
+{
+ return snd_gus_create(card, port[dev], -irq[dev], dma1[dev], dma2[dev],
+ 0, 32, pcm_channels[dev], effect[dev], gusp);
+}
+
+static int snd_interwave_probe(struct snd_card *card, int dev,
+ struct snd_gus_card *gus)
{
int xirq, xdma1, xdma2;
struct snd_interwave *iwcard = card->private_data;
struct snd_wss *wss;
- struct snd_gus_card *gus;
#ifdef SNDRV_STB
struct snd_i2c_bus *i2c_bus;
#endif
@@ -634,14 +641,6 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
xdma1 = dma1[dev];
xdma2 = dma2[dev];
- err = snd_gus_create(card,
- port[dev],
- -xirq, xdma1, xdma2,
- 0, 32,
- pcm_channels[dev], effect[dev], &gus);
- if (err < 0)
- return err;
-
err = snd_interwave_detect(iwcard, gus, dev
#ifdef SNDRV_STB
, &i2c_bus
@@ -757,22 +756,6 @@ static int snd_interwave_probe(struct snd_card *card, int dev)
return 0;
}
-static int snd_interwave_isa_probe1(int dev, struct device *devptr)
-{
- struct snd_card *card;
- int err;
-
- err = snd_interwave_card_new(devptr, dev, &card);
- if (err < 0)
- return err;
-
- err = snd_interwave_probe(card, dev);
- if (err < 0)
- return err;
- dev_set_drvdata(devptr, card);
- return 0;
-}
-
static int snd_interwave_isa_match(struct device *pdev,
unsigned int dev)
{
@@ -788,6 +771,8 @@ static int snd_interwave_isa_match(struct device *pdev,
static int snd_interwave_isa_probe(struct device *pdev,
unsigned int dev)
{
+ struct snd_card *card;
+ struct snd_gus_card *gus;
int err;
static const int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, -1};
static const int possible_dmas[] = {0, 1, 3, 5, 6, 7, -1};
@@ -814,19 +799,31 @@ static int snd_interwave_isa_probe(struct device *pdev,
}
}
+ err = snd_interwave_card_new(pdev, dev, &card);
+ if (err < 0)
+ return err;
+
if (port[dev] != SNDRV_AUTO_PORT)
- return snd_interwave_isa_probe1(dev, pdev);
+ err = snd_interwave_probe_gus(card, dev, &gus);
else {
static const long possible_ports[] = {0x210, 0x220, 0x230, 0x240, 0x250, 0x260};
int i;
for (i = 0; i < ARRAY_SIZE(possible_ports); i++) {
port[dev] = possible_ports[i];
- err = snd_interwave_isa_probe1(dev, pdev);
+ err = snd_interwave_probe_gus(card, dev, &gus);
if (! err)
return 0;
}
- return err;
}
+ if (err < 0)
+ return err;
+
+ err = snd_interwave_probe(card, dev, gus);
+ if (err < 0)
+ return err;
+
+ dev_set_drvdata(pdev, card);
+ return 0;
}
static struct isa_driver snd_interwave_driver = {
@@ -844,6 +841,7 @@ static int snd_interwave_pnp_detect(struct pnp_card_link *pcard,
{
static int dev;
struct snd_card *card;
+ struct snd_gus_card *gus;
int res;
for ( ; dev < SNDRV_CARDS; dev++) {
@@ -860,7 +858,10 @@ static int snd_interwave_pnp_detect(struct pnp_card_link *pcard,
res = snd_interwave_pnp(dev, card->private_data, pcard, pid);
if (res < 0)
return res;
- res = snd_interwave_probe(card, dev);
+ res = snd_interwave_probe_gus(card, dev, &gus);
+ if (res < 0)
+ return res;
+ res = snd_interwave_probe(card, dev, gus);
if (res < 0)
return res;
pnp_set_card_drvdata(pcard, card);
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index f48cc20b9e8a..468a6a20dc1e 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -137,6 +137,7 @@ static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
}
chip->irq = pci->irq;
card->sync_irq = chip->irq;
+ *rchip = vx;
return 0;
}
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index e822fa1b9d4b..4a64cab99c55 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -2942,9 +2942,6 @@ static int rt5682_suspend(struct snd_soc_component *component)
break;
}
- snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
- RT5682_PWR_CBJ, 0);
-
/* enter SAR ADC power saving mode */
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK |
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 5e71382467e8..546f6fd0609e 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -285,6 +285,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (li->cpu) {
struct snd_soc_card *card = simple_priv_to_card(priv);
struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+ struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
int is_single_links = 0;
/* Codec is dummy */
@@ -313,6 +314,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
dai_link->no_pcm = 1;
asoc_simple_canonicalize_cpu(cpus, is_single_links);
+ asoc_simple_canonicalize_platform(platforms, cpus);
} else {
struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
@@ -366,6 +368,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
+ struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
char dai_name[64];
int ret, is_single_links = 0;
@@ -383,6 +386,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
"%s-%s", cpus->dai_name, codecs->dai_name);
asoc_simple_canonicalize_cpu(cpus, is_single_links);
+ asoc_simple_canonicalize_platform(platforms, cpus);
ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
if (ret < 0)
@@ -608,6 +612,7 @@ static int graph_count_noml(struct asoc_simple_priv *priv,
li->num[li->link].cpus = 1;
li->num[li->link].codecs = 1;
+ li->num[li->link].platforms = 1;
li->link += 1; /* 1xCPU-Codec */
@@ -630,6 +635,7 @@ static int graph_count_dpcm(struct asoc_simple_priv *priv,
if (li->cpu) {
li->num[li->link].cpus = 1;
+ li->num[li->link].platforms = 1;
li->link++; /* 1xCPU-dummy */
} else {
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 046955bf717c..61b71d6c44cf 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -602,7 +602,7 @@ config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
select SND_SOC_DMIC
select SND_SOC_INTEL_HDA_DSP_COMMON
select SND_SOC_INTEL_SOF_MAXIM_COMMON
- select SND_SOC_SDW_MOCKUP
+ imply SND_SOC_SDW_MOCKUP
help
Add support for Intel SoundWire-based platforms connected to
MAX98373, RT700, RT711, RT1308 and RT715
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index cf567a89f421..5a2f4667d50b 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -187,6 +187,7 @@ config SND_SOC_MT8192_MT6359_RT1015_RT5682
config SND_SOC_MT8195
tristate "ASoC support for Mediatek MT8195 chip"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select SND_SOC_MEDIATEK
help
This adds ASoC platform driver support for Mediatek MT8195 chip
@@ -197,7 +198,7 @@ config SND_SOC_MT8195
config SND_SOC_MT8195_MT6359_RT1019_RT5682
tristate "ASoC Audio driver for MT8195 with MT6359 RT1019 RT5682 codec"
depends on I2C
- depends on SND_SOC_MT8195
+ depends on SND_SOC_MT8195 && MTK_PMIC_WRAP
select SND_SOC_MT6359
select SND_SOC_RT1015P
select SND_SOC_RT5682_I2C
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
index 5dc217f59bd6..c97ace7387b4 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
@@ -1018,13 +1018,12 @@ static int mt8195_mt6359_rt1019_rt5682_dev_probe(struct platform_device *pdev)
of_parse_phandle(pdev->dev.of_node,
"mediatek,dptx-codec", 0);
if (!dai_link->codecs->of_node) {
- dev_err(&pdev->dev, "Property 'dptx-codec' missing or invalid\n");
- return -EINVAL;
+ dev_dbg(&pdev->dev, "No property 'dptx-codec'\n");
+ } else {
+ dai_link->codecs->name = NULL;
+ dai_link->codecs->dai_name = "i2s-hifi";
+ dai_link->init = mt8195_dptx_codec_init;
}
-
- dai_link->codecs->name = NULL;
- dai_link->codecs->dai_name = "i2s-hifi";
- dai_link->init = mt8195_dptx_codec_init;
}
if (strcmp(dai_link->name, "ETDM3_OUT_BE") == 0) {
@@ -1032,13 +1031,12 @@ static int mt8195_mt6359_rt1019_rt5682_dev_probe(struct platform_device *pdev)
of_parse_phandle(pdev->dev.of_node,
"mediatek,hdmi-codec", 0);
if (!dai_link->codecs->of_node) {
- dev_err(&pdev->dev, "Property 'hdmi-codec' missing or invalid\n");
- return -EINVAL;
+ dev_dbg(&pdev->dev, "No property 'hdmi-codec'\n");
+ } else {
+ dai_link->codecs->name = NULL;
+ dai_link->codecs->dai_name = "i2s-hifi";
+ dai_link->init = mt8195_hdmi_codec_init;
}
-
- dai_link->codecs->name = NULL;
- dai_link->codecs->dai_name = "i2s-hifi";
- dai_link->init = mt8195_hdmi_codec_init;
}
}
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 53e0b4a1c7d2..7e89f5b0c237 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/spinlock.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
@@ -53,6 +54,7 @@ struct rk_i2s_dev {
bool is_master_mode;
const struct rk_i2s_pins *pins;
unsigned int bclk_ratio;
+ spinlock_t lock; /* tx/rx lock */
};
static int i2s_runtime_suspend(struct device *dev)
@@ -96,6 +98,7 @@ static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
unsigned int val = 0;
int retry = 10;
+ spin_lock(&i2s->lock);
if (on) {
regmap_update_bits(i2s->regmap, I2S_DMACR,
I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
@@ -136,6 +139,7 @@ static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
}
}
}
+ spin_unlock(&i2s->lock);
}
static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
@@ -143,6 +147,7 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
unsigned int val = 0;
int retry = 10;
+ spin_lock(&i2s->lock);
if (on) {
regmap_update_bits(i2s->regmap, I2S_DMACR,
I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_ENABLE);
@@ -183,6 +188,7 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
}
}
}
+ spin_unlock(&i2s->lock);
}
static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
@@ -684,6 +690,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
if (!i2s)
return -ENOMEM;
+ spin_lock_init(&i2s->lock);
i2s->dev = &pdev->dev;
i2s->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
diff --git a/sound/soc/samsung/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
index 81a29d12c57d..0cc66774b85d 100644
--- a/sound/soc/samsung/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -327,7 +327,7 @@ int simtec_audio_core_probe(struct platform_device *pdev,
snd_dev = platform_device_alloc("soc-audio", -1);
if (!snd_dev) {
- dev_err(&pdev->dev, "failed to alloc soc-audio devicec\n");
+ dev_err(&pdev->dev, "failed to alloc soc-audio device\n");
ret = -ENOMEM;
goto err_gpio;
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 4479a590194f..6ee6d24c847f 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1743,6 +1743,7 @@ static const struct registration_quirk registration_quirks[] = {
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
+ REG_QUIRK_ENTRY(0x0ecb, 0x1f47, 2), /* JBL Quantum 800 */
REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */
REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */
diff --git a/tools/arch/x86/include/asm/amd-ibs.h b/tools/arch/x86/include/asm/amd-ibs.h
new file mode 100644
index 000000000000..174e7d83fcbd
--- /dev/null
+++ b/tools/arch/x86/include/asm/amd-ibs.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * From PPR Vol 1 for AMD Family 19h Model 01h B1
+ * 55898 Rev 0.35 - Feb 5, 2021
+ */
+
+#include "msr-index.h"
+
+/*
+ * IBS Hardware MSRs
+ */
+
+/* MSR 0xc0011030: IBS Fetch Control */
+union ibs_fetch_ctl {
+ __u64 val;
+ struct {
+ __u64 fetch_maxcnt:16,/* 0-15: instruction fetch max. count */
+ fetch_cnt:16, /* 16-31: instruction fetch count */
+ fetch_lat:16, /* 32-47: instruction fetch latency */
+ fetch_en:1, /* 48: instruction fetch enable */
+ fetch_val:1, /* 49: instruction fetch valid */
+ fetch_comp:1, /* 50: instruction fetch complete */
+ ic_miss:1, /* 51: i-cache miss */
+ phy_addr_valid:1,/* 52: physical address valid */
+ l1tlb_pgsz:2, /* 53-54: i-cache L1TLB page size
+ * (needs IbsPhyAddrValid) */
+ l1tlb_miss:1, /* 55: i-cache fetch missed in L1TLB */
+ l2tlb_miss:1, /* 56: i-cache fetch missed in L2TLB */
+ rand_en:1, /* 57: random tagging enable */
+ fetch_l2_miss:1,/* 58: L2 miss for sampled fetch
+ * (needs IbsFetchComp) */
+ reserved:5; /* 59-63: reserved */
+ };
+};
+
+/* MSR 0xc0011033: IBS Execution Control */
+union ibs_op_ctl {
+ __u64 val;
+ struct {
+ __u64 opmaxcnt:16, /* 0-15: periodic op max. count */
+ reserved0:1, /* 16: reserved */
+ op_en:1, /* 17: op sampling enable */
+ op_val:1, /* 18: op sample valid */
+ cnt_ctl:1, /* 19: periodic op counter control */
+ opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
+ reserved1:5, /* 27-31: reserved */
+ opcurcnt:27, /* 32-58: periodic op counter current count */
+ reserved2:5; /* 59-63: reserved */
+ };
+};
+
+/* MSR 0xc0011035: IBS Op Data 1 */
+union ibs_op_data {
+ __u64 val;
+ struct {
+ __u64 comp_to_ret_ctr:16, /* 0-15: op completion to retire count */
+ tag_to_ret_ctr:16, /* 16-31: op tag to retire count */
+ reserved1:2, /* 32-33: reserved */
+ op_return:1, /* 34: return op */
+ op_brn_taken:1, /* 35: taken branch op */
+ op_brn_misp:1, /* 36: mispredicted branch op */
+ op_brn_ret:1, /* 37: branch op retired */
+ op_rip_invalid:1, /* 38: RIP is invalid */
+ op_brn_fuse:1, /* 39: fused branch op */
+ op_microcode:1, /* 40: microcode op */
+ reserved2:23; /* 41-63: reserved */
+ };
+};
+
+/* MSR 0xc0011036: IBS Op Data 2 */
+union ibs_op_data2 {
+ __u64 val;
+ struct {
+ __u64 data_src:3, /* 0-2: data source */
+ reserved0:1, /* 3: reserved */
+ rmt_node:1, /* 4: destination node */
+ cache_hit_st:1, /* 5: cache hit state */
+ reserved1:58; /* 6-63: reserved */
+ };
+};
+
+/* MSR 0xc0011037: IBS Op Data 3 */
+union ibs_op_data3 {
+ __u64 val;
+ struct {
+ __u64 ld_op:1, /* 0: load op */
+ st_op:1, /* 1: store op */
+ dc_l1tlb_miss:1, /* 2: data cache L1TLB miss */
+ dc_l2tlb_miss:1, /* 3: data cache L2TLB miss in 2M page */
+ dc_l1tlb_hit_2m:1, /* 4: data cache L1TLB hit in 2M page */
+ dc_l1tlb_hit_1g:1, /* 5: data cache L1TLB hit in 1G page */
+ dc_l2tlb_hit_2m:1, /* 6: data cache L2TLB hit in 2M page */
+ dc_miss:1, /* 7: data cache miss */
+ dc_mis_acc:1, /* 8: misaligned access */
+ reserved:4, /* 9-12: reserved */
+ dc_wc_mem_acc:1, /* 13: write combining memory access */
+ dc_uc_mem_acc:1, /* 14: uncacheable memory access */
+ dc_locked_op:1, /* 15: locked operation */
+ dc_miss_no_mab_alloc:1, /* 16: DC miss with no MAB allocated */
+ dc_lin_addr_valid:1, /* 17: data cache linear address valid */
+ dc_phy_addr_valid:1, /* 18: data cache physical address valid */
+ dc_l2_tlb_hit_1g:1, /* 19: data cache L2 TLB hit in 1GB page */
+ l2_miss:1, /* 20: L2 cache miss */
+ sw_pf:1, /* 21: software prefetch */
+ op_mem_width:4, /* 22-25: load/store size in bytes */
+ op_dc_miss_open_mem_reqs:6, /* 26-31: outstanding mem reqs on DC fill */
+ dc_miss_lat:16, /* 32-47: data cache miss latency */
+ tlb_refill_lat:16; /* 48-63: L1 TLB refill latency */
+ };
+};
+
+/* MSR 0xc001103c: IBS Fetch Control Extended */
+union ic_ibs_extd_ctl {
+ __u64 val;
+ struct {
+ __u64 itlb_refill_lat:16, /* 0-15: ITLB Refill latency for sampled fetch */
+ reserved:48; /* 16-63: reserved */
+ };
+};
+
+/*
+ * IBS driver related
+ */
+
+struct perf_ibs_data {
+ u32 size;
+ union {
+ u32 data[0]; /* data buffer starts here */
+ u32 caps;
+ };
+ u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
+};
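The unions above give symbolic names to raw IBS MSR bits. A hypothetical decoder for a raw IBS_FETCH_CTL value, assuming the header is on the include path and that __u64 is provided by <linux/types.h> (the header itself only pulls in msr-index.h):

    #include <stdio.h>
    #include <linux/types.h>
    #include "amd-ibs.h"

    static void dump_fetch_sample(__u64 raw)
    {
            union ibs_fetch_ctl ctl = { .val = raw };

            if (!ctl.fetch_val)
                    return;         /* sample not valid, nothing to report */
            printf("latency=%u cycles, ic_miss=%u, l1tlb_miss=%u\n",
                   (unsigned int)ctl.fetch_lat,
                   (unsigned int)ctl.ic_miss,
                   (unsigned int)ctl.l1tlb_miss);
    }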
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index a6c327f8ad9e..2ef1f6513c68 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -295,6 +295,7 @@ struct kvm_debug_exit_arch {
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
+#define KVM_GUESTDBG_BLOCKIRQ 0x00100000
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
diff --git a/tools/bootconfig/include/linux/memblock.h b/tools/bootconfig/include/linux/memblock.h
index 7862f217d85d..f2e506f7d57f 100644
--- a/tools/bootconfig/include/linux/memblock.h
+++ b/tools/bootconfig/include/linux/memblock.h
@@ -4,9 +4,8 @@
#include <stdlib.h>
-#define __pa(addr) (addr)
#define SMP_CACHE_BYTES 0
#define memblock_alloc(size, align) malloc(size)
-#define memblock_free(paddr, size) free(paddr)
+#define memblock_free_ptr(paddr, size) free(paddr)
#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index f45fa992e01d..fd67496a947f 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -111,9 +111,11 @@ static void xbc_show_list(void)
char key[XBC_KEYLEN_MAX];
struct xbc_node *leaf;
const char *val;
+ int ret;
xbc_for_each_key_value(leaf, val) {
- if (xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX) < 0) {
+ ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
+ if (ret < 0) {
fprintf(stderr, "Failed to compose key %d\n", ret);
break;
}
diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
index fbaf07dc91bf..6183b36c6846 100755
--- a/tools/bootconfig/scripts/ftrace2bconf.sh
+++ b/tools/bootconfig/scripts/ftrace2bconf.sh
@@ -239,8 +239,8 @@ instance_options() { # [instance-name]
emit_kv $PREFIX.cpumask = $val
fi
val=`cat $INSTANCE/tracing_on`
- if [ `echo $val | sed -e s/f//g`x != x ]; then
- emit_kv $PREFIX.tracing_on = $val
+ if [ "$val" = "0" ]; then
+ emit_kv $PREFIX.tracing_on = 0
fi
val=`cat $INSTANCE/current_tracer`
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index baed891d0ba4..f68e2e9eef8b 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -26,7 +26,7 @@ trap cleanup EXIT TERM
NO=1
xpass() { # pass test command
- echo "test case $NO ($3)... "
+ echo "test case $NO ($*)... "
if ! ($@ && echo "\t\t[OK]"); then
echo "\t\t[NG]"; NG=$((NG + 1))
fi
@@ -34,7 +34,7 @@ xpass() { # pass test command
}
xfail() { # fail test command
- echo "test case $NO ($3)... "
+ echo "test case $NO ($*)... "
if ! (! $@ && echo "\t\t[OK]"); then
echo "\t\t[NG]"; NG=$((NG + 1))
fi
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 9d959bc24859..95611df1d26e 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -111,10 +111,10 @@ static inline int test_and_clear_bit(int nr, unsigned long *addr)
}
/**
- * bitmap_alloc - Allocate bitmap
+ * bitmap_zalloc - Allocate bitmap
* @nbits: Number of bits
*/
-static inline unsigned long *bitmap_alloc(int nbits)
+static inline unsigned long *bitmap_zalloc(int nbits)
{
return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
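Code migrating to the new name relies on the zeroed allocation; in this tools copy it is calloc()-backed, so a plain free() releases it. A minimal usage sketch:

    #include <stdlib.h>
    #include <linux/bitmap.h>       /* the tools/include copy */

    int main(void)
    {
            unsigned long *map = bitmap_zalloc(128); /* 128 bits, all clear */

            if (!map)
                    return 1;
            map[0] |= 1UL << 5;     /* set bit 5 directly */
            free(map);              /* calloc()-backed, so plain free() */
            return 0;
    }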
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
index 95c072b70d0e..8816f06fc6c7 100644
--- a/tools/include/linux/compiler-gcc.h
+++ b/tools/include/linux/compiler-gcc.h
@@ -16,9 +16,9 @@
# define __fallthrough __attribute__ ((fallthrough))
#endif
-#if GCC_VERSION >= 40300
+#if __has_attribute(__error__)
# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* GCC_VERSION >= 40300 */
+#endif
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
@@ -38,7 +38,3 @@
#endif
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h
index 8712ff70995f..dcb0c1bf6866 100644
--- a/tools/include/linux/overflow.h
+++ b/tools/include/linux/overflow.h
@@ -5,12 +5,9 @@
#include <linux/compiler.h>
/*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -36,8 +33,6 @@
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
-
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
* For simplicity and code hygiene, the fallback code below insists on
* a, b and *d having the same type (similar to the min() and max()
@@ -73,135 +68,6 @@
__builtin_mul_overflow(__a, __b, __d); \
})
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a + __b; \
- *__d < __a; \
-})
-#define __unsigned_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a - __b; \
- __a < __b; \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a * __b; \
- __builtin_constant_p(__b) ? \
- __b > 0 && __a > type_max(typeof(__a)) / __b : \
- __a > 0 && __b > type_max(typeof(__b)) / __a; \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a + (u64)__b; \
- (((~(__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a - (u64)__b; \
- ((((__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- typeof(a) __tmax = type_max(typeof(a)); \
- typeof(a) __tmin = type_min(typeof(a)); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a * (u64)__b; \
- (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
- (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
- (__b == (typeof(__b))-1 && __a == __tmin); \
-})
-
-
-#define check_add_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_add_overflow(a, b, d), \
- __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_sub_overflow(a, b, d), \
- __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_mul_overflow(a, b, d), \
- __unsigned_mul_overflow(a, b, d))
-
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
/**
* array_size() - Calculate size of 2-dimensional array.
*
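With the fallback implementations removed, check_add_overflow() and friends always expand to the compiler builtins, which the new gcc 5.1 floor earlier in this series guarantees are available. A small sketch of the promised semantics when built against the tools/include headers (the wrapped result is stored, the overflow flag is returned):

    #include <stdio.h>
    #include <linux/overflow.h>     /* the tools/include copy */

    int main(void)
    {
            unsigned int sum;

            /* 4e9 + 5e8 exceeds UINT_MAX: the macro stores the wrapped
             * result in sum and returns true. */
            if (check_add_overflow(4000000000u, 500000000u, &sum))
                    printf("overflowed, wrapped to %u\n", sum);
            return 0;
    }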
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index a9d6fcd95f42..1c5fb86d455a 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise)
#define __NR_remap_file_pages 234
__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
#define __NR_mbind 235
-__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 236
-__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 237
-__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_migrate_pages 238
-__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_move_pages 239
-__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
#define __NR_rt_tgsigqueueinfo 240
@@ -877,9 +877,11 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
+#define __NR_process_mrelease 448
+__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
#undef __NR_syscalls
-#define __NR_syscalls 448
+#define __NR_syscalls 449
/*
* 32 bit systems traditionally used different
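A hypothetical caller of the new syscall; glibc has no wrapper at this point, so the raw number from this table is used, and the kernel currently requires flags to be zero:

    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_process_mrelease
    #define __NR_process_mrelease 448
    #endif

    /* Reap the address space of an exiting process identified by pidfd. */
    static long process_mrelease(int pidfd, unsigned int flags)
    {
            return syscall(__NR_process_mrelease, pidfd, flags);
    }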
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index d043752a74cf..3b810b53ba8b 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -635,8 +635,8 @@ struct drm_gem_open {
/**
* DRM_CAP_VBLANK_HIGH_CRTC
*
- * If set to 1, the kernel supports specifying a CRTC index in the high bits of
- * &drm_wait_vblank_request.type.
+ * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
+ * in the high bits of &drm_wait_vblank_request.type.
*
* Starting kernel version 2.6.39, this capability is always set to 1.
*/
@@ -1050,6 +1050,16 @@ extern "C" {
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+/**
+ * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
+ *
+ * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * Warning: removing a framebuffer currently in-use on an enabled plane will
+ * disable that plane. The CRTC the plane is linked to may also be disabled
+ * (depending on driver capabilities).
+ */
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
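Since the ioctl is encoded as DRM_IOWR(0xAF, unsigned int), it takes a pointer to the framebuffer ID rather than the ID itself. A minimal sketch:

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* fb_id was returned earlier by DRM_IOCTL_MODE_ADDFB/ADDFB2. */
    static int remove_fb(int drm_fd, unsigned int fb_id)
    {
            return ioctl(drm_fd, DRM_IOCTL_MODE_RMFB, &fb_id);
    }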
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index c2c7759b7d2e..bde5860b3686 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -572,6 +572,15 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1 Low priority
+ * 0 Normal priority
+ * 1 to 1k Highest priority
+ */
+#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5)
#define I915_PARAM_HUC_STATUS 42
@@ -674,6 +683,9 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
+/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
+#define I915_PARAM_HAS_USERPTR_PROBE 56
+
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -849,45 +861,113 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+/**
+ * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
+ *
+ * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
+ * and is used to retrieve the fake offset to mmap an object specified by &handle.
+ *
+ * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
+ * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
+ * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
+ */
struct drm_i915_gem_mmap_offset {
- /** Handle for the object being mapped. */
+ /** @handle: Handle for the object being mapped. */
__u32 handle;
+ /** @pad: Must be zero */
__u32 pad;
/**
- * Fake offset to use for subsequent mmap call
+ * @offset: The fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
/**
- * Flags for extended behaviour.
+ * @flags: Flags for extended behaviour.
+ *
+ * It is mandatory that one of the `MMAP_OFFSET` types
+ * should be included:
*
- * It is mandatory that one of the MMAP_OFFSET types
- * (GTT, WC, WB, UC, etc) should be included.
+ * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
+ * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
+ * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
+ * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
+ *
+ * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
+ * type. On devices without local memory, this caching mode is invalid.
+ *
+ * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
+ * be used, depending on the object placement on creation. WB will be used
+ * when the object can only exist in system memory, WC otherwise.
*/
__u64 flags;
-#define I915_MMAP_OFFSET_GTT 0
-#define I915_MMAP_OFFSET_WC 1
-#define I915_MMAP_OFFSET_WB 2
-#define I915_MMAP_OFFSET_UC 3
- /*
- * Zero-terminated chain of extensions.
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+#define I915_MMAP_OFFSET_FIXED 4
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
*
* No current extensions defined; mbz.
*/
__u64 extensions;
};
+/**
+ * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
+ * preparation for accessing the pages via some CPU domain.
+ *
+ * Specifying a new write or read domain will flush the object out of the
+ * previous domain (if required), before updating the object's domain
+ * tracking with the new domain.
+ *
+ * Note this might involve waiting for the object first if it is still active on
+ * the GPU.
+ *
+ * Supported values for @read_domains and @write_domain:
+ *
+ * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
+ * - I915_GEM_DOMAIN_CPU: CPU cache domain
+ * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
+ *
+ * All other domains are rejected.
+ *
+ * Note that for discrete, starting from DG1, this is no longer supported, and
+ * is instead rejected. On such platforms the CPU domain is effectively static,
+ * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
+ * which can't be set explicitly and instead depends on the object placements,
+ * as per the below.
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ */
struct drm_i915_gem_set_domain {
- /** Handle for the object */
+ /** @handle: Handle for the object. */
__u32 handle;
- /** New read domains */
+ /** @read_domains: New read domains. */
__u32 read_domains;
- /** New write domain */
+ /**
+ * @write_domain: New write domain.
+ *
+ * Note that having something in the write domain implies it's in the
+ * read domain, and only that read domain.
+ */
__u32 write_domain;
};
@@ -1348,12 +1428,11 @@ struct drm_i915_gem_busy {
* reading from the object simultaneously.
*
* The value of each engine class is the same as specified in the
- * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
* I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
- * reported as active itself. Some hardware may have parallel
- * execution engines, e.g. multiple media engines, which are
- * mapped to the same class identifier and so are not separately
- * reported for busyness.
+ * Some hardware may have parallel execution engines, e.g. multiple
+ * media engines, which are mapped to the same class identifier and so
+ * are not separately reported for busyness.
*
* Caveat emptor:
* Only the boolean result of this query is reliable; that is whether
@@ -1364,43 +1443,79 @@ struct drm_i915_gem_busy {
};
/**
- * I915_CACHING_NONE
- *
- * GPU access is not coherent with cpu caches. Default for machines without an
- * LLC.
- */
-#define I915_CACHING_NONE 0
-/**
- * I915_CACHING_CACHED
- *
- * GPU access is coherent with cpu caches and furthermore the data is cached in
- * last-level caches shared between cpu cores and the gpu GT. Default on
- * machines with HAS_LLC.
+ * struct drm_i915_gem_caching - Set or get the caching for given object
+ * handle.
+ *
+ * Allow userspace to control the GTT caching bits for a given object when the
+ * object is later mapped through the ppGTT (or GGTT on older platforms lacking
+ * ppGTT support, or if the object is used for scanout). Note that this might
+ * require unbinding the object from the GTT first, if its current caching value
+ * doesn't match.
+ *
+ * Note that this all changes on discrete platforms, starting from DG1, the
+ * set/get caching is no longer supported, and is now rejected. Instead the CPU
+ * caching attributes (WB vs WC) will become an immutable creation-time property
+ * for the object, along with the GTT caching level. For now we don't expose any
+ * new uAPI for this, instead on DG1 this is all implicit, although this largely
+ * shouldn't matter since DG1 is coherent by default (without any way of
+ * controlling it).
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ *
+ * Side note: Part of the reason for this is that changing the at-allocation-time CPU
+ * caching attributes for the pages might be required (and is expensive) if we
+ * need to then CPU map the pages later with different caching attributes. This
+ * inconsistent caching behaviour, while supported on x86, is not universally
+ * supported on other architectures. So for simplicity we opt for setting
+ * everything at creation time, whilst also making it immutable, on discrete
+ * platforms.
*/
-#define I915_CACHING_CACHED 1
-/**
- * I915_CACHING_DISPLAY
- *
- * Special GPU caching mode which is coherent with the scanout engines.
- * Transparently falls back to I915_CACHING_NONE on platforms where no special
- * cache mode (like write-through or gfdt flushing) is available. The kernel
- * automatically sets this mode when using a buffer as a scanout target.
- * Userspace can manually set this mode to avoid a costly stall and clflush in
- * the hotpath of drawing the first frame.
- */
-#define I915_CACHING_DISPLAY 2
-
struct drm_i915_gem_caching {
/**
- * Handle of the buffer to set/get the caching level of. */
+ * @handle: Handle of the buffer to set/get the caching level.
+ */
__u32 handle;
/**
- * Cacheing level to apply or return value
+ * @caching: The GTT caching level to apply or possible return value.
+ *
+ * The supported @caching values:
*
- * bits0-15 are for generic caching control (i.e. the above defined
- * values). bits16-31 are reserved for platform-specific variations
- * (e.g. l3$ caching on gen7). */
+ * I915_CACHING_NONE:
+ *
+ * GPU access is not coherent with CPU caches. Default for machines
+ * without an LLC. This means manual flushing might be needed, if we
+ * want GPU access to be coherent.
+ *
+ * I915_CACHING_CACHED:
+ *
+ * GPU access is coherent with CPU caches and furthermore the data is
+ * cached in last-level caches shared between CPU cores and the GPU GT.
+ *
+ * I915_CACHING_DISPLAY:
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no
+ * special cache mode (like write-through or gfdt flushing) is
+ * available. The kernel automatically sets this mode when using a
+ * buffer as a scanout target. Userspace can manually set this mode to
+ * avoid a costly stall and clflush in the hotpath of drawing the first
+ * frame.
+ */
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+#define I915_CACHING_DISPLAY 2
__u32 caching;
};
@@ -1639,6 +1754,10 @@ struct drm_i915_gem_context_param {
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
+ * someone somewhere has attempted to use it, never re-use this context
+ * param number.
+ */
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
@@ -1723,24 +1842,8 @@ struct drm_i915_gem_context_param {
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
-/*
- * I915_CONTEXT_PARAM_RINGSIZE:
- *
- * Sets the size of the CS ringbuffer to use for logical ring contexts. This
- * applies a limit of how many batches can be queued to HW before the caller
- * is blocked due to lack of space for more commands.
- *
- * Only reliably possible to be set prior to first use, i.e. during
- * construction. At any later point, the current execution must be flushed as
- * the ring can only be changed while the context is idle. Note, the ringsize
- * can be specified as a constructor property, see
- * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
- *
- * Only applies to the current set of engine and lost when those engines
- * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
- *
- * Must be between 4 - 512 KiB, in intervals of page size [4 KiB].
- * Default is 16 KiB.
+/* This API has been removed. On the off chance someone somewhere has
+ * attempted to use it, never re-use this context param number.
*/
#define I915_CONTEXT_PARAM_RINGSIZE 0xc
/* Must be kept compact -- no holes and well documented */
@@ -1807,6 +1910,69 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/**
+ * DOC: Virtual Engine uAPI
+ *
+ * Virtual engine is a concept where userspace is able to configure a set of
+ * physical engines, submit a batch buffer, and let the driver execute it on any
+ * engine from the set as it sees fit.
+ *
+ * This is primarily useful on parts which have multiple instances of the same
+ * class of engine, like for example GT3+ Skylake parts with their two VCS engines.
+ *
+ * For instance userspace can enumerate all engines of a certain class using the
+ * previously described `Engine Discovery uAPI`_. After that userspace can
+ * create a GEM context with a placeholder slot for the virtual engine (using
+ * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
+ * and instance respectively) and finally using the
+ * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
+ * the same reserved slot.
+ *
+ * Example of creating a virtual engine and submitting a batch buffer to it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
+ * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
+ * .engine_index = 0, // Place this virtual engine into engine map slot 0
+ * .num_siblings = 2,
+ * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ * { I915_ENGINE_CLASS_VIDEO, 1 }, },
+ * };
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ * .engines = { { I915_ENGINE_CLASS_INVALID,
+ * I915_ENGINE_CLASS_INVALID_NONE } },
+ * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // Now we have created a GEM context with its engine map containing a
+ * // single virtual engine. Submissions to this slot can go either to
+ * // vcs0 or vcs1, depending on the load balancing algorithm used inside
+ * // the driver. The load balancing is dynamic from one batch buffer to
+ * // another and transparent to userspace.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
/*
* i915_context_engines_load_balance:
*
@@ -1883,6 +2049,61 @@ struct i915_context_engines_bond {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * DOC: Context Engine Map uAPI
+ *
+ * Context engine map is a new way of addressing engines when submitting batch-
+ * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
+ * inside the flags field of `struct drm_i915_gem_execbuffer2`.
+ *
+ * To use it, created GEM contexts need to be configured with a list of engines
+ * the user is intending to submit to. This is accomplished using the
+ * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
+ * i915_context_param_engines`.
+ *
+ * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
+ * configured map.
+ *
+ * Example of creating such context and submitting against it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
+ * { I915_ENGINE_CLASS_COPY, 0 } }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines),
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // We have now created a GEM context with two engines in the map:
+ * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
+ * // will not be accessible from this context.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
@@ -1901,20 +2122,10 @@ struct drm_i915_gem_context_create_ext_setparam {
struct drm_i915_gem_context_param param;
};
-struct drm_i915_gem_context_create_ext_clone {
+/* This API has been removed. On the off chance someone somewhere has
+ * attempted to use it, never re-use this extension number.
+ */
#define I915_CONTEXT_CREATE_EXT_CLONE 1
- struct i915_user_extension base;
- __u32 clone_id;
- __u32 flags;
-#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
-#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
-#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
-#define I915_CONTEXT_CLONE_SSEU (1u << 3)
-#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
-#define I915_CONTEXT_CLONE_VM (1u << 5)
-#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
- __u64 rsvd;
-};
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
@@ -1986,14 +2197,69 @@ struct drm_i915_reset_stats {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
+ *
+ * Userptr objects have several restrictions on what ioctls can be used with the
+ * object handle.
+ */
struct drm_i915_gem_userptr {
+ /**
+ * @user_ptr: The pointer to the allocated memory.
+ *
+ * Needs to be aligned to PAGE_SIZE.
+ */
__u64 user_ptr;
+
+ /**
+ * @user_size:
+ *
+ * The size in bytes for the allocated memory. This will also become the
+ * object size.
+ *
+ * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE.
+ */
__u64 user_size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * I915_USERPTR_READ_ONLY:
+ *
+ * Mark the object as readonly, this also means GPU access can only be
+ * readonly. This is only supported on HW which supports readonly access
+ * through the GTT. If the HW can't support readonly access, an error is
+ * returned.
+ *
+ * I915_USERPTR_PROBE:
+ *
+ * Probe the provided @user_ptr range and validate that the @user_ptr is
+ * indeed pointing to normal memory and that the range is also valid.
+ * For example if some garbage address is given to the kernel, then this
+ * should complain.
+ *
+ * Returns -EFAULT if the probe failed.
+ *
+ * Note that this doesn't populate the backing pages, and also doesn't
+ * guarantee that the object will remain valid when the object is
+ * eventually used.
+ *
+ * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
+ * returns a non-zero value.
+ *
+ * I915_USERPTR_UNSYNCHRONIZED:
+ *
+ * NOT USED. Setting this flag will result in an error.
+ */
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_PROBE 0x2
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
- * Returned handle for the object.
+ * @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
@@ -2377,6 +2643,76 @@ struct drm_i915_query_topology_info {
};
/**
+ * DOC: Engine Discovery uAPI
+ *
+ * Engine discovery uAPI is a way of enumerating physical engines present in a
+ * GPU associated with an open i915 DRM file descriptor. This supersedes the old
+ * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
+ * `I915_PARAM_HAS_BLT`.
+ *
+ * The need for this interface came starting with Icelake and newer GPUs, which
+ * started to establish a pattern of having multiple engines of a same class,
+ * where not all instances were always completely functionally equivalent.
+ *
+ * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
+ * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
+ *
+ * Example for getting the list of engines:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_engine_info *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of engines. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * //
+ * // Alternatively a large buffer can be allocated straight away enabling
+ * // querying in one pass, in which case item.length should contain the
+ * // length of the provided buffer.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with info on all engines.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each engine in the array
+ * for (i = 0; i < info->num_engines; i++) {
+ * struct drm_i915_engine_info einfo = info->engines[i];
+ * u16 class = einfo.engine.class;
+ * u16 instance = einfo.engine.instance;
+ * ....
+ * }
+ *
+ * free(info);
+ *
+ * Each of the enumerated engines, apart from being defined by its class and
+ * instance (see `struct i915_engine_class_instance`), also can have flags and
+ * capabilities defined as documented in i915_drm.h.
+ *
+ * For instance video engines which support HEVC encoding will have the
+ * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
+ *
+ * Engine discovery only fully comes to its own when combined with the new way
+ * of addressing engines when submitting batch buffers using contexts with
+ * engine maps configured.
+ */
+
+/**
* struct drm_i915_engine_info
*
 * Describes one engine and its capabilities as known to the driver.
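Tying the userptr documentation above together, a hypothetical creation helper that asks the kernel to probe the range up front; the handle and error conventions follow the field descriptions, and the flag should only be used once I915_PARAM_HAS_USERPTR_PROBE reports support:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* ptr and size must both be PAGE_SIZE-aligned, per the field docs. */
    static int userptr_create_probed(int drm_fd, void *ptr, uint64_t size,
                                     uint32_t *handle)
    {
            struct drm_i915_gem_userptr arg = {
                    .user_ptr  = (uint64_t)(uintptr_t)ptr,
                    .user_size = size,
                    .flags     = I915_USERPTR_PROBE,
            };
            int ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);

            if (!ret)
                    *handle = arg.handle;   /* nonzero on success */
            return ret;                     /* -1/EFAULT if the probe failed */
    }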
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index 4c32e97dcdf0..bdf7b404b3e7 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -184,6 +184,7 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
/*
* A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
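A hypothetical reader for the new BLKGETDISKSEQ ioctl; the device path is illustrative only:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
            __u64 seq;
            int fd = open("/dev/sda", O_RDONLY);    /* illustrative device */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, BLKGETDISKSEQ, &seq) == 0)
                    printf("disk sequence number: %llu\n",
                           (unsigned long long)seq);
            close(fd);
            return 0;
    }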
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index d1b327036ae4..14168225cecd 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -188,11 +188,22 @@ struct ip_mreq_source {
};
struct ip_msfilter {
- __be32 imsf_multiaddr;
- __be32 imsf_interface;
- __u32 imsf_fmode;
- __u32 imsf_numsrc;
- __be32 imsf_slist[1];
+ union {
+ struct {
+ __be32 imsf_multiaddr_aux;
+ __be32 imsf_interface_aux;
+ __u32 imsf_fmode_aux;
+ __u32 imsf_numsrc_aux;
+ __be32 imsf_slist[1];
+ };
+ struct {
+ __be32 imsf_multiaddr;
+ __be32 imsf_interface;
+ __u32 imsf_fmode;
+ __u32 imsf_numsrc;
+ __be32 imsf_slist_flex[];
+ };
+ };
};
#define IP_MSFILTER_SIZE(numsrc) \
@@ -211,11 +222,22 @@ struct group_source_req {
};
struct group_filter {
- __u32 gf_interface; /* interface index */
- struct __kernel_sockaddr_storage gf_group; /* multicast address */
- __u32 gf_fmode; /* filter mode */
- __u32 gf_numsrc; /* number of sources */
- struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ union {
+ struct {
+ __u32 gf_interface_aux; /* interface index */
+ struct __kernel_sockaddr_storage gf_group_aux; /* multicast address */
+ __u32 gf_fmode_aux; /* filter mode */
+ __u32 gf_numsrc_aux; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ };
+ struct {
+ __u32 gf_interface; /* interface index */
+ struct __kernel_sockaddr_storage gf_group; /* multicast address */
+ __u32 gf_fmode; /* filter mode */
+ __u32 gf_numsrc; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist_flex[]; /* interface index */
+ };
+ };
};
#define GROUP_FILTER_SIZE(numsrc) \
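Both conversions above follow the same pattern: the legacy one-element-array
layout survives unchanged in the first union branch (its scalar fields renamed
with an `_aux` suffix, the array keeping its old name), while the second
branch keeps the original scalar names and replaces the trailing array with a
flexible array member, so binary layout and the sizing macros are unaffected.
A hedged allocation sketch against the updated header:

	#include <stdlib.h>
	#include <linux/in.h>

	static struct ip_msfilter *alloc_msfilter(unsigned int numsrc)
	{
		/* IP_MSFILTER_SIZE() sizes the header plus numsrc sources */
		struct ip_msfilter *msf = calloc(1, IP_MSFILTER_SIZE(numsrc));

		if (msf)
			msf->imsf_numsrc = numsrc;	/* fill imsf_slist_flex[] next */
		return msf;
	}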
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index d9e4aabcb31a..a067410ebea5 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1965,7 +1965,9 @@ struct kvm_stats_header {
#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_PEAK
+#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST
#define KVM_STATS_UNIT_SHIFT 4
#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
@@ -1988,8 +1990,9 @@ struct kvm_stats_header {
* @size: The number of data items for this stats.
* Every data item is of type __u64.
* @offset: The offset of the stats to the start of stat structure in
- * struture kvm or kvm_vcpu.
- * @unused: Unused field for future usage. Always 0 for now.
+ * structure kvm or kvm_vcpu.
+ * @bucket_size: A parameter value used for histogram stats. It is only used
+ * for linear histogram stats, specifying the size of each bucket.
* @name: The name string for the stats. Its size is indicated by the
* &kvm_stats_header->name_size.
*/
@@ -1998,7 +2001,7 @@ struct kvm_stats_desc {
__s16 exponent;
__u16 size;
__u32 offset;
- __u32 unused;
+ __u32 bucket_size;
char name[];
};
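For orientation, the two new histogram types partition sample values as
follows: linear histograms use fixed-width buckets of @bucket_size each,
while log histograms use power-of-two buckets, with bucket 0 holding the
value zero and the last bucket catching everything above the covered range.
A hedged sketch of the bucket-index math a stats reader might use (the
helper names are hypothetical):

	#include <linux/types.h>

	/* Linear: bucket i covers [i * bucket_size, (i + 1) * bucket_size). */
	static unsigned int linear_hist_bucket(__u64 val, __u32 bucket_size, __u16 size)
	{
		__u64 i = val / bucket_size;

		return i < size ? (unsigned int)i : size - 1;
	}

	/* Log: bucket 0 covers [0, 1), bucket i covers [2^(i-1), 2^i). */
	static unsigned int log_hist_bucket(__u64 val, __u16 size)
	{
		unsigned int i = 0;

		while (val) {	/* i becomes floor(log2(val)) + 1 */
			i++;
			val >>= 1;
		}
		return i < size ? i : size - 1;
	}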
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index dd7a166fdf9c..4d93967f8aea 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -73,7 +73,8 @@
#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */
#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
-#define MOVE_MOUNT__MASK 0x00000077
+#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */
+#define MOVE_MOUNT__MASK 0x00000177
/*
* fsopen() flags.
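`MOVE_MOUNT_SET_GROUP` changes what `move_mount(2)` does: instead of moving
the source mount, it copies the source's mount sharing group onto the target.
A hedged sketch using the raw syscall (paths are illustrative; a libc wrapper
may not exist):

	#define _GNU_SOURCE
	#include <fcntl.h>		/* AT_FDCWD */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mount.h>

	static int set_mount_group(const char *from, const char *to)
	{
		return syscall(SYS_move_mount, AT_FDCWD, from, AT_FDCWD, to,
			       MOVE_MOUNT_SET_GROUP);
	}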
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 967d9c55323d..43bd7f713c39 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -213,6 +213,7 @@ struct prctl_mm_map {
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
# define PR_SPEC_INDIRECT_BRANCH 1
+# define PR_SPEC_L1D_FLUSH 2
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
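The new `PR_SPEC_L1D_FLUSH` variant lets a task opt in to having its L1D
cache flushed when it is scheduled out, through the existing speculation
control prctl. A hedged sketch, assuming a kernel built with the feature:

	#include <stdio.h>
	#include <sys/prctl.h>

	static void enable_l1d_flush(void)
	{
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
			  PR_SPEC_ENABLE, 0, 0))
			perror("PR_SPEC_L1D_FLUSH");
	}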
@@ -234,14 +235,15 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
/* MTE tag check fault modes */
-# define PR_MTE_TCF_SHIFT 1
-# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
-# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_NONE 0
+# define PR_MTE_TCF_SYNC (1UL << 1)
+# define PR_MTE_TCF_ASYNC (1UL << 2)
+# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
/* MTE tag inclusion mask */
# define PR_MTE_TAG_SHIFT 3
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+/* Unused; kept only for source compatibility */
+# define PR_MTE_TCF_SHIFT 1
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57
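With `PR_MTE_TCF_SYNC` and `PR_MTE_TCF_ASYNC` now independent bits, a task
can set both and let the kernel pick its preferred tag check fault mode on
each CPU. A hedged sketch (arm64 only; the tag inclusion mask below is
illustrative):

	#include <sys/prctl.h>

	static int enable_mte_checks(void)
	{
		unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
				     PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
				     (0xfffeUL << PR_MTE_TAG_SHIFT);

		/* both TCF bits set: kernel chooses sync or async per CPU */
		return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
	}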
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index d17c061950df..1d84ec9db93b 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -299,6 +299,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
+#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index 0a1344c45213..441b54234635 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -40,7 +40,7 @@ struct pci_test {
static int run_test(struct pci_test *test)
{
- struct pci_endpoint_test_xfer_param param;
+ struct pci_endpoint_test_xfer_param param = {};
int ret = -EINVAL;
int fd;
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index e555e9729758..8e0163b7ef01 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -39,3 +39,4 @@ pmu-events/jevents
feature/
fixdep
libtraceevent-dynamic-list
+Documentation/doc.dep
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b66cf128cbc7..446180401e26 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -827,33 +827,36 @@ else
endif
endif
-ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd -lopcodes
-else
- # we are on a system that requires -liberty and (maybe) -lz
- # to link against -lbfd; test each case individually here
-
- # call all detections now so we get correct
- # status in VF output
- $(call feature_check,libbfd-liberty)
- $(call feature_check,libbfd-liberty-z)
- ifeq ($(feature-libbfd-liberty), 1)
- EXTLIBS += -lbfd -lopcodes -liberty
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ifndef NO_LIBBFD
+ ifeq ($(feature-libbfd), 1)
+ EXTLIBS += -lbfd -lopcodes
else
- ifeq ($(feature-libbfd-liberty-z), 1)
- EXTLIBS += -lbfd -lopcodes -liberty -lz
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+ # we are on a system that requires -liberty and (maybe) -lz
+ # to link against -lbfd; test each case individually here
+
+ # call all detections now so we get correct
+ # status in VF output
+ $(call feature_check,libbfd-liberty)
+ $(call feature_check,libbfd-liberty-z)
+
+ ifeq ($(feature-libbfd-liberty), 1)
+ EXTLIBS += -lbfd -lopcodes -liberty
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ else
+ ifeq ($(feature-libbfd-liberty-z), 1)
+ EXTLIBS += -lbfd -lopcodes -liberty -lz
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+ endif
endif
+ $(call feature_check,disassembler-four-args)
endif
- $(call feature_check,disassembler-four-args)
-endif
-ifeq ($(feature-libbfd-buildid), 1)
- CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
-else
- msg := $(warning Old version of libbfd/binutils things like PE executable profiling will not be available);
+ ifeq ($(feature-libbfd-buildid), 1)
+ CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
+ else
+ msg := $(warning Old version of libbfd/binutils things like PE executable profiling will not be available);
+ endif
endif
ifdef NO_DEMANGLE
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index ac653d08b1ea..1ca7bc337932 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -361,3 +361,5 @@
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 n64 process_mrelease sys_process_mrelease
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 6f3953f2a0d5..7bef917cc84e 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -330,10 +330,10 @@
256 64 sys_debug_setcontext sys_ni_syscall
256 spu sys_debug_setcontext sys_ni_syscall
# 257 reserved for vserver
-258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
-259 nospu mbind sys_mbind compat_sys_mbind
-260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+258 nospu migrate_pages sys_migrate_pages
+259 nospu mbind sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy
262 nospu mq_open sys_mq_open compat_sys_mq_open
263 nospu mq_unlink sys_mq_unlink
264 32 mq_timedsend sys_mq_timedsend_time32
@@ -381,7 +381,7 @@
298 common faccessat sys_faccessat
299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-301 common move_pages sys_move_pages compat_sys_move_pages
+301 common move_pages sys_move_pages
302 common getcpu sys_getcpu
303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
304 32 utimensat sys_utimensat_time32
@@ -526,3 +526,5 @@
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 8d619ec86dcc..df5261e5cfe1 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -122,7 +122,7 @@
131 common quotactl sys_quotactl sys_quotactl
132 common getpgid sys_getpgid sys_getpgid
133 common fchdir sys_fchdir sys_fchdir
-134 common bdflush - -
+134 common bdflush sys_ni_syscall sys_ni_syscall
135 common sysfs sys_sysfs sys_sysfs
136 common personality sys_s390_personality sys_s390_personality
137 common afs_syscall - -
@@ -274,9 +274,9 @@
265 common statfs64 sys_statfs64 compat_sys_statfs64
266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
-268 common mbind sys_mbind compat_sys_mbind
-269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+268 common mbind sys_mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy
271 common mq_open sys_mq_open compat_sys_mq_open
272 common mq_unlink sys_mq_unlink sys_mq_unlink
273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
@@ -293,7 +293,7 @@
284 common inotify_init sys_inotify_init sys_inotify_init
285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
-287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+287 common migrate_pages sys_migrate_pages sys_migrate_pages
288 common openat sys_openat compat_sys_openat
289 common mkdirat sys_mkdirat sys_mkdirat
290 common mknodat sys_mknodat sys_mknodat
@@ -317,7 +317,7 @@
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
308 common tee sys_tee sys_tee
309 common vmsplice sys_vmsplice sys_vmsplice
-310 common move_pages sys_move_pages compat_sys_move_pages
+310 common move_pages sys_move_pages sys_move_pages
311 common getcpu sys_getcpu sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
313 common utimes sys_utimes sys_utimes_time32
@@ -449,3 +449,5 @@
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease sys_process_mrelease
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index f6b57799c1ea..18b5500ea8bf 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -369,6 +369,7 @@
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
447 common memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
#
# Due to a historical design error, certain syscalls are numbered differently
@@ -397,7 +398,7 @@
530 x32 set_robust_list compat_sys_set_robust_list
531 x32 get_robust_list compat_sys_get_robust_list
532 x32 vmsplice sys_vmsplice
-533 x32 move_pages compat_sys_move_pages
+533 x32 move_pages sys_move_pages
534 x32 preadv compat_sys_preadv64
535 x32 pwritev compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
diff --git a/tools/perf/bench/find-bit-bench.c b/tools/perf/bench/find-bit-bench.c
index 73b5bcc5946a..22b5cfe97023 100644
--- a/tools/perf/bench/find-bit-bench.c
+++ b/tools/perf/bench/find-bit-bench.c
@@ -54,7 +54,7 @@ static bool asm_test_bit(long nr, const unsigned long *addr)
static int do_for_each_set_bit(unsigned int num_bits)
{
- unsigned long *to_test = bitmap_alloc(num_bits);
+ unsigned long *to_test = bitmap_zalloc(num_bits);
struct timeval start, end, diff;
u64 runtime_us;
struct stats fb_time_stats, tb_time_stats;
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index a812f32cf5d9..a192014fa52b 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -139,11 +139,11 @@ static void *c2c_he_zalloc(size_t size)
if (!c2c_he)
return NULL;
- c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
+ c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
if (!c2c_he->cpuset)
return NULL;
- c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
+ c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
if (!c2c_he->nodeset)
return NULL;
@@ -2047,7 +2047,7 @@ static int setup_nodes(struct perf_session *session)
struct perf_cpu_map *map = n[node].map;
unsigned long *set;
- set = bitmap_alloc(c2c.cpus_cnt);
+ set = bitmap_zalloc(c2c.cpus_cnt);
if (!set)
return -ENOMEM;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 06c4dca0c466..b3509d9d20cc 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -2757,7 +2757,7 @@ int cmd_record(int argc, const char **argv)
if (rec->opts.affinity != PERF_AFFINITY_SYS) {
rec->affinity_mask.nbits = cpu__max_cpu();
- rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+ rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
if (!rec->affinity_mask.bits) {
pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
err = -ENOMEM;
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index c783558332b8..f1e46277e822 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -144,6 +144,7 @@ done
# diff with extra ignore lines
check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
+check arch/x86/include/asm/amd-ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"'
check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
diff --git a/tools/perf/scripts/python/bin/stackcollapse-report b/tools/perf/scripts/python/bin/stackcollapse-report
index 356b9656393d..21a356bd27f6 100755
--- a/tools/perf/scripts/python/bin/stackcollapse-report
+++ b/tools/perf/scripts/python/bin/stackcollapse-report
@@ -1,3 +1,3 @@
#!/bin/sh
# description: produce callgraphs in short form for scripting use
-perf script -s "$PERF_EXEC_PATH"/scripts/python/stackcollapse.py -- "$@"
+perf script -s "$PERF_EXEC_PATH"/scripts/python/stackcollapse.py "$@"
diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
index 96c137360918..12b805efdca0 100644
--- a/tools/perf/tests/bitmap.c
+++ b/tools/perf/tests/bitmap.c
@@ -14,7 +14,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
unsigned long *bm = NULL;
int i;
- bm = bitmap_alloc(nbits);
+ bm = bitmap_zalloc(nbits);
if (map && bm) {
for (i = 0; i < map->nr; i++)
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index dbf5f5215abe..fa03ff0dc083 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -192,7 +192,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
}
if (count != expect * evlist->core.nr_entries) {
- pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
+ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect * evlist->core.nr_entries, count);
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
index a258bd51f1a4..e4d0d58b97f8 100644
--- a/tools/perf/tests/mem2node.c
+++ b/tools/perf/tests/mem2node.c
@@ -27,7 +27,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
unsigned long *bm = NULL;
int i;
- bm = bitmap_alloc(nbits);
+ bm = bitmap_zalloc(nbits);
if (map && bm) {
for (i = 0; i < map->nr; i++) {
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 0d8e3dcb7f88..041d6032a348 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -223,8 +223,11 @@ struct ucred {
* reuses AF_INET address family
*/
#define AF_XDP 44 /* XDP sockets */
+#define AF_MCTP 45 /* Management component
+ * transport protocol
+ */
-#define AF_MAX 45 /* For now.. */
+#define AF_MAX 46 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -274,6 +277,7 @@ struct ucred {
#define PF_QIPCRTR AF_QIPCRTR
#define PF_SMC AF_SMC
#define PF_XDP AF_XDP
+#define PF_MCTP AF_MCTP
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -421,6 +425,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags,
unsigned long nofile);
+extern struct file *do_accept(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
diff --git a/tools/perf/trace/beauty/move_mount_flags.sh b/tools/perf/trace/beauty/move_mount_flags.sh
index 55e59241daa4..4b1d9acc0bd0 100755
--- a/tools/perf/trace/beauty/move_mount_flags.sh
+++ b/tools/perf/trace/beauty/move_mount_flags.sh
@@ -10,7 +10,7 @@ fi
linux_mount=${linux_header_dir}/mount.h
printf "static const char *move_mount_flags[] = {\n"
-regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MOVE_MOUNT_([FT]_[[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*'
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MOVE_MOUNT_([^_]+_[[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*'
egrep $regex ${linux_mount} | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n"
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 2d4fa1304178..f2914d5bed6e 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -59,6 +59,7 @@ perf-y += pstack.o
perf-y += session.o
perf-y += sample-raw.o
perf-y += s390-sample-raw.o
+perf-y += amd-sample-raw.o
perf-$(CONFIG_TRACE) += syscalltbl.o
perf-y += ordered-events.o
perf-y += namespaces.o
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index a5e31f826828..7b12bd7a3080 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -25,11 +25,11 @@ int affinity__setup(struct affinity *a)
{
int cpu_set_size = get_cpu_set_size();
- a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
+ a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->orig_cpus)
return -1;
sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
- a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
+ a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->sched_cpus) {
zfree(&a->orig_cpus);
return -1;
diff --git a/tools/perf/util/amd-sample-raw.c b/tools/perf/util/amd-sample-raw.c
new file mode 100644
index 000000000000..d19d765195c5
--- /dev/null
+++ b/tools/perf/util/amd-sample-raw.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD specific. Provide textual annotation for IBS raw sample data.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <linux/string.h>
+#include "../../arch/x86/include/asm/amd-ibs.h"
+
+#include "debug.h"
+#include "session.h"
+#include "evlist.h"
+#include "sample-raw.h"
+#include "pmu-events/pmu-events.h"
+
+static u32 cpu_family, cpu_model, ibs_fetch_type, ibs_op_type;
+
+static void pr_ibs_fetch_ctl(union ibs_fetch_ctl reg)
+{
+ const char * const ic_miss_strs[] = {
+ " IcMiss 0",
+ " IcMiss 1",
+ };
+ const char * const l1tlb_pgsz_strs[] = {
+ " L1TlbPgSz 4KB",
+ " L1TlbPgSz 2MB",
+ " L1TlbPgSz 1GB",
+ " L1TlbPgSz RESERVED"
+ };
+ const char * const l1tlb_pgsz_strs_erratum1347[] = {
+ " L1TlbPgSz 4KB",
+ " L1TlbPgSz 16KB",
+ " L1TlbPgSz 2MB",
+ " L1TlbPgSz 1GB"
+ };
+ const char *ic_miss_str = NULL;
+ const char *l1tlb_pgsz_str = NULL;
+
+ if (cpu_family == 0x19 && cpu_model < 0x10) {
+ /*
+ * Erratum #1238 workaround is to ignore MSRC001_1030[IbsIcMiss]
+ * Erratum #1347 workaround is to use table provided in erratum
+ */
+ if (reg.phy_addr_valid)
+ l1tlb_pgsz_str = l1tlb_pgsz_strs_erratum1347[reg.l1tlb_pgsz];
+ } else {
+ if (reg.phy_addr_valid)
+ l1tlb_pgsz_str = l1tlb_pgsz_strs[reg.l1tlb_pgsz];
+ ic_miss_str = ic_miss_strs[reg.ic_miss];
+ }
+
+ printf("ibs_fetch_ctl:\t%016llx MaxCnt %7d Cnt %7d Lat %5d En %d Val %d Comp %d%s "
+ "PhyAddrValid %d%s L1TlbMiss %d L2TlbMiss %d RandEn %d%s\n",
+ reg.val, reg.fetch_maxcnt << 4, reg.fetch_cnt << 4, reg.fetch_lat,
+ reg.fetch_en, reg.fetch_val, reg.fetch_comp, ic_miss_str ? : "",
+ reg.phy_addr_valid, l1tlb_pgsz_str ? : "", reg.l1tlb_miss, reg.l2tlb_miss,
+ reg.rand_en, reg.fetch_comp ? (reg.fetch_l2_miss ? " L2Miss 1" : " L2Miss 0") : "");
+}
+
+static void pr_ic_ibs_extd_ctl(union ic_ibs_extd_ctl reg)
+{
+ printf("ic_ibs_ext_ctl:\t%016llx IbsItlbRefillLat %3d\n", reg.val, reg.itlb_refill_lat);
+}
+
+static void pr_ibs_op_ctl(union ibs_op_ctl reg)
+{
+ printf("ibs_op_ctl:\t%016llx MaxCnt %9d En %d Val %d CntCtl %d=%s CurCnt %9d\n",
+ reg.val, ((reg.opmaxcnt_ext << 16) | reg.opmaxcnt) << 4, reg.op_en, reg.op_val,
+ reg.cnt_ctl, reg.cnt_ctl ? "uOps" : "cycles", reg.opcurcnt);
+}
+
+static void pr_ibs_op_data(union ibs_op_data reg)
+{
+ printf("ibs_op_data:\t%016llx CompToRetCtr %5d TagToRetCtr %5d%s%s%s BrnRet %d "
+ " RipInvalid %d BrnFuse %d Microcode %d\n",
+ reg.val, reg.comp_to_ret_ctr, reg.tag_to_ret_ctr,
+ reg.op_brn_ret ? (reg.op_return ? " OpReturn 1" : " OpReturn 0") : "",
+ reg.op_brn_ret ? (reg.op_brn_taken ? " OpBrnTaken 1" : " OpBrnTaken 0") : "",
+ reg.op_brn_ret ? (reg.op_brn_misp ? " OpBrnMisp 1" : " OpBrnMisp 0") : "",
+ reg.op_brn_ret, reg.op_rip_invalid, reg.op_brn_fuse, reg.op_microcode);
+}
+
+static void pr_ibs_op_data2(union ibs_op_data2 reg)
+{
+ static const char * const data_src_str[] = {
+ "",
+ " DataSrc 1=(reserved)",
+ " DataSrc 2=Local node cache",
+ " DataSrc 3=DRAM",
+ " DataSrc 4=Remote node cache",
+ " DataSrc 5=(reserved)",
+ " DataSrc 6=(reserved)",
+ " DataSrc 7=Other"
+ };
+
+ printf("ibs_op_data2:\t%016llx %sRmtNode %d%s\n", reg.val,
+ reg.data_src == 2 ? (reg.cache_hit_st ? "CacheHitSt 1=O-State "
+ : "CacheHitSt 0=M-state ") : "",
+ reg.rmt_node, data_src_str[reg.data_src]);
+}
+
+static void pr_ibs_op_data3(union ibs_op_data3 reg)
+{
+ char l2_miss_str[sizeof(" L2Miss _")] = "";
+ char op_mem_width_str[sizeof(" OpMemWidth _____ bytes")] = "";
+ char op_dc_miss_open_mem_reqs_str[sizeof(" OpDcMissOpenMemReqs __")] = "";
+
+ /*
+ * Erratum #1293
+ * Ignore L2Miss and OpDcMissOpenMemReqs (and opdata2) if DcMissNoMabAlloc or SwPf set
+ */
+ if (!(cpu_family == 0x19 && cpu_model < 0x10 && (reg.dc_miss_no_mab_alloc || reg.sw_pf))) {
+ snprintf(l2_miss_str, sizeof(l2_miss_str), " L2Miss %d", reg.l2_miss);
+ snprintf(op_dc_miss_open_mem_reqs_str, sizeof(op_dc_miss_open_mem_reqs_str),
+ " OpDcMissOpenMemReqs %2d", reg.op_dc_miss_open_mem_reqs);
+ }
+
+ if (reg.op_mem_width)
+ snprintf(op_mem_width_str, sizeof(op_mem_width_str),
+ " OpMemWidth %2d bytes", 1 << (reg.op_mem_width - 1));
+
+ printf("ibs_op_data3:\t%016llx LdOp %d StOp %d DcL1TlbMiss %d DcL2TlbMiss %d "
+ "DcL1TlbHit2M %d DcL1TlbHit1G %d DcL2TlbHit2M %d DcMiss %d DcMisAcc %d "
+ "DcWcMemAcc %d DcUcMemAcc %d DcLockedOp %d DcMissNoMabAlloc %d DcLinAddrValid %d "
+ "DcPhyAddrValid %d DcL2TlbHit1G %d%s SwPf %d%s%s DcMissLat %5d TlbRefillLat %5d\n",
+ reg.val, reg.ld_op, reg.st_op, reg.dc_l1tlb_miss, reg.dc_l2tlb_miss,
+ reg.dc_l1tlb_hit_2m, reg.dc_l1tlb_hit_1g, reg.dc_l2tlb_hit_2m, reg.dc_miss,
+ reg.dc_mis_acc, reg.dc_wc_mem_acc, reg.dc_uc_mem_acc, reg.dc_locked_op,
+ reg.dc_miss_no_mab_alloc, reg.dc_lin_addr_valid, reg.dc_phy_addr_valid,
+ reg.dc_l2_tlb_hit_1g, l2_miss_str, reg.sw_pf, op_mem_width_str,
+ op_dc_miss_open_mem_reqs_str, reg.dc_miss_lat, reg.tlb_refill_lat);
+}
+
+/*
+ * IBS Op/Execution MSRs always saved, in order, are:
+ * IBS_OP_CTL, IBS_OP_RIP, IBS_OP_DATA, IBS_OP_DATA2,
+ * IBS_OP_DATA3, IBS_DC_LINADDR, IBS_DC_PHYSADDR, BP_IBSTGT_RIP
+ */
+static void amd_dump_ibs_op(struct perf_sample *sample)
+{
+ struct perf_ibs_data *data = sample->raw_data;
+ union ibs_op_ctl *op_ctl = (union ibs_op_ctl *)data->data;
+ __u64 *rip = (__u64 *)op_ctl + 1;
+ union ibs_op_data *op_data = (union ibs_op_data *)(rip + 1);
+ union ibs_op_data3 *op_data3 = (union ibs_op_data3 *)(rip + 3);
+
+ pr_ibs_op_ctl(*op_ctl);
+ if (!op_data->op_rip_invalid)
+ printf("IbsOpRip:\t%016llx\n", *rip);
+ pr_ibs_op_data(*op_data);
+ /*
+ * Erratum #1293: ignore op_data2 if DcMissNoMabAlloc or SwPf are set
+ */
+ if (!(cpu_family == 0x19 && cpu_model < 0x10 &&
+ (op_data3->dc_miss_no_mab_alloc || op_data3->sw_pf)))
+ pr_ibs_op_data2(*(union ibs_op_data2 *)(rip + 2));
+ pr_ibs_op_data3(*op_data3);
+ if (op_data3->dc_lin_addr_valid)
+ printf("IbsDCLinAd:\t%016llx\n", *(rip + 4));
+ if (op_data3->dc_phy_addr_valid)
+ printf("IbsDCPhysAd:\t%016llx\n", *(rip + 5));
+ if (op_data->op_brn_ret && *(rip + 6))
+ printf("IbsBrTarget:\t%016llx\n", *(rip + 6));
+}
+
+/*
+ * IBS Fetch MSRs always saved, in order, are:
+ * IBS_FETCH_CTL, IBS_FETCH_LINADDR, IBS_FETCH_PHYSADDR, IC_IBS_EXTD_CTL
+ */
+static void amd_dump_ibs_fetch(struct perf_sample *sample)
+{
+ struct perf_ibs_data *data = sample->raw_data;
+ union ibs_fetch_ctl *fetch_ctl = (union ibs_fetch_ctl *)data->data;
+ __u64 *addr = (__u64 *)fetch_ctl + 1;
+ union ic_ibs_extd_ctl *extd_ctl = (union ic_ibs_extd_ctl *)addr + 2;
+
+ pr_ibs_fetch_ctl(*fetch_ctl);
+ printf("IbsFetchLinAd:\t%016llx\n", *addr++);
+ if (fetch_ctl->phy_addr_valid)
+ printf("IbsFetchPhysAd:\t%016llx\n", *addr);
+ pr_ic_ibs_extd_ctl(*extd_ctl);
+}
+
+/*
+ * Test for enable and valid bits in captured control MSRs.
+ */
+static bool is_valid_ibs_fetch_sample(struct perf_sample *sample)
+{
+ struct perf_ibs_data *data = sample->raw_data;
+ union ibs_fetch_ctl *fetch_ctl = (union ibs_fetch_ctl *)data->data;
+
+ if (fetch_ctl->fetch_en && fetch_ctl->fetch_val)
+ return true;
+
+ return false;
+}
+
+static bool is_valid_ibs_op_sample(struct perf_sample *sample)
+{
+ struct perf_ibs_data *data = sample->raw_data;
+ union ibs_op_ctl *op_ctl = (union ibs_op_ctl *)data->data;
+
+ if (op_ctl->op_en && op_ctl->op_val)
+ return true;
+
+ return false;
+}
+
+/* AMD vendor specific raw sample function. Check for PERF_RECORD_SAMPLE events
+ * and if the event was triggered by IBS, display its raw data with decoded text.
+ * The function is only invoked when the dump flag -D is set.
+ */
+void evlist__amd_sample_raw(struct evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct evsel *evsel;
+
+ if (event->header.type != PERF_RECORD_SAMPLE || !sample->raw_size)
+ return;
+
+ evsel = evlist__event2evsel(evlist, event);
+ if (!evsel)
+ return;
+
+ if (evsel->core.attr.type == ibs_fetch_type) {
+ if (!is_valid_ibs_fetch_sample(sample)) {
+ pr_debug("Invalid raw IBS Fetch MSR data encountered\n");
+ return;
+ }
+ amd_dump_ibs_fetch(sample);
+ } else if (evsel->core.attr.type == ibs_op_type) {
+ if (!is_valid_ibs_op_sample(sample)) {
+ pr_debug("Invalid raw IBS Op MSR data encountered\n");
+ return;
+ }
+ amd_dump_ibs_op(sample);
+ }
+}
+
+static void parse_cpuid(struct perf_env *env)
+{
+ const char *cpuid;
+ int ret;
+
+ cpuid = perf_env__cpuid(env);
+ /*
+ * cpuid = "AuthenticAMD,family,model,stepping"
+ */
+ ret = sscanf(cpuid, "%*[^,],%u,%u", &cpu_family, &cpu_model);
+ if (ret != 2)
+ pr_debug("problem parsing cpuid\n");
+}
+
+/*
+ * Find and assign the type number used for ibs_op or ibs_fetch samples.
+ * Device names can be large - we are only interested in the first 9 characters,
+ * to match "ibs_fetch".
+ */
+bool evlist__has_amd_ibs(struct evlist *evlist)
+{
+ struct perf_env *env = evlist->env;
+ int ret, nr_pmu_mappings = perf_env__nr_pmu_mappings(env);
+ const char *pmu_mapping = perf_env__pmu_mappings(env);
+ char name[sizeof("ibs_fetch")];
+ u32 type;
+
+ while (nr_pmu_mappings--) {
+ ret = sscanf(pmu_mapping, "%u:%9s", &type, name);
+ if (ret == 2) {
+ if (strstarts(name, "ibs_op"))
+ ibs_op_type = type;
+ else if (strstarts(name, "ibs_fetch"))
+ ibs_fetch_type = type;
+ }
+ pmu_mapping += strlen(pmu_mapping) + 1 /* '\0' */;
+ }
+
+ if (ibs_fetch_type || ibs_op_type) {
+ if (!cpu_family)
+ parse_cpuid(env);
+ return true;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 996d025b8ed8..683f6d63560e 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -21,6 +21,14 @@
#include "record.h"
#include "util/synthetic-events.h"
+struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
+{
+ struct btf *btf;
+ int err = btf__get_from_id(id, &btf);
+
+ return err ? ERR_PTR(err) : btf;
+}
+
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index ee15db2be2f4..9ed9a5676d35 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1349,6 +1349,16 @@ void dso__set_build_id(struct dso *dso, struct build_id *bid)
bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
+ if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
+ /*
+ * For the backward compatibility, it allows a build-id has
+ * trailing zeros.
+ */
+ return !memcmp(dso->bid.data, bid->data, bid->size) &&
+ !memchr_inv(&dso->bid.data[bid->size], 0,
+ dso->bid.size - bid->size);
+ }
+
return dso->bid.size == bid->size &&
memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
}
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 8f7ff0035c41..cf773f0dec38 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -10,6 +10,7 @@
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
+#include "strbuf.h"
struct perf_env perf_env;
@@ -306,6 +307,45 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
return 0;
}
+int perf_env__read_pmu_mappings(struct perf_env *env)
+{
+ struct perf_pmu *pmu = NULL;
+ u32 pmu_num = 0;
+ struct strbuf sb;
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name)
+ continue;
+ pmu_num++;
+ }
+ if (!pmu_num) {
+ pr_debug("pmu mappings not available\n");
+ return -ENOENT;
+ }
+ env->nr_pmu_mappings = pmu_num;
+
+ if (strbuf_init(&sb, 128 * pmu_num) < 0)
+ return -ENOMEM;
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name)
+ continue;
+ if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
+ goto error;
+ /* include a NULL character at the end */
+ if (strbuf_add(&sb, "", 1) < 0)
+ goto error;
+ }
+
+ env->pmu_mappings = strbuf_detach(&sb, NULL);
+
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
int perf_env__read_cpuid(struct perf_env *env)
{
char cpuid[128];
@@ -404,6 +444,44 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
+const char *perf_env__cpuid(struct perf_env *env)
+{
+ int status;
+
+ if (!env || !env->cpuid) { /* Assume local operation */
+ status = perf_env__read_cpuid(env);
+ if (status)
+ return NULL;
+ }
+
+ return env->cpuid;
+}
+
+int perf_env__nr_pmu_mappings(struct perf_env *env)
+{
+ int status;
+
+ if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
+ status = perf_env__read_pmu_mappings(env);
+ if (status)
+ return 0;
+ }
+
+ return env->nr_pmu_mappings;
+}
+
+const char *perf_env__pmu_mappings(struct perf_env *env)
+{
+ int status;
+
+ if (!env || !env->pmu_mappings) { /* Assume local operation */
+ status = perf_env__read_pmu_mappings(env);
+ if (status)
+ return NULL;
+ }
+
+ return env->pmu_mappings;
+}
int perf_env__numa_node(struct perf_env *env, int cpu)
{
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 1f5175820a05..1383876f72b3 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -149,11 +149,16 @@ int perf_env__kernel_is_64_bit(struct perf_env *env);
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
int perf_env__read_cpuid(struct perf_env *env);
+int perf_env__read_pmu_mappings(struct perf_env *env);
+int perf_env__nr_pmu_mappings(struct perf_env *env);
+const char *perf_env__pmu_mappings(struct perf_env *env);
+
int perf_env__read_cpu_topology_map(struct perf_env *env);
void cpu_cache_level__free(struct cpu_cache_level *cache);
const char *perf_env__arch(struct perf_env *env);
+const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 54d251327b5b..dbfeceb2546c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -333,11 +333,11 @@ error_free:
goto out;
}
-static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
+int copy_config_terms(struct list_head *dst, struct list_head *src)
{
struct evsel_config_term *pos, *tmp;
- list_for_each_entry(pos, &src->config_terms, list) {
+ list_for_each_entry(pos, src, list) {
tmp = malloc(sizeof(*tmp));
if (tmp == NULL)
return -ENOMEM;
@@ -350,11 +350,16 @@ static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
return -ENOMEM;
}
}
- list_add_tail(&tmp->list, &dst->config_terms);
+ list_add_tail(&tmp->list, dst);
}
return 0;
}
+static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
+{
+ return copy_config_terms(&dst->config_terms, &src->config_terms);
+}
+
/**
* evsel__clone - create a new evsel copied from @orig
* @orig: original evsel
@@ -1385,11 +1390,11 @@ int evsel__disable(struct evsel *evsel)
return err;
}
-static void evsel__free_config_terms(struct evsel *evsel)
+void free_config_terms(struct list_head *config_terms)
{
struct evsel_config_term *term, *h;
- list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
+ list_for_each_entry_safe(term, h, config_terms, list) {
list_del_init(&term->list);
if (term->free_str)
zfree(&term->val.str);
@@ -1397,6 +1402,11 @@ static void evsel__free_config_terms(struct evsel *evsel)
}
}
+static void evsel__free_config_terms(struct evsel *evsel)
+{
+ free_config_terms(&evsel->config_terms);
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 1b3eeab5f188..1f7edfa8568a 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -213,6 +213,9 @@ static inline struct evsel *evsel__new(struct perf_event_attr *attr)
struct evsel *evsel__clone(struct evsel *orig);
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);
+int copy_config_terms(struct list_head *dst, struct list_head *src);
+void free_config_terms(struct list_head *config_terms);
+
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d2231cb7c4f7..1c7414f66655 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -278,7 +278,7 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
if (ret)
return ret;
- set = bitmap_alloc(size);
+ set = bitmap_zalloc(size);
if (!set)
return -ENOMEM;
@@ -1294,7 +1294,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
size++;
- n->set = bitmap_alloc(size);
+ n->set = bitmap_zalloc(size);
if (!n->set) {
closedir(dir);
return -ENOMEM;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 99d047c5ead0..29b747ac31c1 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -313,7 +313,7 @@ static int metricgroup__setup_events(struct list_head *groups,
struct evsel *evsel, *tmp;
unsigned long *evlist_used;
- evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
+ evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
if (!evlist_used)
return -ENOMEM;
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index ab7108d22428..512dc8b9c168 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -106,7 +106,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
data = map->aio.data[idx];
mmap_len = mmap__mmap_len(map);
node_index = cpu__get_node(cpu);
- node_mask = bitmap_alloc(node_index + 1);
+ node_mask = bitmap_zalloc(node_index + 1);
if (!node_mask) {
pr_err("Failed to allocate node mask for mbind: error %m\n");
return -1;
@@ -258,7 +258,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
map->affinity_mask.nbits = cpu__max_cpu();
- map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+ map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
if (!map->affinity_mask.bits)
return -1;
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
index 10160ab126f9..b234d95fb10a 100644
--- a/tools/perf/util/parse-events-hybrid.c
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -76,12 +76,16 @@ static int add_hw_hybrid(struct parse_events_state *parse_state,
int ret;
perf_pmu__for_each_hybrid_pmu(pmu) {
+ LIST_HEAD(terms);
+
if (pmu_cmp(parse_state, pmu))
continue;
+ copy_config_terms(&terms, config_terms);
ret = create_event_hybrid(PERF_TYPE_HARDWARE,
&parse_state->idx, list, attr, name,
- config_terms, pmu);
+ &terms, pmu);
+ free_config_terms(&terms);
if (ret)
return ret;
}
@@ -115,11 +119,15 @@ static int add_raw_hybrid(struct parse_events_state *parse_state,
int ret;
perf_pmu__for_each_hybrid_pmu(pmu) {
+ LIST_HEAD(terms);
+
if (pmu_cmp(parse_state, pmu))
continue;
+ copy_config_terms(&terms, config_terms);
ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
- name, config_terms, pmu);
+ name, &terms, pmu);
+ free_config_terms(&terms);
if (ret)
return ret;
}
@@ -165,11 +173,15 @@ int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
*hybrid = true;
perf_pmu__for_each_hybrid_pmu(pmu) {
+ LIST_HEAD(terms);
+
if (pmu_cmp(parse_state, pmu))
continue;
+ copy_config_terms(&terms, config_terms);
ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
- attr, name, config_terms, pmu);
+ attr, name, &terms, pmu);
+ free_config_terms(&terms);
if (ret)
return ret;
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index e5eae23cfceb..51a2219df601 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -387,7 +387,7 @@ __add_event(struct list_head *list, int *idx,
evsel->name = strdup(name);
if (config_terms)
- list_splice(config_terms, &evsel->config_terms);
+ list_splice_init(config_terms, &evsel->config_terms);
if (list)
list_add_tail(&evsel->core.node, list);
@@ -535,9 +535,12 @@ int parse_events_add_cache(struct list_head *list, int *idx,
config_name ? : name, &config_terms,
&hybrid, parse_state);
if (hybrid)
- return ret;
+ goto out_free_terms;
- return add_event(list, idx, &attr, config_name ? : name, &config_terms);
+ ret = add_event(list, idx, &attr, config_name ? : name, &config_terms);
+out_free_terms:
+ free_config_terms(&config_terms);
+ return ret;
}
static void tracepoint_error(struct parse_events_error *e, int err,
@@ -1457,10 +1460,13 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
get_config_name(head_config),
&config_terms, &hybrid);
if (hybrid)
- return ret;
+ goto out_free_terms;
- return add_event(list, &parse_state->idx, &attr,
- get_config_name(head_config), &config_terms);
+ ret = add_event(list, &parse_state->idx, &attr,
+ get_config_name(head_config), &config_terms);
+out_free_terms:
+ free_config_terms(&config_terms);
+ return ret;
}
int parse_events_add_tool(struct parse_events_state *parse_state,
@@ -1608,14 +1614,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
}
if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
- struct evsel_config_term *pos, *tmp;
-
- list_for_each_entry_safe(pos, tmp, &config_terms, list) {
- list_del_init(&pos->list);
- if (pos->free_str)
- zfree(&pos->val.str);
- free(pos);
- }
+ free_config_terms(&config_terms);
return -EINVAL;
}
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 30481825515b..47b7531f51da 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -137,6 +137,9 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(cgroup, p_unsigned);
PRINT_ATTRf(text_poke, p_unsigned);
PRINT_ATTRf(build_id, p_unsigned);
+ PRINT_ATTRf(inherit_thread, p_unsigned);
+ PRINT_ATTRf(remove_on_exec, p_unsigned);
+ PRINT_ATTRf(sigtrap, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -150,7 +153,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(aux_watermark, p_unsigned);
PRINT_ATTRf(sample_max_stack, p_unsigned);
PRINT_ATTRf(aux_sample_size, p_unsigned);
- PRINT_ATTRf(text_poke, p_unsigned);
+ PRINT_ATTRf(sig_data, p_unsigned);
return ret;
}
diff --git a/tools/perf/util/sample-raw.c b/tools/perf/util/sample-raw.c
index cde5cd3ce49b..f3f6bd9d290e 100644
--- a/tools/perf/util/sample-raw.c
+++ b/tools/perf/util/sample-raw.c
@@ -1,8 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <string.h>
+#include <linux/string.h>
#include "evlist.h"
#include "env.h"
+#include "header.h"
#include "sample-raw.h"
/*
@@ -12,7 +14,13 @@
void evlist__init_trace_event_sample_raw(struct evlist *evlist)
{
const char *arch_pf = perf_env__arch(evlist->env);
+ const char *cpuid = perf_env__cpuid(evlist->env);
if (arch_pf && !strcmp("s390", arch_pf))
evlist->trace_event_sample_raw = evlist__s390_sample_raw;
+ else if (arch_pf && !strcmp("x86", arch_pf) &&
+ cpuid && strstarts(cpuid, "AuthenticAMD") &&
+ evlist__has_amd_ibs(evlist)) {
+ evlist->trace_event_sample_raw = evlist__amd_sample_raw;
+ }
}
diff --git a/tools/perf/util/sample-raw.h b/tools/perf/util/sample-raw.h
index 4be84a5510cf..ea01c5811503 100644
--- a/tools/perf/util/sample-raw.h
+++ b/tools/perf/util/sample-raw.h
@@ -6,6 +6,10 @@ struct evlist;
union perf_event;
struct perf_sample;
-void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, struct perf_sample *sample);
+void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event,
+ struct perf_sample *sample);
+bool evlist__has_amd_ibs(struct evlist *evlist);
+void evlist__amd_sample_raw(struct evlist *evlist, union perf_event *event,
+ struct perf_sample *sample);
void evlist__init_trace_event_sample_raw(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 77fc46ca07c0..0fc9a5410739 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1581,10 +1581,6 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
goto out_close;
- section = bfd_get_section_by_name(abfd, ".text");
- if (section)
- dso->text_offset = section->vma - section->filepos;
-
symbols_size = bfd_get_symtab_upper_bound(abfd);
if (symbols_size == 0) {
bfd_close(abfd);
@@ -1602,6 +1598,22 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
if (symbols_count < 0)
goto out_free;
+ section = bfd_get_section_by_name(abfd, ".text");
+ if (section) {
+ for (i = 0; i < symbols_count; ++i) {
+ if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
+ !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
+ break;
+ }
+ if (i < symbols_count) {
+ /* PE symbols can only have 4 bytes, so use .text high bits */
+ dso->text_offset = section->vma - (u32)section->vma;
+ dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
+ } else {
+ dso->text_offset = section->vma - section->filepos;
+ }
+ }
+
qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
#ifdef bfd_get_section
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 033051717ba5..f3daa44a8266 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -12,27 +12,36 @@
#include <unistd.h>
#include <ftw.h>
-
#include "cgroup_helpers.h"
/*
* To avoid relying on the system setup, when setup_cgroup_env is called
- * we create a new mount namespace, and cgroup namespace. The cgroup2
- * root is mounted at CGROUP_MOUNT_PATH
- *
- * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
- * It's easier to create our own mount namespace and manage it ourselves.
+ * we create a new mount namespace, and cgroup namespace. The cgroupv2
+ * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
+ * have cgroupv2 enabled at this point in time. It's easier to create our
+ * own mount namespace and manage it ourselves. We assume /mnt exists.
*
- * We assume /mnt exists.
+ * Related cgroupv1 helpers are named *classid*(), since we only use the
+ * net_cls controller for tagging net_cls.classid. We assume the default
+ * mount under /sys/fs/cgroup/net_cls, which should be the case for the
+ * vast majority of users.
*/
#define WALK_FD_LIMIT 16
+
#define CGROUP_MOUNT_PATH "/mnt"
+#define CGROUP_MOUNT_DFLT "/sys/fs/cgroup"
+#define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls"
#define CGROUP_WORK_DIR "/cgroup-test-work-dir"
+
#define format_cgroup_path(buf, path) \
snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
CGROUP_WORK_DIR, path)
+#define format_classid_path(buf) \
+ snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
+ CGROUP_WORK_DIR)
+
/**
* enable_all_controllers() - Enable all available cgroup v2 controllers
*
@@ -139,8 +148,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr,
return 0;
}
-
-static int join_cgroup_from_top(char *cgroup_path)
+static int join_cgroup_from_top(const char *cgroup_path)
{
char cgroup_procs_path[PATH_MAX + 1];
pid_t pid = getpid();
@@ -313,3 +321,114 @@ int cgroup_setup_and_join(const char *path) {
}
return cg_fd;
}
+
+/**
+ * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+ *
+ * After calling this function, cleanup_classid_environment should be called
+ * once testing is complete.
+ *
+ * This function will print an error to stderr and return 1 if it is unable
+ * to set up the cgroup environment. If setup is successful, 0 is returned.
+ */
+int setup_classid_environment(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+
+ if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
+ errno != EBUSY) {
+ log_err("mount cgroup base");
+ return 1;
+ }
+
+ if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
+ log_err("mkdir cgroup net_cls");
+ return 1;
+ }
+
+ if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+ errno != EBUSY) {
+ log_err("mount cgroup net_cls");
+ return 1;
+ }
+
+ cleanup_classid_environment();
+
+ if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
+ log_err("mkdir cgroup work dir");
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * set_classid() - Set a cgroupv1 net_cls classid
+ * @id: the numeric classid
+ *
+ * Writes the passed classid into the cgroup work dir's net_cls.classid
+ * file in order to later on trigger socket tagging.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1. If there
+ * is a failure, it prints the error to stderr.
+ */
+int set_classid(unsigned int id)
+{
+ char cgroup_workdir[PATH_MAX - 42];
+ char cgroup_classid_path[PATH_MAX + 1];
+ int fd, rc = 0;
+
+ format_classid_path(cgroup_workdir);
+ snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
+ "%s/net_cls.classid", cgroup_workdir);
+
+ fd = open(cgroup_classid_path, O_WRONLY);
+ if (fd < 0) {
+ log_err("Opening cgroup classid: %s", cgroup_classid_path);
+ return 1;
+ }
+
+ if (dprintf(fd, "%u\n", id) < 0) {
+ log_err("Setting cgroup classid");
+ rc = 1;
+ }
+
+ close(fd);
+ return rc;
+}
+
+/**
+ * join_classid() - Join a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to have been created already, as
+ * it is joined here. Joining causes the process's sockets to be tagged with
+ * the given net_cls classid.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_classid(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ return join_cgroup_from_top(cgroup_workdir);
+}
+
+/**
+ * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
+ *
+ * At call time, it moves the calling process to the root cgroup, and then
+ * runs the deletion process.
+ *
+ * On failure, it will print an error to stderr, and try to continue.
+ */
+void cleanup_classid_environment(void)
+{
+ char cgroup_workdir[PATH_MAX + 1];
+
+ format_classid_path(cgroup_workdir);
+ join_cgroup_from_top(NETCLS_MOUNT_PATH);
+ nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
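Taken together, the new cgroupv1 helpers are driven in a fixed order, as the
selftest below also does. A hedged sketch of a caller:

	#include "cgroup_helpers.h"	/* helpers added above */

	static int tag_self_with_classid(void)
	{
		if (setup_classid_environment())
			return 1;
		if (set_classid(42) || join_classid()) {
			cleanup_classid_environment();
			return 1;
		}
		/* sockets created from here on are tagged with classid 42 */
		cleanup_classid_environment();
		return 0;
	}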
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 5fe3d88e4f0d..629da3854b3e 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_HELPERS_H
#define __CGROUP_HELPERS_H
+
#include <errno.h>
#include <string.h>
@@ -8,12 +9,21 @@
#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
-
+/* cgroupv2 related */
int cgroup_setup_and_join(const char *path);
int create_and_get_cgroup(const char *path);
+unsigned long long get_cgroup_id(const char *path);
+
int join_cgroup(const char *path);
+
int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(const char *path);
-#endif
+/* cgroupv1 related */
+int set_classid(unsigned int id);
+int join_classid(void);
+
+int setup_classid_environment(void);
+void cleanup_classid_environment(void);
+
+#endif /* __CGROUP_HELPERS_H */
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 7e9f6375757a..6db1af8fdee7 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -208,11 +208,26 @@ error_close:
static int connect_fd_to_addr(int fd,
const struct sockaddr_storage *addr,
- socklen_t addrlen)
+ socklen_t addrlen, const bool must_fail)
{
- if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
- log_err("Failed to connect to server");
- return -1;
+ int ret;
+
+ errno = 0;
+ ret = connect(fd, (const struct sockaddr *)addr, addrlen);
+ if (must_fail) {
+ if (!ret) {
+ log_err("Unexpected success to connect to server");
+ return -1;
+ }
+ if (errno != EPERM) {
+ log_err("Unexpected error from connect to server");
+ return -1;
+ }
+ } else {
+ if (ret) {
+ log_err("Failed to connect to server");
+ return -1;
+ }
}
return 0;
@@ -257,7 +272,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
strlen(opts->cc) + 1))
goto error_close;
- if (connect_fd_to_addr(fd, &addr, addrlen))
+ if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
goto error_close;
return fd;
@@ -289,7 +304,7 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
return -1;
}
- if (connect_fd_to_addr(client_fd, &addr, len))
+ if (connect_fd_to_addr(client_fd, &addr, len, false))
return -1;
return 0;
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index da7e132657d5..d198181a5648 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -20,6 +20,7 @@ typedef __u16 __sum16;
struct network_helper_opts {
const char *cc;
int timeout_ms;
+ bool must_fail;
};
/* ipv4 test vector */
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
new file mode 100644
index 000000000000..ab3b9bc5e6d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "connect4_dropper.skel.h"
+
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static int run_test(int cgroup_fd, int server_fd, bool classid)
+{
+ struct network_helper_opts opts = {
+ .must_fail = true,
+ };
+ struct connect4_dropper *skel;
+ int fd, err = 0;
+
+ skel = connect4_dropper__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return -1;
+
+ skel->links.connect_v4_dropper =
+ bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
+ cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
+ err = -1;
+ goto out;
+ }
+
+ if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
+ err = -1;
+ goto out;
+ }
+
+ fd = connect_to_fd_opts(server_fd, &opts);
+ if (fd < 0)
+ err = -1;
+ else
+ close(fd);
+out:
+ connect4_dropper__destroy(skel);
+ return err;
+}
+
+void test_cgroup_v1v2(void)
+{
+ struct network_helper_opts opts = {};
+ int server_fd, client_fd, cgroup_fd;
+ static const int port = 60123;
+
+ /* Step 1: Check base connectivity works without any BPF. */
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+ if (!ASSERT_GE(server_fd, 0, "server_fd"))
+ return;
+ client_fd = connect_to_fd_opts(server_fd, &opts);
+ if (!ASSERT_GE(client_fd, 0, "client_fd")) {
+ close(server_fd);
+ return;
+ }
+ close(client_fd);
+ close(server_fd);
+
+ /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
+ cgroup_fd = test__join_cgroup("/connect_dropper");
+ if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+ return;
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+ if (!ASSERT_GE(server_fd, 0, "server_fd")) {
+ close(cgroup_fd);
+ return;
+ }
+ ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
+ setup_classid_environment();
+ set_classid(42);
+ ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
+ cleanup_classid_environment();
+ close(server_fd);
+ close(cgroup_fd);
+}
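
The classid helpers used above come from cgroup_helpers.c; conceptually,
setup_classid_environment() mounts a cgroup-v1 net_cls hierarchy and
set_classid() writes the id into net_cls.classid. A rough sketch of that
idea (the path and function body are illustrative assumptions, not the
helpers' actual code):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define NETCLS_ROOT "/sys/fs/cgroup/net_cls"	/* assumed mount point */

	static int set_classid_sketch(unsigned int id)
	{
		char path[4096];
		int fd, ret;

		snprintf(path, sizeof(path),
			 NETCLS_ROOT "/cgroup-test/net_cls.classid");
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ret = dprintf(fd, "%u\n", id) < 0 ? -1 : 0;
		close(fd);
		return ret;
	}
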
diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
index 53f0e0fa1a53..37c20b5ffa70 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
-#include <linux/ptrace.h>
#include "test_task_pt_regs.skel.h"
void test_task_pt_regs(void)
diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c
new file mode 100644
index 000000000000..b565d997810a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED 1
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+ if (ctx->type != SOCK_STREAM)
+ return VERDICT_PROCEED;
+ if (ctx->user_port == bpf_htons(60123))
+ return VERDICT_REJECT;
+ return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";
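
Two details are worth noting: ctx->user_port is in network byte order,
hence the bpf_htons(60123) comparison, and returning VERDICT_REJECT (0)
from a cgroup/connect4 program makes the task's connect(2) fail with
EPERM. The skeleton-based attach in cgroup_v1v2.c could equally use the
low-level libbpf call; a sketch:

	#include <bpf/bpf.h>

	/* BPF_CGROUP_INET4_CONNECT is the attach type behind
	 * SEC("cgroup/connect4").
	 */
	static int attach_dropper(int prog_fd, int cgroup_fd)
	{
		return bpf_prog_attach(prog_fd, cgroup_fd,
				       BPF_CGROUP_INET4_CONNECT, 0);
	}
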
diff --git a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
index 6c059f1cfa1b..e6cb09259408 100644
--- a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
+++ b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
@@ -1,12 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-struct pt_regs current_regs = {};
-struct pt_regs ctx_regs = {};
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it to task_pt_regs.c as a byte array instead.
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
int uprobe_res = 0;
SEC("uprobe/trigger_func")
@@ -17,8 +22,10 @@ int handle_uprobe(struct pt_regs *ctx)
current = bpf_get_current_task_btf();
regs = (struct pt_regs *) bpf_task_pt_regs(current);
- __builtin_memcpy(&current_regs, regs, sizeof(*regs));
- __builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx));
+ if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+ return 0;
+ if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+ return 0;
/* Prove that uprobe was run */
uprobe_res = 1;
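
The move to bpf_probe_read_kernel() is what makes this work:
bpf_task_pt_regs() returns a pointer into kernel memory, and the
snapshots now land in plain char arrays, so userspace no longer needs
the kernel's struct pt_regs layout to consume them. The consumer can
then compare the two snapshots byte-wise; a sketch of what the
userspace side might do (skeleton field names are an assumption):

	if (!ASSERT_EQ(memcmp(skel->bss->current_regs, skel->bss->ctx_regs,
			      sizeof(skel->bss->current_regs)),
		       0, "regs_match"))
		return;
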
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
new file mode 100644
index 000000000000..8a3f2cd9fec0
--- /dev/null
+++ b/tools/testing/selftests/damon/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for damon selftests
+
+TEST_FILES = _chk_dependency.sh
+TEST_PROGS = debugfs_attrs.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/damon/_chk_dependency.sh b/tools/testing/selftests/damon/_chk_dependency.sh
new file mode 100644
index 000000000000..0189db81550b
--- /dev/null
+++ b/tools/testing/selftests/damon/_chk_dependency.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+DBGFS=/sys/kernel/debug/damon
+
+if [ $EUID -ne 0 ];
+then
+ echo "Run as root"
+ exit $ksft_skip
+fi
+
+if [ ! -d "$DBGFS" ]
+then
+ echo "$DBGFS not found"
+ exit $ksft_skip
+fi
+
+for f in attrs target_ids monitor_on
+do
+ if [ ! -f "$DBGFS/$f" ]
+ then
+ echo "$f not found"
+ exit 1
+ fi
+done
diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh
new file mode 100644
index 000000000000..bfabb19dc0d3
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_attrs.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+test_write_result() {
+ file=$1
+ content=$2
+ orig_content=$3
+ expect_reason=$4
+ expected=$5
+
+ echo "$content" > "$file"
+ if [ $? -ne "$expected" ]
+ then
+ echo "writing $content to $file doesn't return $expected"
+ echo "expected because: $expect_reason"
+ echo "$orig_content" > "$file"
+ exit 1
+ fi
+}
+
+test_write_succ() {
+ test_write_result "$1" "$2" "$3" "$4" 0
+}
+
+test_write_fail() {
+ test_write_result "$1" "$2" "$3" "$4" 1
+}
+
+test_content() {
+ file=$1
+ orig_content=$2
+ expected=$3
+ expect_reason=$4
+
+ content=$(cat "$file")
+ if [ "$content" != "$expected" ]
+ then
+ echo "reading $file expected $expected but $content"
+ echo "expected because: $expect_reason"
+ echo "$orig_content" > "$file"
+ exit 1
+ fi
+}
+
+source ./_chk_dependency.sh
+
+# Test attrs file
+# ===============
+
+file="$DBGFS/attrs"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4 5" "$orig_content" "valid input"
+test_write_fail "$file" "1 2 3 4" "$orig_content" "no enough fields"
+test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
+ "min_nr_regions > max_nr_regions"
+test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
+echo "$orig_content" > "$file"
+
+# Test target_ids file
+# ====================
+
+file="$DBGFS/target_ids"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
+test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
+test_content "$file" "$orig_content" "1 2" "non-integer was there"
+test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
+test_content "$file" "$orig_content" "" "wrong input written"
+test_write_succ "$file" "" "$orig_content" "empty input"
+test_content "$file" "$orig_content" "" "empty input written"
+echo "$orig_content" > "$file"
+
+echo "PASS"
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
index 25a3da4eaa44..5f5b2ba3e557 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc
@@ -22,7 +22,7 @@ ls
echo 0 > events/eprobes/$EPROBE/enable
content=`grep '^ *ls-' trace | grep 'file='`
-nocontent=`grep '^ *ls-' trace | grep 'file=' | grep -v -e '"/' -e '"."'` || true
+nocontent=`grep '^ *ls-' trace | grep 'file=' | grep -v -e '"/' -e '"."' -e '(fault)' ` || true
if [ -z "$content" ]; then
exit_fail
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 3c30d0045d8d..479868570d59 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -171,7 +171,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
- bmap = bitmap_alloc(host_num_pages);
+ bmap = bitmap_zalloc(host_num_pages);
if (dirty_log_manual_caps) {
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 5fe0140e407e..792c60e1b17d 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -749,8 +749,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
- bmap = bitmap_alloc(host_num_pages);
- host_bmap_track = bitmap_alloc(host_num_pages);
+ bmap = bitmap_zalloc(host_num_pages);
+ host_bmap_track = bitmap_zalloc(host_num_pages);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 06a64980a5d2..68f26a8b4f42 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -111,7 +111,7 @@ int main(int argc, char *argv[])
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
- bmap = bitmap_alloc(TEST_MEM_PAGES);
+ bmap = bitmap_zalloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) {
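
All three conversions above pick the explicitly zero-initializing
allocator. The rename follows the kernel's naming convention, where
bitmap_alloc() does not clear the returned buffer and bitmap_zalloc()
does, so the dirty-log comparisons never start from garbage bits. In
plain libc terms the distinction is roughly (not the verbatim
tools/include implementation):

	#include <stdlib.h>

	#define BITS_TO_LONGS(n) \
		(((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

	unsigned long *bitmap_alloc_sketch(unsigned int nbits)
	{
		return malloc(BITS_TO_LONGS(nbits) * sizeof(unsigned long));
	}

	unsigned long *bitmap_zalloc_sketch(unsigned int nbits)
	{
		return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
	}
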
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 74baab83fec3..192a2899bae8 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -56,7 +56,7 @@ static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
static int mfd_assert_reopen_fd(int fd_in)
{
- int r, fd;
+ int fd;
char path[100];
sprintf(path, "/proc/self/fd/%d", fd_in);
diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c
index e1bf55dabdf6..162c41e9bcae 100644
--- a/tools/testing/selftests/nci/nci_dev.c
+++ b/tools/testing/selftests/nci/nci_dev.c
@@ -746,7 +746,7 @@ int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_
const __u8 *rsp, __u32 rsp_len)
{
char buf[256];
- unsigned int len;
+ int len;
send(nfc_sock, &cmd[3], cmd_len - 3, 0);
len = read(virtual_fd, buf, cmd_len);
diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh
index 4254ddc3f70b..1ef9e4159bba 100755
--- a/tools/testing/selftests/net/altnames.sh
+++ b/tools/testing/selftests/net/altnames.sh
@@ -45,7 +45,7 @@ altnames_test()
check_err $? "Got unexpected long alternative name from link show JSON"
ip link property del $DUMMY_DEV altname $SHORT_NAME
- check_err $? "Failed to add short alternative name"
+ check_err $? "Failed to delete short alternative name"
ip -j -p link show $SHORT_NAME &>/dev/null
check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 67766bfe176f..2a3638c0a008 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -282,6 +282,7 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
}
#define MESSAGES_CNT 7
+#define MSG_EOR_IDX (MESSAGES_CNT / 2)
static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
{
int fd;
@@ -294,7 +295,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
/* Send several messages, one with MSG_EOR flag */
for (int i = 0; i < MESSAGES_CNT; i++)
- send_byte(fd, 1, 0);
+ send_byte(fd, 1, (i == MSG_EOR_IDX) ? MSG_EOR : 0);
control_writeln("SENDDONE");
close(fd);
@@ -324,6 +325,11 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
perror("message bound violated");
exit(EXIT_FAILURE);
}
+
+ if ((i == MSG_EOR_IDX) ^ !!(msg.msg_flags & MSG_EOR)) {
+ perror("MSG_EOR");
+ exit(EXIT_FAILURE);
+ }
}
close(fd);
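
The XOR check above trips whenever the presence of MSG_EOR in msg_flags
disagrees with the expectation that only message MSG_EOR_IDX carries
it; written out long-hand it is equivalent to:

	bool expected = (i == MSG_EOR_IDX);
	bool got = !!(msg.msg_flags & MSG_EOR);

	if (expected != got) {	/* flag on the wrong record, or missing */
		perror("MSG_EOR");
		exit(EXIT_FAILURE);
	}
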
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 9db867df7679..f9c52b7fab7b 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -10,10 +10,9 @@ override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
# Add "-fstack-protector" only if toolchain supports it.
override CFLAGS+= $(call cc-option,-fstack-protector-strong)
CC?= $(CROSS_COMPILE)gcc
-PKG_CONFIG?= pkg-config
+PKG_CONFIG?= $(CROSS_COMPILE)pkg-config
override CFLAGS+=-D VERSION=\"$(VERSION)\"
-LDFLAGS+=
TARGET=tmon
INSTALL_PROGRAM=install -m 755 -p
@@ -33,7 +32,6 @@ override CFLAGS += $(shell $(PKG_CONFIG) --cflags $(STATIC) panelw ncursesw 2> /
$(PKG_CONFIG) --cflags $(STATIC) panel ncurses 2> /dev/null)
OBJS = tmon.o tui.o sysfs.o pid.o
-OBJS +=
tmon: $(OBJS) Makefile tmon.h
$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) -o $(TARGET) $(TMON_LIBS)
@@ -42,15 +40,13 @@ valgrind: tmon
sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null
install:
- - mkdir -p $(INSTALL_ROOT)/$(BINDIR)
- - $(INSTALL_PROGRAM) "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
+ - $(INSTALL_PROGRAM) -D "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
uninstall:
$(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
clean:
- find . -name "*.o" | xargs $(DEL_FILE)
- rm -f $(TARGET)
+ rm -f $(TARGET) $(OBJS)
dist:
git tag v$(VERSION)