-rw-r--r--Documentation/ABI/stable/sysfs-bus-xen-backend9
-rw-r--r--Documentation/ABI/testing/sysfs-driver-xen-blkback10
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/arm64/sve.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt14
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt6
-rw-r--r--Documentation/devicetree/bindings/net/sh_eth.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas-wdt.txt5
-rw-r--r--Documentation/hwmon/ina2xx2
-rw-r--r--Documentation/i2c/DMA-considerations10
-rw-r--r--Documentation/process/changes.rst2
-rw-r--r--Documentation/scsi/scsi-parameters.txt5
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile5
-rw-r--r--arch/arc/Kconfig10
-rw-r--r--arch/arc/Makefile10
-rw-r--r--arch/arc/boot/dts/axc003.dtsi26
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi26
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi7
-rw-r--r--arch/arc/boot/dts/hsdk.dts11
-rw-r--r--arch/arc/configs/axs101_defconfig3
-rw-r--r--arch/arc/configs/axs103_defconfig3
-rw-r--r--arch/arc/configs/axs103_smp_defconfig3
-rw-r--r--arch/arc/configs/haps_hs_defconfig2
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/configs/nps_defconfig1
-rw-r--r--arch/arc/configs/nsim_700_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/tb10x_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig1
-rw-r--r--arch/arc/include/asm/atomic.h2
-rw-r--r--arch/arc/include/asm/dma-mapping.h13
-rw-r--r--arch/arc/kernel/troubleshoot.c13
-rw-r--r--arch/arc/mm/cache.c36
-rw-r--r--arch/arc/mm/dma.c82
-rw-r--r--[-rwxr-xr-x]arch/arm/boot/dts/am335x-osd3358-sm-red.dts0
-rw-r--r--arch/arm/boot/dts/am4372.dtsi1
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts90
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts183
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi12
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts20
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/versatile_defconfig14
-rw-r--r--arch/arm/include/asm/kvm_host.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c39
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts2
-rw-r--r--arch/arm64/configs/defconfig3
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c29
-rw-r--r--arch/arm64/crypto/sm4-ce-glue.c2
-rw-r--r--arch/arm64/include/asm/kvm_host.h4
-rw-r--r--arch/arm64/kvm/hyp/switch.c9
-rw-r--r--arch/arm64/mm/mmu.c10
-rw-r--r--arch/m68k/mac/misc.c10
-rw-r--r--arch/m68k/mm/mcfmmu.c2
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/kernel/vdso.c20
-rw-r--r--arch/mips/kvm/mmu.c10
-rw-r--r--arch/nds32/Kconfig4
-rw-r--r--arch/nds32/Makefile4
-rw-r--r--arch/nds32/include/asm/elf.h4
-rw-r--r--arch/nds32/include/asm/ftrace.h46
-rw-r--r--arch/nds32/include/asm/nds32.h1
-rw-r--r--arch/nds32/include/asm/uaccess.h229
-rw-r--r--arch/nds32/kernel/Makefile6
-rw-r--r--arch/nds32/kernel/atl2c.c3
-rw-r--r--arch/nds32/kernel/ex-entry.S2
-rw-r--r--arch/nds32/kernel/ex-exit.S4
-rw-r--r--arch/nds32/kernel/ftrace.c309
-rw-r--r--arch/nds32/kernel/module.c4
-rw-r--r--arch/nds32/kernel/stacktrace.c6
-rw-r--r--arch/nds32/kernel/traps.c42
-rw-r--r--arch/nds32/kernel/vmlinux.lds.S12
-rw-r--r--arch/nios2/Kconfig.debug9
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c6
-rw-r--r--arch/riscv/include/asm/tlb.h4
-rw-r--r--arch/riscv/kernel/sys_riscv.c15
-rw-r--r--arch/s390/include/asm/mmu.h8
-rw-r--r--arch/s390/kvm/kvm-s390.c2
-rw-r--r--arch/s390/kvm/priv.c30
-rw-r--r--arch/s390/kvm/vsie.c3
-rw-r--r--arch/sparc/kernel/of_device_32.c4
-rw-r--r--arch/sparc/kernel/of_device_64.c3
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S66
-rw-r--r--arch/x86/include/asm/kvm_host.h22
-rw-r--r--arch/x86/include/asm/pgtable-3level.h7
-rw-r--r--arch/x86/kvm/lapic.c27
-rw-r--r--arch/x86/kvm/mmu.c26
-rw-r--r--arch/x86/kvm/svm.c19
-rw-r--r--arch/x86/kvm/vmx.c43
-rw-r--r--arch/x86/kvm/x86.c28
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/xen/mmu_pv.c9
-rw-r--r--block/bfq-cgroup.c4
-rw-r--r--block/bio.c3
-rw-r--r--block/blk-cgroup.c105
-rw-r--r--block/blk-core.c5
-rw-r--r--block/blk-throttle.c5
-rw-r--r--block/blk-wbt.c89
-rw-r--r--block/bsg.c8
-rw-r--r--block/elevator.c3
-rw-r--r--drivers/acpi/acpi_lpss.c2
-rw-r--r--drivers/acpi/bus.c13
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/pata_ftide010.c27
-rw-r--r--drivers/base/memory.c20
-rw-r--r--drivers/base/power/clock_ops.c2
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/block/rbd.c235
-rw-r--r--drivers/block/xen-blkback/blkback.c99
-rw-r--r--drivers/block/xen-blkback/common.h14
-rw-r--r--drivers/block/xen-blkfront.c110
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/btmtkuart.c8
-rw-r--r--drivers/bus/ti-sysc.c37
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/random.c11
-rw-r--r--drivers/clk/clk-npcm7xx.c4
-rw-r--r--drivers/clk/x86/clk-st.c2
-rw-r--r--drivers/cpuidle/governors/menu.c13
-rw-r--r--drivers/crypto/caam/caamalg_qi.c6
-rw-r--r--drivers/crypto/caam/caampkc.c20
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c57
-rw-r--r--drivers/crypto/chelsio/chtls/chtls.h5
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c7
-rw-r--r--drivers/crypto/vmx/aes_cbc.c30
-rw-r--r--drivers/crypto/vmx/aes_xts.c21
-rw-r--r--drivers/dax/device.c3
-rw-r--r--drivers/firmware/arm_scmi/perf.c8
-rw-r--r--drivers/gpio/gpio-adp5588.c24
-rw-r--r--drivers/gpio/gpio-dwapb.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c86
-rw-r--r--drivers/gpio/gpiolib-of.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c33
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c33
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c37
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c4
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c3
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c17
-rw-r--r--drivers/gpu/drm/i915/intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c33
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c47
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c27
-rw-r--r--drivers/hwmon/adt7475.c25
-rw-r--r--drivers/hwmon/ina2xx.c13
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c55
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c7
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c1
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c15
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c7
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c7
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-core-base.c11
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/md/md-cluster.c10
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5-log.h5
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/memory/ti-aemif.c2
-rw-r--r--drivers/mmc/core/queue.c12
-rw-r--r--drivers/mmc/core/queue.h1
-rw-r--r--drivers/mmc/host/android-goldfish.c4
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c10
-rw-r--r--drivers/mtd/nand/raw/denali.c5
-rw-r--r--drivers/mtd/nand/raw/docg4.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c10
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c29
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c129
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c115
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c48
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c187
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c23
-rw-r--r--drivers/net/ethernet/realtek/r8169.c7
-rw-r--r--drivers/net/ethernet/renesas/ravb.h5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c49
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c9
-rw-r--r--drivers/net/hyperv/netvsc_drv.c16
-rw-r--r--drivers/net/phy/sfp.c20
-rw-r--r--drivers/net/usb/r8152.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c50
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c12
-rw-r--r--drivers/nvme/host/pci.c8
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvme/target/fcloop.c3
-rw-r--r--drivers/of/base.c47
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c71
-rw-r--r--drivers/scsi/csiostor/csio_hw.h1
-rw-r--r--drivers/scsi/csiostor/csio_mb.c6
-rw-r--r--drivers/scsi/hosts.c24
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/scsi_lib.c21
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c8
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/qoriq_thermal.c27
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c11
-rw-r--r--drivers/thermal/rcar_thermal.c16
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--fs/afs/proc.c15
-rw-r--r--fs/btrfs/ctree.h12
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/extent-tree.c17
-rw-r--r--fs/btrfs/inode.c117
-rw-r--r--fs/btrfs/ioctl.c35
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/tree-log.c48
-rw-r--r--fs/btrfs/tree-log.h10
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/buffer.c1
-rw-r--r--fs/ceph/super.c16
-rw-r--r--fs/cifs/cifs_unicode.c3
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/smb2misc.c14
-rw-r--r--fs/cifs/smb2ops.c35
-rw-r--r--fs/cifs/smb2pdu.c3
-rw-r--r--fs/isofs/inode.c7
-rw-r--r--fs/nilfs2/alloc.c11
-rw-r--r--fs/nilfs2/alloc.h11
-rw-r--r--fs/nilfs2/bmap.c11
-rw-r--r--fs/nilfs2/bmap.h11
-rw-r--r--fs/nilfs2/btnode.c11
-rw-r--r--fs/nilfs2/btnode.h11
-rw-r--r--fs/nilfs2/btree.c11
-rw-r--r--fs/nilfs2/btree.h11
-rw-r--r--fs/nilfs2/cpfile.c11
-rw-r--r--fs/nilfs2/cpfile.h11
-rw-r--r--fs/nilfs2/dat.c11
-rw-r--r--fs/nilfs2/dat.h11
-rw-r--r--fs/nilfs2/dir.c11
-rw-r--r--fs/nilfs2/direct.c11
-rw-r--r--fs/nilfs2/direct.h11
-rw-r--r--fs/nilfs2/file.c11
-rw-r--r--fs/nilfs2/gcinode.c11
-rw-r--r--fs/nilfs2/ifile.c11
-rw-r--r--fs/nilfs2/ifile.h11
-rw-r--r--fs/nilfs2/inode.c11
-rw-r--r--fs/nilfs2/ioctl.c11
-rw-r--r--fs/nilfs2/mdt.c11
-rw-r--r--fs/nilfs2/mdt.h11
-rw-r--r--fs/nilfs2/namei.c11
-rw-r--r--fs/nilfs2/nilfs.h11
-rw-r--r--fs/nilfs2/page.c11
-rw-r--r--fs/nilfs2/page.h11
-rw-r--r--fs/nilfs2/recovery.c11
-rw-r--r--fs/nilfs2/segbuf.c11
-rw-r--r--fs/nilfs2/segbuf.h11
-rw-r--r--fs/nilfs2/segment.c11
-rw-r--r--fs/nilfs2/segment.h11
-rw-r--r--fs/nilfs2/sufile.c11
-rw-r--r--fs/nilfs2/sufile.h11
-rw-r--r--fs/nilfs2/super.c11
-rw-r--r--fs/nilfs2/sysfs.c11
-rw-r--r--fs/nilfs2/sysfs.h11
-rw-r--r--fs/nilfs2/the_nilfs.c11
-rw-r--r--fs/nilfs2/the_nilfs.h11
-rw-r--r--fs/notify/fsnotify.c13
-rw-r--r--fs/notify/mark.c6
-rw-r--r--fs/quota/quota.c14
-rw-r--r--fs/udf/super.c93
-rw-r--r--include/linux/arm-smccc.h38
-rw-r--r--include/linux/blk-cgroup.h45
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/of.h33
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/quota.h8
-rw-r--r--include/linux/timekeeping.h4
-rw-r--r--include/linux/tracepoint.h8
-rw-r--r--include/net/act_api.h7
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/net/pkt_cls.h25
-rw-r--r--include/net/regulatory.h4
-rw-r--r--include/uapi/linux/keyctl.h2
-rw-r--r--include/uapi/linux/rds.h1
-rw-r--r--include/uapi/linux/vhost.h2
-rw-r--r--ipc/shm.c1
-rw-r--r--kernel/bpf/hashtab.c23
-rw-r--r--kernel/bpf/sockmap.c75
-rw-r--r--kernel/cpu.c37
-rw-r--r--kernel/dma/direct.c4
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/printk/printk.c1
-rw-r--r--kernel/printk/printk_safe.c4
-rw-r--r--kernel/time/clocksource.c40
-rw-r--r--kernel/watchdog.c4
-rw-r--r--kernel/watchdog_hld.c2
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/percpu_counter.c1
-rw-r--r--lib/rhashtable.c1
-rw-r--r--mm/backing-dev.c5
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/kmemleak.c9
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory_hotplug.c3
-rw-r--r--mm/oom_kill.c14
-rw-r--r--mm/page-writeback.c1
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/slub.c1
-rw-r--r--mm/util.c11
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/filter.c59
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/dsa/slave.c4
-rw-r--r--net/ipv4/igmp.c11
-rw-r--r--net/ipv4/ip_gre.c3
-rw-r--r--net/ipv4/tcp_bbr.c42
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/af_inet6.c10
-rw-r--r--net/ipv6/ip6_fib.c7
-rw-r--r--net/ipv6/ip6_gre.c1
-rw-r--r--net/ipv6/ip6_tunnel.c10
-rw-r--r--net/ipv6/ip6_vti.c5
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/mac80211/ibss.c22
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mlme.c70
-rw-r--r--net/mac80211/rx.c1
-rw-r--r--net/mac80211/tx.c54
-rw-r--r--net/mac80211/util.c11
-rw-r--r--net/ncsi/ncsi-netlink.c4
-rw-r--r--net/packet/af_packet.c44
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/rds/Kconfig2
-rw-r--r--net/rds/ib.c9
-rw-r--r--net/rds/tcp.c1
-rw-r--r--net/rfkill/rfkill-gpio.c1
-rw-r--r--net/sched/act_api.c86
-rw-r--r--net/sched/act_bpf.c8
-rw-r--r--net/sched/act_connmark.c8
-rw-r--r--net/sched/act_csum.c8
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ife.c108
-rw-r--r--net/sched/act_ipt.c16
-rw-r--r--net/sched/act_mirred.c8
-rw-r--r--net/sched/act_nat.c8
-rw-r--r--net/sched/act_pedit.c26
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/act_sample.c8
-rw-r--r--net/sched/act_simple.c8
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/act_skbmod.c8
-rw-r--r--net/sched/act_tunnel_key.c8
-rw-r--r--net/sched/act_vlan.c8
-rw-r--r--net/sched/cls_api.c4
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_cake.c24
-rw-r--r--net/sctp/proc.c8
-rw-r--r--net/sctp/socket.c56
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/diag.c2
-rw-r--r--net/tipc/name_table.c10
-rw-r--r--net/tipc/name_table.h9
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/socket.c78
-rw-r--r--net/tipc/socket.h2
-rw-r--r--net/tipc/topsrv.c4
-rw-r--r--net/tls/tls_main.c9
-rw-r--r--net/wireless/nl80211.c15
-rw-r--r--net/wireless/reg.c91
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xdp/xdp_umem.c4
-rw-r--r--scripts/Makefile.build2
-rwxr-xr-xscripts/checkpatch.pl3
-rwxr-xr-xscripts/depmod.sh5
-rw-r--r--scripts/kconfig/Makefile1
-rw-r--r--scripts/kconfig/check-pkgconfig.sh8
-rwxr-xr-xscripts/kconfig/gconf-cfg.sh7
-rwxr-xr-xscripts/kconfig/mconf-cfg.sh25
-rw-r--r--scripts/kconfig/mconf.c1
-rw-r--r--scripts/kconfig/nconf-cfg.sh25
-rwxr-xr-xscripts/kconfig/qconf-cfg.sh7
-rwxr-xr-xscripts/recordmcount.pl3
-rwxr-xr-xscripts/setlocalversion2
-rw-r--r--security/apparmor/secid.c1
-rw-r--r--security/keys/dh.c2
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/hda/ext/hdac_ext_stream.c22
-rw-r--r--sound/pci/hda/hda_codec.c3
-rw-r--r--tools/bpf/bpftool/map.c1
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c5
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat59
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh7
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json48
-rw-r--r--tools/vm/page-types.c6
-rw-r--r--tools/vm/slabinfo.c4
-rw-r--r--virt/kvm/arm/mmu.c21
-rw-r--r--virt/kvm/arm/trace.h15
511 files changed, 5006 insertions, 3376 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
index 3d5951c8bf5f..e8b60bd766f7 100644
--- a/Documentation/ABI/stable/sysfs-bus-xen-backend
+++ b/Documentation/ABI/stable/sysfs-bus-xen-backend
@@ -73,3 +73,12 @@ KernelVersion: 3.0
Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Description:
Number of sectors written by the frontend.
+
+What: /sys/bus/xen-backend/devices/*/state
+Date: August 2018
+KernelVersion: 4.19
+Contact: Joe Jin <joe.jin@oracle.com>
+Description:
+ The state of the device. One of: 'Unknown',
+ 'Initialising', 'Initialised', 'Connected', 'Closing',
+ 'Closed', 'Reconfiguring', 'Reconfigured'.
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
index 8bb43b66eb55..4e7babb3ba1f 100644
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkback
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -15,3 +15,13 @@ Description:
blkback. If the frontend tries to use more than
max_persistent_grants, the LRU kicks in and starts
removing 5% of max_persistent_grants every 100ms.
+
+What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds
+Date: August 2018
+KernelVersion: 4.19
+Contact: Roger Pau Monné <roger.pau@citrix.com>
+Description:
+ How long a persistent grant is allowed to remain
+ allocated without being in use. The time is in
+ seconds, 0 means indefinitely long.
+ The default is 60 seconds.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9871e649ffef..64a3bf54b974 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3523,6 +3523,12 @@
ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
See Documentation/blockdev/ramdisk.txt.
+ random.trust_cpu={on,off}
+ [KNL] Enable or disable trusting the use of the
+ CPU's random number generator (if available) to
+ fully seed the kernel's CRNG. Default is controlled
+ by CONFIG_RANDOM_TRUST_CPU.
+
ras=option[,option,...] [KNL] RAS-specific options
cec_disable [X86]
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
index f128f736b4a5..7169a0ec41d8 100644
--- a/Documentation/arm64/sve.txt
+++ b/Documentation/arm64/sve.txt
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
thread.
* Changing the vector length causes all of P0..P15, FFR and all bits of
- Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+ Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current
vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
flag, does not constitute a change to the vector length for this purpose.
@@ -500,7 +500,7 @@ References
[2] arch/arm64/include/uapi/asm/ptrace.h
AArch64 Linux ptrace ABI definitions
-[3] linux/Documentation/arm64/cpu-feature-registers.txt
+[3] Documentation/arm64/cpu-feature-registers.txt
[4] ARM IHI0055C
http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
index 00e4365d7206..091c8dfd3229 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -3,7 +3,6 @@
Required properties:
- compatible :
- "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
- - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
- reg : address and length of the lpi2c master registers
- interrupts : lpi2c interrupt
- clocks : lpi2c clock specifier
@@ -11,7 +10,7 @@ Required properties:
Examples:
lpi2c7: lpi2c7@40a50000 {
- compatible = "fsl,imx8dv-lpi2c";
+ compatible = "fsl,imx7ulp-lpi2c";
reg = <0x40A50000 0x10000>;
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
index b0a8af51c388..265b223cd978 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are
attached to every HLIC: software interrupts, the timer interrupt, and external
interrupts. Software interrupts are used to send IPIs between cores. The
timer interrupt comes from an architecturally mandated real-time timer that is
-controller via Supervisor Binary Interface (SBI) calls and CSR reads. External
+controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External
interrupts connect all other device interrupts to the HLIC, which are routed
via the platform-level interrupt controller (PLIC).
@@ -25,7 +25,15 @@ in the system.
Required properties:
- compatible : "riscv,cpu-intc"
-- #interrupt-cells : should be <1>
+- #interrupt-cells : should be <1>. The interrupt sources are defined by the
+ RISC-V supervisor ISA manual, with only the following three interrupts being
+ defined for supervisor mode:
+ - Source 1 is the supervisor software interrupt, which can be sent by an SBI
+ call and is reserved for use by software.
+ - Source 5 is the supervisor timer interrupt, which can be configured by
+ SBI calls and implements a one-shot timer.
+ - Source 9 is the supervisor external interrupt, which chains to all other
+ device interrupts.
- interrupt-controller : Identifies the node as an interrupt controller
Furthermore, this interrupt-controller MUST be embedded inside the cpu
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below.
...
cpu1-intc: interrupt-controller {
#interrupt-cells = <1>;
- compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc";
+ compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc";
interrupt-controller;
};
};
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 41089369f891..b3acebe08eb0 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -19,6 +19,10 @@ Required properties:
- slaves : Specifies number for slaves
- active_slave : Specifies the slave to use for time stamping,
ethtool and SIOCGMIIPHY
+- cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection
+ device. See also cpsw-phy-sel.txt for it's binding.
+ Note that in legacy cases cpsw-phy-sel may be
+ a child device instead of a phandle.
Optional properties:
- ti,hwmods : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
syscon = <&cm>;
+ cpsw-phy-sel = <&phy_sel>;
cpsw_emac0: slave@0 {
phy_id = <&davinci_mdio>, <0>;
phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
cpts_clock_mult = <0x80000000>;
cpts_clock_shift = <29>;
syscon = <&cm>;
+ cpsw-phy-sel = <&phy_sel>;
cpsw_emac0: slave@0 {
phy_id = <&davinci_mdio>, <0>;
phy-mode = "rgmii-txid";
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
index 76db9f13ad96..abc36274227c 100644
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -16,6 +16,7 @@ Required properties:
"renesas,ether-r8a7794" if the device is a part of R8A7794 SoC.
"renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
"renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+ "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
"renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
"renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
device.
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index 5d47a262474c..9407212a85a8 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -7,6 +7,7 @@ Required properties:
Examples with soctypes are:
- "renesas,r8a7743-wdt" (RZ/G1M)
- "renesas,r8a7745-wdt" (RZ/G1E)
+ - "renesas,r8a774a1-wdt" (RZ/G2M)
- "renesas,r8a7790-wdt" (R-Car H2)
- "renesas,r8a7791-wdt" (R-Car M2-W)
- "renesas,r8a7792-wdt" (R-Car V2H)
@@ -21,8 +22,8 @@ Required properties:
- "renesas,r7s72100-wdt" (RZ/A1)
The generic compatible string must be:
- "renesas,rza-wdt" for RZ/A
- - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G
- - "renesas,rcar-gen3-wdt" for R-Car Gen3
+ - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
+ - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
- reg : Should contain WDT registers location and length
- clocks : the clock feeding the watchdog timer.
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 72d16f08e431..b8df81f6d6bc 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@ Supported chips:
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
Description
-----------
diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations
index 966610aa4620..203002054120 100644
--- a/Documentation/i2c/DMA-considerations
+++ b/Documentation/i2c/DMA-considerations
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the
returned buffer. If NULL is returned, the threshold was not met or a bounce
buffer could not be allocated. Fall back to PIO in that case.
-In any case, a buffer obtained from above needs to be released. It ensures data
-is copied back to the message and a potentially used bounce buffer is freed::
+In any case, a buffer obtained from above needs to be released. Another helper
+function ensures a potentially used bounce buffer is freed::
- i2c_release_dma_safe_msg_buf(msg, dma_buf);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
+
+The last argument 'xferred' controls if the buffer is synced back to the
+message or not. No syncing is needed in cases setting up DMA had an error and
+there was no data transferred.
The bounce buffer handling from the core is generic and simple. It will always
allocate a new bounce buffer. If you want a more sophisticated handling (e.g.
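
The hunk above documents the renamed i2c_put_dma_safe_msg_buf() helper and its 'xferred'
argument. As a minimal, hypothetical sketch (not part of this patch), a bus driver would
pair the get/put helpers roughly like this, with do_dma_transfer()/do_pio_transfer()
standing in for driver-specific code:

#include <linux/i2c.h>

/* Sketch only: the 8-byte threshold and both transfer helpers are
 * illustrative placeholders, not real kernel interfaces. */
static int example_xfer_one_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	u8 *dma_buf;
	int ret;

	/* NULL means the threshold was not met or no bounce buffer could
	 * be allocated -- fall back to PIO in that case. */
	dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
	if (!dma_buf)
		return do_pio_transfer(adap, msg);

	ret = do_dma_transfer(adap, msg, dma_buf);

	/* 'xferred' tells the helper whether a bounce buffer's contents
	 * must be copied back into the message before it is freed. */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);

	return ret;
}
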
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 61f918b10a0c..d1bf143b446f 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -86,7 +86,7 @@ pkg-config
The build system, as of 4.18, requires pkg-config to check for installed
kconfig tools and to determine flags settings for use in
-'make {menu,n,g,x}config'. Previously pkg-config was being used but not
+'make {g,x}config'. Previously pkg-config was being used but not
verified or documented.
Flex
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 25a4b4cf04a6..92999d4e0cb8 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command
allowing boot to proceed. none ignores them, expecting
user space to do the scan.
+ scsi_mod.use_blk_mq=
+ [SCSI] use blk-mq I/O path by default
+ See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
+ Format: <y/n>
+
sim710= [SCSI,HW]
See header of drivers/scsi/sim710.c.
diff --git a/MAINTAINERS b/MAINTAINERS
index a5b256b25905..d870cb57c887 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2311,6 +2311,7 @@ F: drivers/clocksource/cadence_ttc_timer.c
F: drivers/i2c/busses/i2c-cadence.c
F: drivers/mmc/host/sdhci-of-arasan.c
F: drivers/edac/synopsys_edac.c
+F: drivers/i2c/busses/i2c-xiic.c
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>
@@ -8255,9 +8256,9 @@ F: drivers/ata/pata_arasan_cf.c
LIBATA PATA DRIVERS
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
-M: Jens Axboe <kernel.dk>
+M: Jens Axboe <axboe@kernel.dk>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/pata_*.c
F: drivers/ata/ata_generic.c
@@ -8275,7 +8276,7 @@ LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Jens Axboe <axboe@kernel.dk>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/ahci_platform.c
F: drivers/ata/libahci_platform.c
@@ -8291,7 +8292,7 @@ F: drivers/ata/sata_promise.*
LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
M: Jens Axboe <axboe@kernel.dk>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/
F: include/linux/ata.h
diff --git a/Makefile b/Makefile
index 2b458801ba74..19948e556941 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Merciless Moray
# *DOCUMENTATION*
@@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 6d5eb8267e42..b4441b0764d7 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,6 +9,7 @@
config ARC
def_bool y
select ARC_TIMERS
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SG_CHAIN
@@ -28,8 +29,12 @@ config ARC
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_DEBUG_STACKOVERFLOW
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GENERIC_DMA_COHERENT
select HAVE_IOREMAP_PROT
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MEMBLOCK
@@ -44,11 +49,6 @@ config ARC
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_GENERIC_DMA_COHERENT
- select HAVE_KERNEL_GZIP
- select HAVE_KERNEL_LZMA
- select ARCH_HAS_PTE_SPECIAL
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index fb026196aaab..99cce77ab98f 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG
LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
endif
-upto_gcc44 := $(call cc-ifversion, -le, 0404, y)
-atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
-
-cflags-$(atleast_gcc44) += -fsection-anchors
+cflags-y += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
@@ -82,11 +79,6 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
-# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
-# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
-# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
-ldflags-$(upto_gcc44) += -marclinux
-
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index dc91c663bcc0..d75d65ddf8e3 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -94,6 +94,32 @@
};
/*
+ * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+ * it via overlay because peripherals defined in axs10x_mb.dtsi are
+ * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+ * only AXS103 board has HW-coherent DMA peripherals)
+ * We don't need to mark pgu@17000 as dma-coherent because it uses
+ * external DMA buffer located outside of IOC aperture.
+ */
+ axs10x_mb {
+ ethernet@0x18000 {
+ dma-coherent;
+ };
+
+ ehci@0x40000 {
+ dma-coherent;
+ };
+
+ ohci@0x60000 {
+ dma-coherent;
+ };
+
+ mmc@0x15000 {
+ dma-coherent;
+ };
+ };
+
+ /*
* The DW APB ICTL intc on MB is connected to CPU intc via a
* DT "invisible" DW APB GPIO block, configured to simply pass thru
* interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 69ff4895f2ba..a05bb737ea63 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -101,6 +101,32 @@
};
/*
+ * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+ * it via overlay because peripherals defined in axs10x_mb.dtsi are
+ * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+ * only AXS103 board has HW-coherent DMA peripherals)
+ * We don't need to mark pgu@17000 as dma-coherent because it uses
+ * external DMA buffer located outside of IOC aperture.
+ */
+ axs10x_mb {
+ ethernet@0x18000 {
+ dma-coherent;
+ };
+
+ ehci@0x40000 {
+ dma-coherent;
+ };
+
+ ohci@0x60000 {
+ dma-coherent;
+ };
+
+ mmc@0x15000 {
+ dma-coherent;
+ };
+ };
+
+ /*
* This INTC is actually connected to DW APB GPIO
* which acts as a wire between MB INTC and CPU INTC.
* GPIO INTC is configured in platform init code
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 47b74fbc403c..37bafd44e36d 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -9,6 +9,10 @@
*/
/ {
+ aliases {
+ ethernet = &gmac;
+ };
+
axs10x_mb {
compatible = "simple-bus";
#address-cells = <1>;
@@ -68,7 +72,7 @@
};
};
- ethernet@0x18000 {
+ gmac: ethernet@0x18000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
max-speed = <100>;
resets = <&creg_rst 5>;
reset-names = "stmmaceth";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
};
ehci@0x40000 {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 006aa3de5348..ef149f59929a 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -25,6 +25,10 @@
bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
};
+ aliases {
+ ethernet = &gmac;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -163,7 +167,7 @@
#clock-cells = <0>;
};
- ethernet@8000 {
+ gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
@@ -176,6 +180,8 @@
phy-handle = <&phy0>;
resets = <&cgu_rst HSDK_ETH_RESET>;
reset-names = "stmmaceth";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+ dma-coherent;
mdio {
#address-cells = <1>;
@@ -194,12 +200,14 @@
compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
reg = <0x60000 0x100>;
interrupts = <15>;
+ dma-coherent;
};
ehci@40000 {
compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
reg = <0x40000 0x100>;
interrupts = <15>;
+ dma-coherent;
};
mmc@a000 {
@@ -212,6 +220,7 @@
clock-names = "biu", "ciu";
interrupts = <12>;
bus-width = <4>;
+ dma-coherent;
};
};
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index a635ea972304..41bc08be6a3b 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
CONFIG_MOUSE_SERIAL=y
CONFIG_MOUSE_SYNAPTICS_USB=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index aa507e423075..1e1c4a8011b5 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
CONFIG_MOUSE_SERIAL=y
CONFIG_MOUSE_SYNAPTICS_USB=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index eba07f468654..6b0c0cfd5c30 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
CONFIG_MOUSE_SERIAL=y
CONFIG_MOUSE_SYNAPTICS_USB=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index 098b19fbaa51..240dd2cd5148 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 0104c404d897..14ae7e5acc7c 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 6491be0ddbc9..1dec2b4bc5e6 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NO_HZ_IDLE=y
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 7c9c706ae7f6..31ba224bbfb4 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 99e05cf63fca..8e0b8b134cd9 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 0dc4f9b737e7..739b90e5e893 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index be3c30a15e54..b5895bdf3a93 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_ARC=y
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 3a74b9b21772..f14eeff7d308 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ea2834b4dc1d..025298a48305 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 80a5a1b4924b..df7b77b13b82 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 2cc87f909747..a7f65313f84a 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index f629493929ea..db47c3541f15 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y
CONFIG_MOUSE_PS2_TOUCHKIT=y
CONFIG_SERIO_ARC_PS2=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 21f0ca26a05d..a8ac5e917d9a 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 4e0072730241..158af079838d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
"1: llock %[orig], [%[ctr]] \n" \
" " #asm_op " %[val], %[orig], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
+ " bnz 1b \n" \
: [val] "=&r" (val), \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..c946c0a83e76
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+// (C) 2018 Synopsys, Inc. (www.synopsys.com)
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-mapping.h>
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops arch_setup_dma_ops
+
+#endif
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 783b20354f8b..e8d9fb452346 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -83,9 +83,6 @@ done:
static void show_faulting_vma(unsigned long address, char *buf)
{
struct vm_area_struct *vma;
- struct inode *inode;
- unsigned long ino = 0;
- dev_t dev = 0;
char *nm = buf;
struct mm_struct *active_mm = current->active_mm;
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
* if the container VMA is not found
*/
if (vma && (vma->vm_start <= address)) {
- struct file *file = vma->vm_file;
- if (file) {
- nm = file_path(file, buf, PAGE_SIZE - 1);
- inode = file_inode(vma->vm_file);
- dev = inode->i_sb->s_dev;
- ino = inode->i_ino;
+ if (vma->vm_file) {
+ nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+ if (IS_ERR(nm))
+ nm = "?";
}
pr_info(" @off 0x%lx in [%s]\n"
" VMA: 0x%08lx to 0x%08lx\n",
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 25c631942500..f2701c13a66b 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
- IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
return buf;
}
@@ -897,15 +897,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
}
/*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
-/*
* Exported DMA API
*/
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
+ /*
+ * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+ * simultaneously. This happens because as of today IOC aperture covers
+ * only ZONE_NORMAL (low mem) and any dma transactions outside this
+ * region won't be HW coherent.
+ * If we want to use both IOC and ZONE_HIGHMEM we can use
+ * bounce_buffer to handle dma transactions to HIGHMEM.
+ * Also it is possible to modify dma_direct cache ops or increase IOC
+ * aperture size if we are planning to use HIGHMEM without PAE.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ panic("IOC and HIGHMEM can't be used simultaneously");
+
/* Flush + invalidate + disable L1 dcache */
__dc_disable();
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
if (is_isa_arcv2() && ioc_enable)
arc_ioc_setup();
- if (is_isa_arcv2() && ioc_enable) {
- __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
- __dma_cache_inv = __dma_cache_inv_ioc;
- __dma_cache_wback = __dma_cache_wback_ioc;
- } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+ if (is_isa_arcv2() && l2_line_sz && slc_enable) {
__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
__dma_cache_inv = __dma_cache_inv_slc;
__dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
__dma_cache_inv = __dma_cache_inv_l1;
__dma_cache_wback = __dma_cache_wback_l1;
}
+ /*
+ * In case of IOC (say IOC+SLC case), pointers above could still be set
+ * but end up not being relevant as the first function in chain is not
+ * called at all for @dma_direct_ops
+ * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+ */
}
void __ref arc_cache_init(void)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index ec47e6079f5d..c75d5c3470e3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -6,20 +6,17 @@
* published by the Free Software Foundation.
*/
-/*
- * DMA Coherent API Notes
- *
- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessing it using a kernel virtual address, with
- * Cache bit off in the TLB entry.
- *
- * The default DMA address == Phy address which is 0x8000_0000 based.
- */
-
#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ * - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ * - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
struct page *page;
phys_addr_t paddr;
void *kvaddr;
- int need_coh = 1, need_kvaddr = 0;
-
- page = alloc_pages(gfp, order);
- if (!page)
- return NULL;
+ bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
/*
- * IOC relies on all data (even coherent DMA data) being in cache
- * Thus allocate normal cached memory
- *
- * The gains with IOC are two pronged:
- * -For streaming data, elides need for cache maintenance, saving
- * cycles in flush code, and bus bandwidth as all the lines of a
- * buffer need to be flushed out to memory
- * -For coherent data, Read/Write to buffers terminate early in cache
- * (vs. always going to memory - thus are faster)
+ * __GFP_HIGHMEM flag is cleared by upper layer functions
+ * (in include/linux/dma-mapping.h) so we should never get a
+ * __GFP_HIGHMEM here.
*/
- if ((is_isa_arcv2() && ioc_enable) ||
- (attrs & DMA_ATTR_NON_CONSISTENT))
- need_coh = 0;
+ BUG_ON(gfp & __GFP_HIGHMEM);
- /*
- * - A coherent buffer needs MMU mapping to enforce non-cachability
- * - A highmem page needs a virtual handle (hence MMU mapping)
- * independent of cachability
- */
- if (PageHighMem(page) || need_coh)
- need_kvaddr = 1;
+ page = alloc_pages(gfp, order);
+ if (!page)
+ return NULL;
/* This is linear addr (0x8000_0000 based) */
paddr = page_to_phys(page);
*dma_handle = paddr;
- /* This is kernel Virtual address (0x7000_0000 based) */
- if (need_kvaddr) {
+ /*
+ * A coherent buffer needs MMU mapping to enforce non-cachability.
+ * kvaddr is kernel Virtual address (0x7000_0000 based).
+ */
+ if (need_coh) {
kvaddr = ioremap_nocache(paddr, size);
if (kvaddr == NULL) {
__free_pages(page, order);
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
{
phys_addr_t paddr = dma_handle;
struct page *page = virt_to_page(paddr);
- int is_non_coh = 1;
-
- is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
- (is_isa_arcv2() && ioc_enable);
- if (PageHighMem(page) || !is_non_coh)
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))
iounmap((void __force __iomem *)vaddr);
__free_pages(page, get_order(size));
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
break;
}
}
+
+/*
+ * Plug in coherent or noncoherent dma ops
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ /*
+ * IOC hardware snoops all DMA traffic, keeping the caches consistent
+ * with memory and eliding the need for any explicit cache maintenance
+ * of DMA buffers, so we can use the dma_direct cache ops.
+ */
+ if (is_isa_arcv2() && ioc_enable && coherent) {
+ set_dma_ops(dev, &dma_direct_ops);
+ dev_info(dev, "use dma_direct_ops cache ops\n");
+ } else {
+ set_dma_ops(dev, &dma_noncoherent_ops);
+ dev_info(dev, "use dma_noncoherent_ops cache ops\n");
+ }
+}
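
As a side note on the selection above, the following is a minimal userspace sketch (not kernel code) of the policy arch_setup_dma_ops() applies on ARC: only an ARCv2 CPU with IOC enabled and a device declared coherent ends up on dma_direct_ops; everything else goes through dma_noncoherent_ops and the arch_sync_dma_*() cache hooks. The helper name dma_ops_choice() is hypothetical and simply returns a label for illustration.

#include <stdbool.h>

/* Illustrative only: mirrors the selection made by arch_setup_dma_ops() above. */
static const char *dma_ops_choice(bool is_arcv2, bool ioc_enabled, bool dev_coherent)
{
	if (is_arcv2 && ioc_enabled && dev_coherent)
		return "dma_direct_ops";	/* IOC snoops DMA; no explicit cache maintenance */

	return "dma_noncoherent_ops";		/* relies on arch_sync_dma_for_{device,cpu}() */
}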
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
index 4d969013f99a..4d969013f99a 100755..100644
--- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
+++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index f0cbd86312dc..d4b7c59eec68 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -469,6 +469,7 @@
ti,hwmods = "rtc";
clocks = <&clk_32768_ck>;
clock-names = "int-clk";
+ system-power-controller;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 9fb47724b9c1..ad2ae25b7b4d 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -13,6 +13,43 @@
reg = <0x40000000 0x08000000>;
};
+ reg_vddio_sd0: regulator-vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 0>;
+ };
+
+ reg_lcd_3v3: regulator-lcd-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 18 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_5v: regulator-lcd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ panel {
+ compatible = "sii,43wvf1g";
+ backlight = <&backlight_display>;
+ dvdd-supply = <&reg_lcd_3v3>;
+ avdd-supply = <&reg_lcd_5v>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+
apb@80000000 {
apbh@80000000 {
gpmi-nand@8000c000 {
@@ -52,31 +89,11 @@
lcdif@80030000 {
pinctrl-names = "default";
pinctrl-0 = <&lcdif_24bit_pins_a>;
- lcd-supply = <&reg_lcd_3v3>;
- display = <&display0>;
status = "okay";
- display0: display0 {
- bits-per-pixel = <32>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <9200000>;
- hactive = <480>;
- vactive = <272>;
- hback-porch = <15>;
- hfront-porch = <8>;
- vback-porch = <12>;
- vfront-porch = <4>;
- hsync-len = <1>;
- vsync-len = <1>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
};
};
};
@@ -118,32 +135,7 @@
};
};
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg_vddio_sd0: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "vddio-sd0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio1 29 0>;
- };
-
- reg_lcd_3v3: regulator@1 {
- compatible = "regulator-fixed";
- reg = <1>;
- regulator-name = "lcd-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio1 18 0>;
- enable-active-high;
- };
- };
-
- backlight {
+ backlight_display: backlight {
compatible = "pwm-backlight";
pwms = <&pwm 2 5000000>;
brightness-levels = <0 4 8 16 32 64 128 255>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 6b0ae667640f..93ab5bdfe068 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -13,6 +13,87 @@
reg = <0x40000000 0x08000000>;
};
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_vddio_sd0: regulator-vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 28 0>;
+ };
+
+ reg_fec_3v3: regulator-fec-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fec-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 15 0>;
+ };
+
+ reg_usb0_vbus: regulator-usb0-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 9 0>;
+ enable-active-high;
+ };
+
+ reg_usb1_vbus: regulator-usb1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 8 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_3v3: regulator-lcd-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 30 0>;
+ enable-active-high;
+ };
+
+ reg_can_3v3: regulator-can-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "can-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 13 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_5v: regulator-lcd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ panel {
+ compatible = "sii,43wvf1g";
+ backlight = <&backlight_display>;
+ dvdd-supply = <&reg_lcd_3v3>;
+ avdd-supply = <&reg_lcd_5v>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+
apb@80000000 {
apbh@80000000 {
gpmi-nand@8000c000 {
@@ -116,31 +197,11 @@
pinctrl-names = "default";
pinctrl-0 = <&lcdif_24bit_pins_a
&lcdif_pins_evk>;
- lcd-supply = <&reg_lcd_3v3>;
- display = <&display0>;
status = "okay";
- display0: display0 {
- bits-per-pixel = <32>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <33500000>;
- hactive = <800>;
- vactive = <480>;
- hback-porch = <89>;
- hfront-porch = <164>;
- vback-porch = <23>;
- vfront-porch = <10>;
- hsync-len = <10>;
- vsync-len = <10>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
};
};
};
@@ -269,80 +330,6 @@
};
};
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg_3p3v: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "3P3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- reg_vddio_sd0: regulator@1 {
- compatible = "regulator-fixed";
- reg = <1>;
- regulator-name = "vddio-sd0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio3 28 0>;
- };
-
- reg_fec_3v3: regulator@2 {
- compatible = "regulator-fixed";
- reg = <2>;
- regulator-name = "fec-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio2 15 0>;
- };
-
- reg_usb0_vbus: regulator@3 {
- compatible = "regulator-fixed";
- reg = <3>;
- regulator-name = "usb0_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio3 9 0>;
- enable-active-high;
- };
-
- reg_usb1_vbus: regulator@4 {
- compatible = "regulator-fixed";
- reg = <4>;
- regulator-name = "usb1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio3 8 0>;
- enable-active-high;
- };
-
- reg_lcd_3v3: regulator@5 {
- compatible = "regulator-fixed";
- reg = <5>;
- regulator-name = "lcd-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio3 30 0>;
- enable-active-high;
- };
-
- reg_can_3v3: regulator@6 {
- compatible = "regulator-fixed";
- reg = <6>;
- regulator-name = "can-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio2 13 0>;
- enable-active-high;
- };
-
- };
-
sound {
compatible = "fsl,imx28-evk-sgtl5000",
"fsl,mxs-audio-sgtl5000";
@@ -363,7 +350,7 @@
};
};
- backlight {
+ backlight_display: backlight {
compatible = "pwm-backlight";
pwms = <&pwm 2 5000000>;
brightness-levels = <0 4 8 16 32 64 128 255>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 7cbc2ffa4b3a..7234e8330a57 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -126,10 +126,14 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ /*
+ * The reference manual lists the PCI IRQs incorrectly.
+ * The real hardware ordering is the same as on i.MX6: D+MSI, C, B, A
+ */
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
<&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 12d6822f0057..04758a2a87f0 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -354,7 +354,7 @@
&mmc2 {
vmmc-supply = <&vsdio>;
bus-width = <8>;
- non-removable;
+ ti,non-removable;
};
&mmc3 {
@@ -621,15 +621,6 @@
OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */
>;
};
-};
-
-&omap4_pmx_wkup {
- usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
- /* gpio_wk0 */
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
- >;
- };
vibrator_direction_pin: pinmux_vibrator_direction_pin {
pinctrl-single,pins = <
@@ -644,6 +635,15 @@
};
};
+&omap4_pmx_wkup {
+ usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
+ /* gpio_wk0 */
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+ >;
+ };
+};
+
/*
* As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
* uart1 wakeirq.
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e2c127608bcc..7eca43ff69bb 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
CONFIG_DRM_DW_HDMI_CEC=y
CONFIG_DRM_IMX=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 148226e36152..7b8212857535 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_DRM=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
CONFIG_DRM_MXSFB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index df68dc4056e5..5282324c7cef 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_VERSATILE=y
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
-CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=1f03 mem=32M"
CONFIG_FPE_NWFPE=y
CONFIG_VFP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_ARM_VERSATILE=y
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -89,9 +90,10 @@ CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
+CONFIG_FONTS=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 79906cecb091..3ad482d2f1eb 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2ceffd85dd3d..cd65ea4e9c54 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np,
}
/**
+ * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
+ *
+ * @oh: struct omap_hwmod *
+ * @np: struct device_node *
+ *
+ * Fix up module register offsets for modules with mpu_rt_idx.
+ * Only needed for cpsw with interconnect target module defined
+ * in device tree while still using legacy hwmod platform data
+ * for rev, sysc and syss registers.
+ *
+ * Can be removed when all cpsw hwmod platform data has been
+ * dropped.
+ */
+static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh,
+ struct device_node *np,
+ struct resource *res)
+{
+ struct device_node *child = NULL;
+ int error;
+
+ child = of_get_next_child(np, child);
+ if (!child)
+ return;
+
+ error = of_address_to_resource(child, oh->mpu_rt_idx, res);
+ if (error)
+ pr_err("%s: error mapping mpu_rt_idx: %i\n",
+ __func__, error);
+}
+
+/**
* omap_hwmod_parse_module_range - map module IO range from device tree
* @oh: struct omap_hwmod *
* @np: struct device_node *
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
size = be32_to_cpup(ranges);
pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n",
- oh->name, np->name, base, size);
+ oh ? oh->name : "", np->name, base, size);
+
+ if (oh && oh->mpu_rt_idx) {
+ omap_hwmod_fix_mpu_rt_idx(oh, np, res);
+
+ return 0;
+ }
res->start = base;
res->end = base + size - 1;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 29e75b47becd..1b1a0e95c751 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
config HOLES_IN_ZONE
def_bool y
- depends on NUMA
source kernel/Kconfig.hz
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index ceffc40810ee..48daec7f78ba 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -46,6 +46,7 @@
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_cldo1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
status = "okay";
};
@@ -56,6 +57,7 @@
vqmmc-supply = <&reg_bldo2>;
non-removable;
cap-mmc-hw-reset;
+ bus-width = <8>;
status = "okay";
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f67e8d5e93ad..db8d364f8476 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y
CONFIG_ARCH_BERLIN=y
CONFIG_ARCH_BRCMSTB=y
CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_K3=y
CONFIG_ARCH_LAYERSCAPE=y
CONFIG_ARCH_LG1K=y
CONFIG_ARCH_HISI=y
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_ARCH_K3_AM6_SOC=y
+CONFIG_SOC_TI=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 6e9f33d14930..067d8937d5af 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- while (walk.nbytes >= AES_BLOCK_SIZE) {
+ while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
NULL);
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
}
- if (walk.nbytes)
+ if (walk.nbytes) {
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
nrounds);
+ if (walk.nbytes > AES_BLOCK_SIZE) {
+ crypto_inc(iv, AES_BLOCK_SIZE);
+ __aes_arm64_encrypt(ctx->aes_key.key_enc,
+ ks + AES_BLOCK_SIZE, iv,
+ nrounds);
+ }
+ }
}
/* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- while (walk.nbytes >= AES_BLOCK_SIZE) {
+ while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
} while (--blocks > 0);
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
}
- if (walk.nbytes)
+ if (walk.nbytes) {
+ if (walk.nbytes > AES_BLOCK_SIZE) {
+ u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+ memcpy(iv2, iv, AES_BLOCK_SIZE);
+ crypto_inc(iv2, AES_BLOCK_SIZE);
+
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+ iv2, nrounds);
+ }
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
nrounds);
+ }
}
/* handle the tail */
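
To make the new tail handling above easier to follow, here is a self-contained sketch (not the kernel code) of why up to two keystream blocks are now needed: once the bulk loop only consumes whole 2-block chunks, the remainder can exceed one AES block, so a second counter block must be encrypted after incrementing the IV, which is what the added "walk.nbytes > AES_BLOCK_SIZE" branches do. block_encrypt() is a hypothetical stand-in for the AES primitive, and ctr_inc() mimics crypto_inc() over the whole block.

#include <stddef.h>
#include <stdint.h>

#define AES_BLOCK_SIZE 16

/* Hypothetical stand-in for the arm64 AES block-encrypt primitive. */
void block_encrypt(const void *key, uint8_t out[AES_BLOCK_SIZE],
		   const uint8_t in[AES_BLOCK_SIZE]);

/* Big-endian increment of the whole block, as crypto_inc(iv, AES_BLOCK_SIZE) does. */
static void ctr_inc(uint8_t ctr[AES_BLOCK_SIZE])
{
	for (int i = AES_BLOCK_SIZE - 1; i >= 0; i--)
		if (++ctr[i])
			break;
}

/* The tail may straddle two counter blocks, so generate one or two of them. */
static void tail_keystream(const void *key, uint8_t iv[AES_BLOCK_SIZE],
			   uint8_t ks[2 * AES_BLOCK_SIZE], size_t tail_bytes)
{
	block_encrypt(key, ks, iv);
	if (tail_bytes > AES_BLOCK_SIZE) {
		ctr_inc(iv);
		block_encrypt(key, ks + AES_BLOCK_SIZE, iv);
	}
}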
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index b7fb5274b250..0c4fc223f225 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
crypto_unregister_alg(&sm4_ce_alg);
}
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
module_exit(sm4_ce_mod_fini);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f26055f2306e..3d6d7336f871 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -61,8 +61,7 @@ struct kvm_arch {
u64 vmid_gen;
u32 vmid;
- /* 1-level 2nd stage table and lock */
- spinlock_t pgd_lock;
+ /* 1-level 2nd stage table, protected by kvm->mmu_lock */
pgd_t *pgd;
/* VTTBR value associated with above pgd and vmid */
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index d496ef579859..ca46153d7915 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
- if (!update_fp_enabled(vcpu))
+ if (!update_fp_enabled(vcpu)) {
val &= ~CPACR_EL1_FPEN;
+ __activate_traps_fpsimd32(vcpu);
+ }
write_sysreg(val, cpacr_el1);
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
val = CPTR_EL2_DEFAULT;
val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
- if (!update_fp_enabled(vcpu))
+ if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP;
+ __activate_traps_fpsimd32(vcpu);
+ }
write_sysreg(val, cptr_el2);
}
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
- __activate_traps_fpsimd32(vcpu);
if (has_vhe())
activate_traps_vhe(vcpu);
else
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 65f86271f02b..8080c9f489c3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
pmd = READ_ONCE(*pmdp);
- /* No-op for empty entry and WARN_ON for valid entry */
- if (!pmd_present(pmd) || !pmd_table(pmd)) {
+ if (!pmd_present(pmd))
+ return 1;
+ if (!pmd_table(pmd)) {
VM_WARN_ON(!pmd_table(pmd));
return 1;
}
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
pud = READ_ONCE(*pudp);
- /* No-op for empty entry and WARN_ON for valid entry */
- if (!pud_present(pud) || !pud_table(pud)) {
+ if (!pud_present(pud))
+ return 1;
+ if (!pud_table(pud)) {
VM_WARN_ON(!pud_table(pud));
return 1;
}
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 3534aa6a4dc2..1b083c500b9a 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void)
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
- while (!req.complete)
- pmu_poll();
+ pmu_wait_complete(&req);
- time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
- (req.reply[3] << 8) | req.reply[4]);
+ time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
+ (req.reply[2] << 8) | req.reply[3]);
return time - RTC_OFFSET;
}
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time)
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
return;
- while (!req.complete)
- pmu_poll();
+ pmu_wait_complete(&req);
}
static __u8 pmu_read_pram(int offset)
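
For the pmu_read_time() change just above, this is a small standalone sketch of the byte assembly it switches to: the PMU reply now starts directly with the big-endian 32-bit RTC value, so the seconds come from reply bytes 0..3 rather than 1..4, and the Mac 1904 epoch offset is then subtracted. The helper name is hypothetical; RTC_OFFSET is the usual 1904-to-1970 offset in seconds.

#include <stdint.h>

#define RTC_OFFSET 2082844800UL	/* seconds from 1904-01-01 (Mac RTC epoch) to 1970-01-01 */

/* Illustrative only: assemble the big-endian RTC value from the first four reply bytes. */
static int64_t pmu_reply_to_time64(const uint8_t reply[4])
{
	uint32_t raw = ((uint32_t)reply[0] << 24) | ((uint32_t)reply[1] << 16) |
		       ((uint32_t)reply[2] << 8)  |  (uint32_t)reply[3];

	return (int64_t)raw - (int64_t)RTC_OFFSET;
}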
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 70dde040779b..f5453d944ff5 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void)
high_memory = (void *)_ramend;
/* Reserve kernel text/data/bss */
- memblock_reserve(memstart, memstart - _rambase);
+ memblock_reserve(_rambase, memstart - _rambase);
m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
module_fixup(NULL, __start_fixup, __stop_fixup);
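
A quick worked example, with made-up addresses, of what the one-line mcfmmu.c fix changes: the reservation is meant to cover the kernel image between _rambase and memstart, not the first chunk of free memory after it.

/*
 * Hypothetical layout, purely for illustration:
 *   _rambase = 0x40000000  (start of RAM, where the kernel image sits)
 *   memstart = 0x40400000  (first free address after text/data/bss)
 *
 *   old: memblock_reserve(memstart, memstart - _rambase)
 *        -> reserves [0x40400000, 0x40800000), i.e. 4 MiB of free RAM,
 *           while the kernel image itself stays allocatable.
 *   new: memblock_reserve(_rambase, memstart - _rambase)
 *        -> reserves [0x40000000, 0x40400000), exactly the kernel image,
 *           as the "Reserve kernel text/data/bss" comment intends.
 */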
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a9af1d2dcd69..2c1c53d12179 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
bool write);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 019035d7225c..8f845f6e5f42 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -20,6 +21,7 @@
#include <asm/abi.h>
#include <asm/mips-cps.h>
+#include <asm/page.h>
#include <asm/vdso.h>
/* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;
+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
base = get_unmapped_area(NULL, 0, size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data, userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+ }
+
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;
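
The colour-matching arithmetic added above can be pictured with a small standalone helper (names hypothetical): the mapping is first rounded up to a cache-colour boundary inside the oversized region, then offset so that the VDSO data page gets the same colour as the kernel's own vdso_data mapping.

#include <stdint.h>

#define ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(uintptr_t)(mask))

/* Illustrative only: mirrors the base adjustment done when cpu_has_dc_aliases. */
static uintptr_t colour_match_base(uintptr_t base, uintptr_t kernel_vdso_data,
				   uintptr_t gic_size, uintptr_t shm_align_mask)
{
	/* Round up to a colour boundary; the region was oversized to allow this. */
	base = ALIGN_MASK(base, shm_align_mask);
	/* Shift so the data page (at base + gic_size) shares the kernel's colour. */
	base += (kernel_vdso_data - gic_size) & shm_align_mask;
	return base;
}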
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index ee64db032793..d8dcdb350405 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
-}
-
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 1d4248fa55e9..7068f341133d 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -40,6 +40,10 @@ config NDS32
select NO_IOPORT_MAP
select RTC_LIB
select THREAD_INFO_IN_TASK
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_DYNAMIC_FTRACE
help
Andes(nds32) Linux support.
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 63f4f173e5f4..3509fac10491 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig
comma = ,
+ifdef CONFIG_FUNCTION_TRACER
+arch-y += -malways-save-lp -mno-relax
+endif
+
KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog)
KBUILD_CFLAGS += -mcmodel=large
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h
index 56c479058802..f5f9cf7e0544 100644
--- a/arch/nds32/include/asm/elf.h
+++ b/arch/nds32/include/asm/elf.h
@@ -121,9 +121,9 @@ struct elf32_hdr;
*/
#define ELF_CLASS ELFCLASS32
#ifdef __NDS32_EB__
-#define ELF_DATA ELFDATA2MSB;
+#define ELF_DATA ELFDATA2MSB
#else
-#define ELF_DATA ELFDATA2LSB;
+#define ELF_DATA ELFDATA2LSB
#endif
#define ELF_ARCH EM_NDS32
#define USE_ELF_CORE_DUMP
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
new file mode 100644
index 000000000000..2f96cc96aa35
--- /dev/null
+++ b/arch/nds32/include/asm/ftrace.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_NDS32_FTRACE_H
+#define __ASM_NDS32_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+/* mcount call is composed of three instructions:
+ * sethi + ori + jral
+ */
+#define MCOUNT_INSN_SIZE 12
+
+extern void _mcount(unsigned long parent_ip);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_ADDR ((unsigned long)_ftrace_caller)
+
+#ifdef __NDS32_EL__
+#define INSN_NOP 0x09000040
+#define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2)
+#define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046)
+#define ENDIAN_CONVERT(insn) be32_to_cpu(insn)
+#else /* __NDS32_EB__ */
+#define INSN_NOP 0x40000009
+#define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 4 : 2)
+#define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000)
+#define ENDIAN_CONVERT(insn) (insn)
+#endif
+
+extern void _ftrace_caller(unsigned long parent_ip);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* __ASM_NDS32_FTRACE_H */
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h
index 19b19394a936..68c38151c3e4 100644
--- a/arch/nds32/include/asm/nds32.h
+++ b/arch/nds32/include/asm/nds32.h
@@ -17,6 +17,7 @@
#else
#define FP_OFFSET (-2)
#endif
+#define LP_OFFSET (-1)
extern void __init early_trap_init(void);
static inline void GIE_ENABLE(void)
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
index 18a009f3804d..362a32d9bd16 100644
--- a/arch/nds32/include/asm/uaccess.h
+++ b/arch/nds32/include/asm/uaccess.h
@@ -38,7 +38,7 @@ struct exception_table_entry {
extern int fixup_exception(struct pt_regs *regs);
#define KERNEL_DS ((mm_segment_t) { ~0UL })
-#define USER_DS ((mm_segment_t) {TASK_SIZE - 1})
+#define USER_DS ((mm_segment_t) {TASK_SIZE - 1})
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs)
current_thread_info()->addr_limit = fs;
}
-#define segment_eq(a, b) ((a) == (b))
+#define segment_eq(a, b) ((a) == (b))
#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
-#define access_ok(type, addr, size) \
+#define access_ok(type, addr, size) \
__range_ok((unsigned long)addr, (unsigned long)size)
/*
* Single-value transfer routines. They automatically use the right
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs)
* versions are void (ie, don't return a value as such).
*/
-#define get_user(x,p) \
-({ \
- long __e = -EFAULT; \
- if(likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \
- __e = __get_user(x,p); \
- } else \
- x = 0; \
- __e; \
-})
-#define __get_user(x,ptr) \
+#define get_user __get_user \
+
+#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
- __get_user_err((x),(ptr),__gu_err); \
+ __get_user_check((x), (ptr), __gu_err); \
__gu_err; \
})
-#define __get_user_error(x,ptr,err) \
+#define __get_user_error(x, ptr, err) \
({ \
- __get_user_err((x),(ptr),err); \
- (void) 0; \
+ __get_user_check((x), (ptr), (err)); \
+ (void)0; \
})
-#define __get_user_err(x,ptr,err) \
+#define __get_user_check(x, ptr, err) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
+ __get_user_err((x), __p, (err)); \
+ } else { \
+ (x) = 0; (err) = -EFAULT; \
+ } \
+})
+
+#define __get_user_err(x, ptr, err) \
do { \
- unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm("lbi",__gu_val,__gu_addr,err); \
+ __get_user_asm("lbi", __gu_val, (ptr), (err)); \
break; \
case 2: \
- __get_user_asm("lhi",__gu_val,__gu_addr,err); \
+ __get_user_asm("lhi", __gu_val, (ptr), (err)); \
break; \
case 4: \
- __get_user_asm("lwi",__gu_val,__gu_addr,err); \
+ __get_user_asm("lwi", __gu_val, (ptr), (err)); \
break; \
case 8: \
- __get_user_asm_dword(__gu_val,__gu_addr,err); \
+ __get_user_asm_dword(__gu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
break; \
} \
- (x) = (__typeof__(*(ptr)))__gu_val; \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_asm(inst,x,addr,err) \
- asm volatile( \
- "1: "inst" %1,[%2]\n" \
- "2:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: move %0, %3\n" \
- " move %1, #0\n" \
- " b 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .previous" \
- : "+r" (err), "=&r" (x) \
- : "r" (addr), "i" (-EFAULT) \
- : "cc")
+#define __get_user_asm(inst, x, addr, err) \
+ __asm__ __volatile__ ( \
+ "1: "inst" %1,[%2]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: move %0, %3\n" \
+ " move %1, #0\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err), "=&r" (x) \
+ : "r" (addr), "i" (-EFAULT) \
+ : "cc")
#ifdef __NDS32_EB__
#define __gu_reg_oper0 "%H1"
@@ -149,61 +152,66 @@ do { \
#endif
#define __get_user_asm_dword(x, addr, err) \
- asm volatile( \
- "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \
- "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "4: move %0, %3\n" \
- " b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4b\n" \
- " .long 2b, 4b\n" \
- " .previous" \
- : "+r"(err), "=&r"(x) \
- : "r"(addr), "i"(-EFAULT) \
- : "cc")
-#define put_user(x,p) \
-({ \
- long __e = -EFAULT; \
- if(likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \
- __e = __put_user(x,p); \
- } \
- __e; \
-})
-#define __put_user(x,ptr) \
+ __asm__ __volatile__ ( \
+ "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \
+ "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: move %0, %3\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous" \
+ : "+r"(err), "=&r"(x) \
+ : "r"(addr), "i"(-EFAULT) \
+ : "cc")
+
+#define put_user __put_user \
+
+#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
- __put_user_err((x),(ptr),__pu_err); \
+ __put_user_err((x), (ptr), __pu_err); \
__pu_err; \
})
-#define __put_user_error(x,ptr,err) \
+#define __put_user_error(x, ptr, err) \
+({ \
+ __put_user_err((x), (ptr), (err)); \
+ (void)0; \
+})
+
+#define __put_user_check(x, ptr, err) \
({ \
- __put_user_err((x),(ptr),err); \
- (void) 0; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
+ __put_user_err((x), __p, (err)); \
+ } else { \
+ (err) = -EFAULT; \
+ } \
})
-#define __put_user_err(x,ptr,err) \
+#define __put_user_err(x, ptr, err) \
do { \
- unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_user_asm("sbi",__pu_val,__pu_addr,err); \
+ __put_user_asm("sbi", __pu_val, (ptr), (err)); \
break; \
case 2: \
- __put_user_asm("shi",__pu_val,__pu_addr,err); \
+ __put_user_asm("shi", __pu_val, (ptr), (err)); \
break; \
case 4: \
- __put_user_asm("swi",__pu_val,__pu_addr,err); \
+ __put_user_asm("swi", __pu_val, (ptr), (err)); \
break; \
case 8: \
- __put_user_asm_dword(__pu_val,__pu_addr,err); \
+ __put_user_asm_dword(__pu_val, (ptr), (err)); \
break; \
default: \
BUILD_BUG(); \
@@ -211,22 +219,22 @@ do { \
} \
} while (0)
-#define __put_user_asm(inst,x,addr,err) \
- asm volatile( \
- "1: "inst" %1,[%2]\n" \
- "2:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: move %0, %3\n" \
- " b 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .previous" \
- : "+r" (err) \
- : "r" (x), "r" (addr), "i" (-EFAULT) \
- : "cc")
+#define __put_user_asm(inst, x, addr, err) \
+ __asm__ __volatile__ ( \
+ "1: "inst" %1,[%2]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: move %0, %3\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err) \
+ : "r" (x), "r" (addr), "i" (-EFAULT) \
+ : "cc")
#ifdef __NDS32_EB__
#define __pu_reg_oper0 "%H2"
@@ -237,23 +245,24 @@ do { \
#endif
#define __put_user_asm_dword(x, addr, err) \
- asm volatile( \
- "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \
- "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "4: move %0, %3\n" \
- " b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4b\n" \
- " .long 2b, 4b\n" \
- " .previous" \
- : "+r"(err) \
- : "r"(addr), "r"(x), "i"(-EFAULT) \
- : "cc")
+ __asm__ __volatile__ ( \
+ "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \
+ "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: move %0, %3\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous" \
+ : "+r"(err) \
+ : "r"(addr), "r"(x), "i"(-EFAULT) \
+ : "cc")
+
extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
extern long strncpy_from_user(char *dest, const char __user * src, long count);
extern __must_check long strlen_user(const char __user * str);
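
Since the hunks above fold the access_ok() check into __get_user()/__put_user() themselves (get_user and put_user are now plain aliases), a typical caller looks like the hedged sketch below; sys_example and its argument are made up, but the return convention — 0 on success, -EFAULT with the destination zeroed on a faulting pointer — follows __get_user_check above.

#include <linux/uaccess.h>	/* get_user() */

/* Illustrative caller in kernel context; sys_example is not a real syscall. */
long sys_example(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* access_ok() + exception fixup handled inside */
		return -EFAULT;		/* val has been zeroed on failure */

	return val;
}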
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile
index 42792743e8b9..27cded39fa66 100644
--- a/arch/nds32/kernel/Makefile
+++ b/arch/nds32/kernel/Makefile
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds
obj-y += vdso/
+
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c
index 0c6d031a1c4a..0c5386e72098 100644
--- a/arch/nds32/kernel/atl2c.c
+++ b/arch/nds32/kernel/atl2c.c
@@ -9,7 +9,8 @@
void __iomem *atl2c_base;
static const struct of_device_id atl2c_ids[] __initconst = {
- {.compatible = "andestech,atl2c",}
+ {.compatible = "andestech,atl2c",},
+ {}
};
static int __init atl2c_of_init(void)
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
index b8ae4e9a6b93..21a144071566 100644
--- a/arch/nds32/kernel/ex-entry.S
+++ b/arch/nds32/kernel/ex-entry.S
@@ -118,7 +118,7 @@ common_exception_handler:
/* interrupt */
2:
#ifdef CONFIG_TRACE_IRQFLAGS
- jal trace_hardirqs_off
+ jal __trace_hardirqs_off
#endif
move $r0, $sp
sethi $lp, hi20(ret_from_intr)
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
index 03e4f7788a18..f00af92f7e22 100644
--- a/arch/nds32/kernel/ex-exit.S
+++ b/arch/nds32/kernel/ex-exit.S
@@ -138,8 +138,8 @@ no_work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
lwi $p0, [$sp+(#IPSW_OFFSET)]
andi $p0, $p0, #0x1
- la $r10, trace_hardirqs_off
- la $r9, trace_hardirqs_on
+ la $r10, __trace_hardirqs_off
+ la $r9, __trace_hardirqs_on
cmovz $r9, $p0, $r10
jral $r9
#endif
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644
index 000000000000..a0a9679ad5de
--- /dev/null
+++ b/arch/nds32/kernel/ftrace.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+ struct ftrace_ops*, struct pt_regs*);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern void ftrace_graph_caller(void);
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
+{
+ __asm__ (""); /* keep the compiler from optimizing this away as a pure function */
+}
+
+noinline void _mcount(unsigned long parent_ip)
+{
+ /* all state is saved by the compiler-generated prologue */
+
+ unsigned long ip = (unsigned long)__builtin_return_address(0);
+
+ if (ftrace_trace_function != ftrace_stub)
+ ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
+ NULL, NULL);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
+ || ftrace_graph_entry != ftrace_graph_entry_stub)
+ ftrace_graph_caller();
+#endif
+
+ /* all state is restored by the compiler-generated epilogue */
+}
+EXPORT_SYMBOL(_mcount);
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
+{
+ __asm__ (""); /* keep the compiler from optimizing this away as a pure function */
+}
+
+noinline void __naked _mcount(unsigned long parent_ip)
+{
+ __asm__ (""); /* keep the compiler from optimizing this away as a pure function */
+}
+EXPORT_SYMBOL(_mcount);
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+void _ftrace_caller(unsigned long parent_ip)
+{
+ /* save all state needed by the compiler prologue */
+
+ /*
+ * prepare arguments for real tracing function
+ * first arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
+ * second arg : parent_ip
+ */
+ __asm__ __volatile__ (
+ "move $r1, %0 \n\t"
+ "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
+ :
+ : "r" (parent_ip), "r" (__builtin_return_address(0)));
+
+ /* a placeholder for the call to a real tracing function */
+ __asm__ __volatile__ (
+ "ftrace_call: \n\t"
+ "nop \n\t"
+ "nop \n\t"
+ "nop \n\t");
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* a placeholder for the call to ftrace_graph_caller */
+ __asm__ __volatile__ (
+ "ftrace_graph_call: \n\t"
+ "nop \n\t"
+ "nop \n\t"
+ "nop \n\t");
+#endif
+ /* restore all state needed by the compiler epilogue */
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+
+int ftrace_arch_code_modify_prepare(void)
+{
+ set_all_modules_text_rw();
+ return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+ set_all_modules_text_ro();
+ return 0;
+}
+
+static unsigned long gen_sethi_insn(unsigned long addr)
+{
+ unsigned long opcode = 0x46000000;
+ unsigned long imm = addr >> 12;
+ unsigned long rt_num = 0xf << 20;
+
+ return ENDIAN_CONVERT(opcode | rt_num | imm);
+}
+
+static unsigned long gen_ori_insn(unsigned long addr)
+{
+ unsigned long opcode = 0x58000000;
+ unsigned long imm = addr & 0x0000fff;
+ unsigned long rt_num = 0xf << 20;
+ unsigned long ra_num = 0xf << 15;
+
+ return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
+}
+
+static unsigned long gen_jral_insn(unsigned long addr)
+{
+ unsigned long opcode = 0x4a000001;
+ unsigned long rt_num = 0x1e << 20;
+ unsigned long rb_num = 0xf << 10;
+
+ return ENDIAN_CONVERT(opcode | rt_num | rb_num);
+}
+
+static void ftrace_gen_call_insn(unsigned long *call_insns,
+ unsigned long addr)
+{
+ call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */
+ call_insns[1] = gen_ori_insn(addr); /* ori $r15, $r15, imm15u */
+ call_insns[2] = gen_jral_insn(addr); /* jral $lp, $r15 */
+}
+
+static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+ unsigned long *new_insn, bool validate)
+{
+ unsigned long orig_insn[3];
+
+ if (validate) {
+ if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+ if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
+ return -EINVAL;
+ }
+
+ if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ return 0;
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+ unsigned long *new_insn, bool validate)
+{
+ int ret;
+
+ ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
+ if (ret)
+ return ret;
+
+ flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+ return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long pc = (unsigned long)&ftrace_call;
+ unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+ unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+ if (func != ftrace_stub)
+ ftrace_gen_call_insn(new_insn, (unsigned long)func);
+
+ return ftrace_modify_code(pc, old_insn, new_insn, false);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+ unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+ ftrace_gen_call_insn(call_insn, addr);
+
+ return ftrace_modify_code(pc, nop_insn, call_insn, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+ unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+ ftrace_gen_call_insn(call_insn, addr);
+
+ return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ struct ftrace_graph_ent trace;
+ unsigned long old;
+ int err;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
+
+ trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace))
+ return;
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer, NULL);
+
+ if (err == -EBUSY)
+ return;
+
+ *parent = return_hooker;
+}
+
+noinline void ftrace_graph_caller(void)
+{
+ unsigned long *parent_ip =
+ (unsigned long *)(__builtin_frame_address(2) - 4);
+
+ unsigned long selfpc =
+ (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);
+
+ unsigned long frame_pointer =
+ (unsigned long)__builtin_frame_address(3);
+
+ prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
+}
+
+extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+void __naked return_to_handler(void)
+{
+ __asm__ __volatile__ (
+ /* save state needed by the ABI */
+ "smw.adm $r0,[$sp],$r1,#0x0 \n\t"
+
+ /* get original return address */
+ "move $r0, $fp \n\t"
+ "bal ftrace_return_to_handler\n\t"
+ "move $lp, $r0 \n\t"
+
+ /* restore state needed by the ABI */
+ "lmw.bim $r0,[$sp],$r1,#0x0 \n\t");
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+ unsigned long pc = (unsigned long)&ftrace_graph_call;
+ unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+ unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+ ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
+
+ if (enable)
+ return ftrace_modify_code(pc, nop_insn, call_insn, true);
+ else
+ return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+noinline void __trace_hardirqs_off(void)
+{
+ trace_hardirqs_off();
+}
+noinline void __trace_hardirqs_on(void)
+{
+ trace_hardirqs_on();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
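
The instruction generators above rebuild a 32-bit call target from a sethi/ori pair before "jral $lp, $r15"; the standalone example below (with a made-up address) checks that the two immediates really recombine into the original address.

#include <assert.h>
#include <stdint.h>

/*
 * Worked example: sethi carries addr[31:12], ori fills in addr[11:0],
 * so $r15 ends up holding the full call target.
 */
int main(void)
{
	uint32_t addr = 0x80123abc;		/* hypothetical kernel text address */
	uint32_t hi20 = addr >> 12;		/* immediate carried by sethi */
	uint32_t lo12 = addr & 0xfff;		/* immediate carried by ori */

	assert(((hi20 << 12) | lo12) == addr);	/* the pair recreates the target */
	return 0;
}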
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
index 4167283d8293..1e31829cbc2a 100644
--- a/arch/nds32/kernel/module.c
+++ b/arch/nds32/kernel/module.c
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
tmp2 = tmp & loc_mask;
if (partial_in_place) {
- tmp &= (!loc_mask);
+ tmp &= (~loc_mask);
tmp =
tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
} else {
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
tmp2 = tmp & loc_mask;
if (partial_in_place) {
- tmp &= (!loc_mask);
+ tmp &= (~loc_mask);
tmp =
tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
} else {
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
index 8b231e910ea6..d974c0c1c65f 100644
--- a/arch/nds32/kernel/stacktrace.c
+++ b/arch/nds32/kernel/stacktrace.c
@@ -4,6 +4,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
+#include <linux/ftrace.h>
void save_stack_trace(struct stack_trace *trace)
{
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
unsigned long *fpn;
int skip = trace->skip;
int savesched;
+ int graph_idx = 0;
if (tsk == current) {
__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
&& (fpn >= (unsigned long *)TASK_SIZE)) {
unsigned long lpp, fpp;
- lpp = fpn[-1];
+ lpp = fpn[LP_OFFSET];
fpp = fpn[FP_OFFSET];
if (!__kernel_text_address(lpp))
break;
+ else
+ lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
if (savesched || !in_sched_functions(lpp)) {
if (skip) {
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index a6205fd4db52..1496aab48998 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -8,6 +8,7 @@
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#include <linux/ftrace.h>
#include <asm/proc-fns.h>
#include <asm/unistd.h>
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs)
set_fs(fs);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#include <linux/ftrace.h>
-static void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
- if (*addr == (unsigned long)return_to_handler) {
- int index = tsk->curr_ret_stack;
-
- if (tsk->ret_stack && index >= *graph) {
- index -= *graph;
- *addr = tsk->ret_stack[index].ret;
- (*graph)++;
- }
- }
-}
-#else
-static inline void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-}
-#endif
-
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
while (!kstack_end(base_reg)) {
ret_addr = *base_reg++;
if (__kernel_text_address(ret_addr)) {
- get_real_ret_addr(&ret_addr, tsk, &graph);
+ ret_addr = ftrace_graph_ret_addr(
+ tsk, &graph, ret_addr, NULL);
print_ip_sym(ret_addr);
}
if (--cnt < 0)
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
!((unsigned long)base_reg & 0x3) &&
((unsigned long)base_reg >= TASK_SIZE)) {
unsigned long next_fp;
-#if !defined(NDS32_ABI_2)
- ret_addr = base_reg[0];
- next_fp = base_reg[1];
-#else
- ret_addr = base_reg[-1];
+ ret_addr = base_reg[LP_OFFSET];
next_fp = base_reg[FP_OFFSET];
-#endif
if (__kernel_text_address(ret_addr)) {
- get_real_ret_addr(&ret_addr, tsk, &graph);
+
+ ret_addr = ftrace_graph_ret_addr(
+ tsk, &graph, ret_addr, NULL);
print_ip_sym(ret_addr);
}
if (--cnt < 0)
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err)
pr_emerg("CPU: %i\n", smp_processor_id());
show_regs(regs);
pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
- tsk->comm, tsk->pid, task_thread_info(tsk) + 1);
+ tsk->comm, tsk->pid, end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
- dump_mem("Stack: ", regs->sp,
- THREAD_SIZE + (unsigned long)task_thread_info(tsk));
+ dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
dump_instr(regs);
dump_stack();
}
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
index 288313b886ef..9e90f30a181d 100644
--- a/arch/nds32/kernel/vmlinux.lds.S
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32)
ENTRY(_stext_lma)
jiffies = jiffies_64;
+#if defined(CONFIG_GCOV_KERNEL)
+#define NDS32_EXIT_KEEP(x) x
+#else
+#define NDS32_EXIT_KEEP(x)
+#endif
+
SECTIONS
{
_stext_lma = TEXTADDR - LOAD_OFFSET;
. = TEXTADDR;
__init_begin = .;
HEAD_TEXT_SECTION
+ .exit.text : {
+ NDS32_EXIT_KEEP(EXIT_TEXT)
+ }
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
+ .exit.data : {
+ NDS32_EXIT_KEEP(EXIT_DATA)
+ }
PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index 7a49f0d28d14..f1da8a7b17ff 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -3,15 +3,6 @@
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config DEBUG_STACK_USAGE
- bool "Enable stack utilization instrumentation"
- depends on DEBUG_KERNEL
- help
- Enables the display of the minimum amount of free stack which each
- task has ever had available in the sysrq-T and sysrq-P debug output.
-
- This option will slow down process creation somewhat.
-
config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index db0b6eebbfa5..a80669209155 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -177,7 +177,6 @@ config PPC
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
- select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 3c0e8fb2b773..68e14afecac8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long pp, key;
unsigned long v, orig_v, gr;
__be64 *hptep;
- int index;
+ long int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
if (kvm_is_radix(vcpu->kvm))
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 0af1c0aea1fe..fd6e8c13685f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -725,10 +725,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift);
if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
- unsigned long npages = 1;
+ unsigned long psize = PAGE_SIZE;
if (shift)
- npages = 1ul << (shift - PAGE_SHIFT);
- kvmppc_update_dirty_map(memslot, gfn, npages);
+ psize = 1ul << shift;
+ kvmppc_update_dirty_map(memslot, gfn, psize);
}
}
return 0;
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index c229509288ea..439dc7072e05 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -14,6 +14,10 @@
#ifndef _ASM_RISCV_TLB_H
#define _ASM_RISCV_TLB_H
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
#include <asm-generic/tlb.h>
static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 568026ccf6e8..fb03a4482ad6 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
uintptr_t, flags)
{
-#ifdef CONFIG_SMP
- struct mm_struct *mm = current->mm;
- bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
-#endif
-
/* Check the reserved flags. */
if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
return -EINVAL;
- /*
- * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(),
- * which generates unused variable warnings all over this function.
- */
-#ifdef CONFIG_SMP
- flush_icache_mm(mm, local);
-#else
- flush_icache_all();
-#endif
+ flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
return 0;
}
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f31a15044c24..a8418e1379eb 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,7 +16,13 @@ typedef struct {
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
- /* The mmu context allocates 4K page tables. */
+ /*
+ * The following bitfields need a down_write on the mm
+ * semaphore when they are written to. As they are only
+ * written once, they can be read without a lock.
+ *
+ * The mmu context allocates 4K page tables.
+ */
unsigned int alloc_pgste:1;
/* The mmu context uses extended page tables. */
unsigned int has_pgste:1;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 91ad4a9425c0..f69333fd2fa3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -695,7 +695,9 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
r = -EINVAL;
else {
r = 0;
+ down_write(&kvm->mm->mmap_sem);
kvm->mm->context.allow_gmap_hpage_1m = 1;
+ up_write(&kvm->mm->mmap_sem);
/*
* We might have to create fake 4k page
* tables. To avoid that the hardware works on
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d68f10441a16..8679bd74d337 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -280,9 +280,11 @@ retry:
goto retry;
}
}
- if (rc)
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
up_read(&current->mm->mmap_sem);
+ if (rc == -EFAULT)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (rc < 0)
+ return rc;
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu->run->s.regs.gprs[reg1] |= key;
return 0;
@@ -324,9 +326,11 @@ retry:
goto retry;
}
}
- if (rc < 0)
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
up_read(&current->mm->mmap_sem);
+ if (rc == -EFAULT)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (rc < 0)
+ return rc;
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
+ up_read(&current->mm->mmap_sem);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
- up_read(&current->mm->mmap_sem);
- if (rc >= 0)
- start += PAGE_SIZE;
+ if (rc < 0)
+ return rc;
+ start += PAGE_SIZE;
}
if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
+ up_read(&current->mm->mmap_sem);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
- up_read(&current->mm->mmap_sem);
- if (rc >= 0)
- start += PAGE_SIZE;
+ if (rc == -EAGAIN)
+ continue;
+ if (rc < 0)
+ return rc;
}
+ start += PAGE_SIZE;
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 63844b95c22c..a2b28cd1e3fe 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0039U);
/* copy only the wrapping keys */
- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+ if (read_guest_real(vcpu, crycb_addr + 72,
+ vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 3641a294ed54..e4abe9b8f97a 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -9,6 +9,7 @@
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
else
dev_set_name(&op->dev, "%08x", dp->phandle);
+ op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
if (of_device_register(op)) {
printk("%s: Could not register of device.\n",
dp->full_name);
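
This change (repeated for of_device_64.c just below) gives every scanned OF device a default 32-bit coherent DMA mask via DMA_BIT_MASK(32). As a reminder of what such a mask evaluates to, here is a small userspace sketch of a DMA_BIT_MASK(n)-style helper; the implementation is a local re-statement for illustration, including the n == 64 case where a plain shift would be undefined behaviour.

/*
 * Userspace sketch of a DMA_BIT_MASK(n)-style helper.  Not the kernel
 * macro itself, just the arithmetic it represents.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t dma_bit_mask(unsigned int n)
{
	/* 1ULL << 64 is undefined in C, so treat 64 (or more) bits specially. */
	return (n >= 64) ? ~0ULL : ((1ULL << n) - 1);
}

int main(void)
{
	printf("32-bit mask: 0x%016llx\n",
	       (unsigned long long)dma_bit_mask(32));	/* 0x00000000ffffffff */
	printf("64-bit mask: 0x%016llx\n",
	       (unsigned long long)dma_bit_mask(64));	/* 0xffffffffffffffff */
	return 0;
}
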
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 44e4d4435bed..6df6086968c6 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -2,6 +2,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->phandle);
+ op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ op->dev.dma_mask = &op->dev.coherent_dma_mask;
if (of_device_register(op)) {
printk("%s: Could not register of device.\n",
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 9bd139569b41..cb2deb61c5d9 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
pcmpeqd TWOONE(%rip), \TMP2
pand POLY(%rip), \TMP2
pxor \TMP2, \TMP3
- movdqa \TMP3, HashKey(%arg2)
+ movdqu \TMP3, HashKey(%arg2)
movdqa \TMP3, \TMP5
pshufd $78, \TMP3, \TMP1
pxor \TMP3, \TMP1
- movdqa \TMP1, HashKey_k(%arg2)
+ movdqu \TMP1, HashKey_k(%arg2)
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^2<<1 (mod poly)
- movdqa \TMP5, HashKey_2(%arg2)
+ movdqu \TMP5, HashKey_2(%arg2)
# HashKey_2 = HashKey^2<<1 (mod poly)
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_2_k(%arg2)
+ movdqu \TMP1, HashKey_2_k(%arg2)
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_3(%arg2)
+ movdqu \TMP5, HashKey_3(%arg2)
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_3_k(%arg2)
+ movdqu \TMP1, HashKey_3_k(%arg2)
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_4(%arg2)
+ movdqu \TMP5, HashKey_4(%arg2)
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_4_k(%arg2)
+ movdqu \TMP1, HashKey_4_k(%arg2)
.endm
# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
- movdqa HashKey(%arg2), %xmm13
+ movdqu HashKey(%arg2), %xmm13
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
%xmm4, %xmm5, %xmm6
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%arg2), \TMP5
+ movdqu HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%arg2), \TMP5
+ movdqu HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%arg2), \TMP5
+ movdqu HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%arg2), \TMP5
+ movdqu HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%arg2), \TMP5
+ movdqu HashKey_2(%arg2), \TMP5
# Multiply TMP5 * HashKey using karatsuba
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%arg2), \TMP5
+ movdqu HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%arg2), \TMP5
+ movdqu HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%arg2), \TMP5
+ movdqu HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%arg2), \TMP5
+ movdqu HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%arg2), \TMP5
+ movdqu HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%arg2), \TMP5
+ movdqu HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%arg2), \TMP5
+ movdqu HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%arg2), \TMP5
+ movdqu HashKey_2(%arg2), \TMP5
# Multiply TMP5 * HashKey using karatsuba
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%arg2), \TMP5
+ movdqu HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%arg2), \TMP5
+ movdqu HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%arg2), \TMP5
+ movdqu HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM1, \TMP6
pshufd $78, \XMM1, \TMP2
pxor \XMM1, \TMP2
- movdqa HashKey_4(%arg2), \TMP5
+ movdqu HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
- movdqa HashKey_4_k(%arg2), \TMP4
+ movdqu HashKey_4_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqa \XMM1, \XMMDst
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM2, \TMP1
pshufd $78, \XMM2, \TMP2
pxor \XMM2, \TMP2
- movdqa HashKey_3(%arg2), \TMP5
+ movdqu HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
- movdqa HashKey_3_k(%arg2), \TMP4
+ movdqu HashKey_3_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM2, \XMMDst
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM3, \TMP1
pshufd $78, \XMM3, \TMP2
pxor \XMM3, \TMP2
- movdqa HashKey_2(%arg2), \TMP5
+ movdqu HashKey_2(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
- movdqa HashKey_2_k(%arg2), \TMP4
+ movdqu HashKey_2_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM3, \XMMDst
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM4, \TMP1
pshufd $78, \XMM4, \TMP2
pxor \XMM4, \TMP2
- movdqa HashKey(%arg2), \TMP5
+ movdqu HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
- movdqa HashKey_k(%arg2), \TMP4
+ movdqu HashKey_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM4, \XMMDst
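
The aesni-intel_asm.S hunks above consistently replace movdqa with movdqu for the HashKey fields in the caller-supplied gcm context, suggesting that context can no longer be assumed 16-byte aligned. A minimal SSE2 intrinsics sketch of the aligned/unaligned distinction follows; it is a standalone userspace example, not the crypto code, and assumes an x86 build with SSE2 enabled.

/*
 * Aligned vs. unaligned 128-bit loads.  _mm_load_si128 (movdqa) faults
 * if the pointer is not 16-byte aligned; _mm_loadu_si128 (movdqu)
 * accepts any alignment.
 */
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Oversized buffer so buf + 1 still leaves 16 readable bytes. */
	unsigned char buf[32] = { 0 };
	const __m128i *misaligned = (const __m128i *)(buf + 1);

	/* Safe regardless of alignment: compiles to movdqu. */
	__m128i v = _mm_loadu_si128(misaligned);

	/*
	 * _mm_load_si128(misaligned) would compile to movdqa and raise
	 * #GP at run time on the misaligned address -- the failure mode
	 * the unaligned forms avoid.
	 */
	uint64_t out[2];
	_mm_storeu_si128((__m128i *)out, v);
	printf("%llu %llu\n", (unsigned long long)out[0],
	       (unsigned long long)out[1]);
	return 0;
}
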
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00ddb0c9e612..8e90488c3d56 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1237,19 +1237,12 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
-#define EMULTYPE_RETRY (1 << 3)
-#define EMULTYPE_NO_REEXECUTE (1 << 4)
-#define EMULTYPE_NO_UD_ON_FAIL (1 << 5)
-#define EMULTYPE_VMWARE (1 << 6)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
- int emulation_type, void *insn, int insn_len);
-
-static inline int emulate_instruction(struct kvm_vcpu *vcpu,
- int emulation_type)
-{
- return x86_emulate_instruction(vcpu, 0,
- emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
-}
+#define EMULTYPE_ALLOW_RETRY (1 << 3)
+#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)
+#define EMULTYPE_VMWARE (1 << 5)
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+ void *insn, int insn_len);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -1450,7 +1443,6 @@ asmlinkage void kvm_spurious_fault(void);
____kvm_handle_fault_on_reboot(insn, "")
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -1463,7 +1455,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
- unsigned long ipi_bitmap_high, int min,
+ unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit);
u64 kvm_get_arch_capabilities(void);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index a564084c6141..f8b1ad2c3828 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H
+#include <asm/atomic64_32.h>
+
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
- /* xchg acts as a barrier before the setting of the high bits */
- res.pte_low = xchg(&ptep->pte_low, 0);
- res.pte_high = ptep->pte_high;
- ptep->pte_high = 0;
+ res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
return res;
}
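
The pgtable-3level.h hunk above replaces the two 32-bit steps (xchg of the low word, then a plain clear of the high word) with a single 64-bit arch_atomic64_xchg(). A userspace C11 analogy, not the kernel implementation, of why a single exchange is preferable:

/*
 * A single atomic_exchange returns the old 64-bit value and clears the
 * slot in one step, with no window in which another thread/CPU can
 * observe a half-cleared entry.  The PTE value below is made up.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t fake_pte = 0x8000000012345067ULL;

static uint64_t get_and_clear(_Atomic uint64_t *pte)
{
	/* Old value comes back, zero goes in, all in one atomic step. */
	return atomic_exchange(pte, 0);
}

int main(void)
{
	uint64_t old = get_and_clear(&fake_pte);

	printf("old=0x%016llx now=0x%016llx\n",
	       (unsigned long long)old,
	       (unsigned long long)atomic_load(&fake_pte));
	return 0;
}
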
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0cefba28c864..17c0472c5b34 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
}
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
- unsigned long ipi_bitmap_high, int min,
+ unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit)
{
int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
+ if (min > map->max_apic_id)
+ goto out;
/* Bits above cluster_size are masked in the caller. */
- for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
- vcpu = map->phys_map[min + i]->vcpu;
- count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ for_each_set_bit(i, &ipi_bitmap_low,
+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ if (map->phys_map[min + i]) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
}
min += cluster_size;
- for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
- vcpu = map->phys_map[min + i]->vcpu;
- count += kvm_apic_set_irq(vcpu, &irq, NULL);
+
+ if (min > map->max_apic_id)
+ goto out;
+
+ for_each_set_bit(i, &ipi_bitmap_high,
+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ if (map->phys_map[min + i]) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
}
+out:
rcu_read_unlock();
return count;
}
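
The lapic.c hunk above adds two guards to kvm_pv_send_ipi(): the bitmap scan is clamped so base + bit can never index past max_apic_id, and NULL phys_map slots are skipped before dereferencing. A minimal userspace sketch of the same pattern; the table contents, sizes and names are invented for illustration, not taken from KVM.

/*
 * Clamp a bit scan to the table size and skip NULL entries.
 */
#include <stdio.h>

#define TABLE_SIZE	8
#define WORD_BITS	(8 * (int)sizeof(unsigned long))

static const char *table[TABLE_SIZE] = {
	"cpu0", "cpu1", NULL, "cpu3", "cpu4", NULL, "cpu6", "cpu7",
};

static int deliver(unsigned long bitmap, int base)
{
	int count = 0;

	if (base >= TABLE_SIZE)
		return 0;			/* nothing addressable */

	/* Never scan past the end of the table, whatever the bitmap says. */
	int limit = TABLE_SIZE - base;
	if (limit > WORD_BITS)
		limit = WORD_BITS;

	for (int i = 0; i < limit; i++) {
		if (!(bitmap & (1UL << i)))
			continue;
		if (!table[base + i])		/* hole in the map: skip */
			continue;
		printf("deliver to %s\n", table[base + i]);
		count++;
	}
	return count;
}

int main(void)
{
	/* All bits set, base 5: only entries 5..7 are even considered. */
	printf("delivered=%d\n", deliver(~0UL, 5));
	return 0;
}
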
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a282321329b5..e24ea7067373 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
@@ -5217,7 +5212,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
void *insn, int insn_len)
{
- int r, emulation_type = EMULTYPE_RETRY;
+ int r, emulation_type = 0;
enum emulation_result er;
bool direct = vcpu->arch.mmu.direct_map;
@@ -5230,10 +5225,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
- if (r == RET_PF_EMULATE) {
- emulation_type = 0;
+ if (r == RET_PF_EMULATE)
goto emulate;
- }
}
if (r == RET_PF_INVALID) {
@@ -5260,8 +5253,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
return 1;
}
- if (mmio_info_in_cache(vcpu, cr2, direct))
- emulation_type = 0;
+ /*
+ * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+ * optimistically try to just unprotect the page and let the processor
+ * re-execute the instruction that caused the page fault. Do not allow
+ * retrying MMIO emulation, as it's not only pointless but could also
+ * cause us to enter an infinite loop because the processor will keep
+ * faulting on the non-existent MMIO address. Retrying an instruction
+ * from a nested guest is also pointless and dangerous as we are only
+ * explicitly shadowing L1's page tables, i.e. unprotecting something
+ * for L1 isn't going to magically fix whatever issue caused L2 to fail.
+ */
+ if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+ emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
/*
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6276140044d0..89c4c5aa15f1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
}
if (!svm->next_rip) {
- if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+ if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
EMULATE_DONE)
printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
@@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm)
WARN_ON_ONCE(!enable_vmware_backdoor);
- er = emulate_instruction(vcpu,
+ er = kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm)
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
if (string)
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm)
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,13 +3869,13 @@ static int invlpg_interception(struct vcpu_svm *svm)
static int emulate_on_interception(struct vcpu_svm *svm)
{
- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
static int rsm_interception(struct vcpu_svm *svm)
{
- return x86_emulate_instruction(&svm->vcpu, 0, 0,
- rsm_ins_bytes, 2) == EMULATE_DONE;
+ return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+ rsm_ins_bytes, 2) == EMULATE_DONE;
}
static int rdpmc_interception(struct vcpu_svm *svm)
@@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
ret = avic_unaccel_trap_write(svm);
} else {
/* Handling Fault */
- ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+ ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
}
return ret;
@@ -6747,7 +6747,7 @@ e_free:
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
unsigned long vaddr, vaddr_end, next_vaddr;
- unsigned long dst_vaddr, dst_vaddr_end;
+ unsigned long dst_vaddr;
struct page **src_p, **dst_p;
struct kvm_sev_dbg debug;
unsigned long n;
@@ -6763,7 +6763,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
size = debug.len;
vaddr_end = vaddr + size;
dst_vaddr = debug.dst_uaddr;
- dst_vaddr_end = dst_vaddr + size;
for (; vaddr < vaddr_end; vaddr = next_vaddr) {
int len, s_off, d_off;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d26f3c4985b..533a327372c8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
* Cause the #SS fault with 0 error code in VM86 mode.
*/
if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
- if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+ if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
WARN_ON_ONCE(!enable_vmware_backdoor);
- er = emulate_instruction(vcpu,
+ er = kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
++vcpu->stat.io_exits;
if (string)
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
static int handle_desc(struct kvm_vcpu *vcpu)
{
WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
static int handle_invd(struct kvm_vcpu *vcpu)
{
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
}
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return kvm_skip_emulated_instruction(vcpu);
else
- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
- NULL, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
}
return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1;
- err = emulate_instruction(vcpu, 0);
+ err = kvm_emulate_instruction(vcpu, 0);
if (err == EMULATE_USER_EXIT) {
++vcpu->stat.mmio_exits;
@@ -12537,8 +12537,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
bool from_vmentry = !!exit_qual;
u32 dummy_exit_qual;
+ u32 vmcs01_cpu_exec_ctrl;
int r = 0;
+ vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+
enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -12575,6 +12578,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
}
/*
+ * If L1 had a pending IRQ/NMI until it executed
+ * VMLAUNCH/VMRESUME which wasn't delivered because it was
+ * disallowed (e.g. interrupts disabled), L0 needs to
+ * evaluate if this pending event should cause an exit from L2
+ * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
+ * intercept EXTERNAL_INTERRUPT).
+ *
+ * Usually this would be handled by L0 requesting a
+ * IRQ/NMI window by setting VMCS accordingly. However,
+ * this setting was done on VMCS01 and now VMCS02 is active
+ * instead. Thus, we force L0 to perform pending event
+ * evaluation by requesting a KVM_REQ_EVENT.
+ */
+ if (vmcs01_cpu_exec_ctrl &
+ (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
+
+ /*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
* returned as far as L1 is concerned. It will only return (and set
@@ -13988,9 +14010,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
return -EINVAL;
- if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
- vmx->nested.nested_run_pending = 1;
-
vmx->nested.dirty_vmcs12 = true;
ret = enter_vmx_non_root_mode(vcpu, NULL);
if (ret)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 506bd2b4b8bb..542f6315444d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
emul_type = 0;
}
- er = emulate_instruction(vcpu, emul_type);
+ er = kvm_emulate_instruction(vcpu, emul_type);
if (er == EMULATE_USER_EXIT)
return 0;
if (er != EMULATE_DONE)
@@ -5870,7 +5870,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
gpa_t gpa = cr2;
kvm_pfn_t pfn;
- if (emulation_type & EMULTYPE_NO_REEXECUTE)
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ return false;
+
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;
if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5961,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
*/
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
- if (!(emulation_type & EMULTYPE_RETRY))
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ return false;
+
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;
if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6282,19 @@ restart:
return r;
}
-EXPORT_SYMBOL_GPL(x86_emulate_instruction);
+
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
+{
+ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+ void *insn, int insn_len)
+{
+ return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
@@ -7734,7 +7752,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
int r;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+ r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r != EMULATE_DONE)
return 0;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 257f27620bc2..67b9568613f3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
int page_num);
bool kvm_vector_hashing_enabled(void);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ int emulation_type, void *insn, int insn_len);
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 45b700ac5fe7..2fe5c9b1816b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
trace_xen_mmu_set_pte_atomic(ptep, pte);
- set_64bit((u64 *)ptep, native_pte_val(pte));
+ __xen_set_pte(ptep, pte);
}
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
trace_xen_mmu_pte_clear(mm, addr, ptep);
- if (!xen_batched_set_pte(ptep, native_make_pte(0)))
- native_pte_clear(mm, addr, ptep);
+ __xen_set_pte(ptep, native_make_pte(0));
}
static void xen_pmd_clear(pmd_t *pmdp)
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
#endif
- native_set_pte(ptep, pte);
+ __xen_set_pte(ptep, pte);
}
/* Early in boot, while setting up the initial pagetable, assume
@@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void)
pud_t *pud;
pgd_t *pgd;
unsigned long *new_p2m;
- int save_pud;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void)
pgd = __va(read_cr3_pa());
new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
- save_pud = n_pud;
for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
pud = early_memremap(pud_phys, PAGE_SIZE);
clear_page(pud);
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 58c6efa9f9a9..9fe5952d117d 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
- bfqg_put(bfqg);
-
blkg_put(bfqg_to_blkg(bfqg));
+
+ bfqg_put(bfqg);
}
/* @stats = 0 */
diff --git a/block/bio.c b/block/bio.c
index b12966e415d3..8c680a776171 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
{
if (unlikely(bio->bi_blkg))
return -EBUSY;
- blkg_get(blkg);
+ if (!blkg_try_get(blkg))
+ return -ENODEV;
bio->bi_blkg = blkg;
return 0;
}
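
The bio.c hunk above switches from an unconditional blkg_get() to blkg_try_get(), letting the caller see -ENODEV instead of taking a reference on a blkg that is already being torn down. A userspace C11 sketch of the general try-get idea (an inc-not-zero loop); the struct and function names are illustrative, not the blk-cgroup API.

/*
 * Take a reference only if the object still has one, so a dying object
 * is never resurrected.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

struct obj {
	_Atomic int refcnt;
};

static bool obj_try_get(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	/* Classic inc_not_zero loop: never move 0 -> 1. */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

static int associate(struct obj *o)
{
	if (!obj_try_get(o))
		return -ENODEV;	/* object dying: let the caller fall back */
	return 0;
}

int main(void)
{
	struct obj live = { 2 }, dying = { 0 };

	printf("live:  %d\n", associate(&live));	/* 0 */
	printf("dying: %d\n", associate(&dying));	/* -ENODEV */
	return 0;
}
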
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 694595b29b8f..c19f9078da1e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
}
}
-static void blkg_pd_offline(struct blkcg_gq *blkg)
-{
- int i;
-
- lockdep_assert_held(blkg->q->queue_lock);
- lockdep_assert_held(&blkg->blkcg->lock);
-
- for (i = 0; i < BLKCG_MAX_POLS; i++) {
- struct blkcg_policy *pol = blkcg_policy[i];
-
- if (blkg->pd[i] && !blkg->pd[i]->offline &&
- pol->pd_offline_fn) {
- pol->pd_offline_fn(blkg->pd[i]);
- blkg->pd[i]->offline = true;
- }
- }
-}
-
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
struct blkcg_gq *parent = blkg->parent;
+ int i;
lockdep_assert_held(blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
@@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
WARN_ON_ONCE(list_empty(&blkg->q_node));
WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+ if (blkg->pd[i] && pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg->pd[i]);
+ }
+
if (parent) {
blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q)
struct blkcg *blkcg = blkg->blkcg;
spin_lock(&blkcg->lock);
- blkg_pd_offline(blkg);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
@@ -1053,59 +1042,64 @@ static struct cftype blkcg_legacy_files[] = {
{ } /* terminate */
};
+/*
+ * blkcg destruction is a three-stage process.
+ *
+ * 1. Destruction starts. The blkcg_css_offline() callback is invoked
+ * which offlines writeback. Here we tie the next stage of blkg destruction
+ * to the completion of writeback associated with the blkcg. This lets us
+ * avoid punting potentially large amounts of outstanding writeback to root
+ * while maintaining any ongoing policies. The next stage is triggered when
+ * the nr_cgwbs count goes to zero.
+ *
+ * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
+ * and handles the destruction of blkgs. Here the css reference held by
+ * the blkg is put back eventually allowing blkcg_css_free() to be called.
+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue. Any submitted ios that fail to get the blkg ref will be
+ * punted to the root_blkg.
+ *
+ * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
+ * This finally frees the blkcg.
+ */
+
/**
* blkcg_css_offline - cgroup css_offline callback
* @css: css of interest
*
- * This function is called when @css is about to go away and responsible
- * for offlining all blkgs pd and killing all wbs associated with @css.
- * blkgs pd offline should be done while holding both q and blkcg locks.
- * As blkcg lock is nested inside q lock, this function performs reverse
- * double lock dancing.
- *
- * This is the blkcg counterpart of ioc_release_fn().
+ * This function is called when @css is about to go away. Here the cgwbs are
+ * offlined first and only once writeback associated with the blkcg has
+ * finished do we start step 2 (see above).
*/
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
struct blkcg *blkcg = css_to_blkcg(css);
- struct blkcg_gq *blkg;
-
- spin_lock_irq(&blkcg->lock);
-
- hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- struct request_queue *q = blkg->q;
-
- if (spin_trylock(q->queue_lock)) {
- blkg_pd_offline(blkg);
- spin_unlock(q->queue_lock);
- } else {
- spin_unlock_irq(&blkcg->lock);
- cpu_relax();
- spin_lock_irq(&blkcg->lock);
- }
- }
-
- spin_unlock_irq(&blkcg->lock);
+ /* this prevents anyone from attaching or migrating to this blkcg */
wb_blkcg_offline(blkcg);
+
+ /* put the base cgwb reference allowing step 2 to be triggered */
+ blkcg_cgwb_put(blkcg);
}
/**
- * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * blkcg_destroy_blkgs - responsible for shooting down blkgs
* @blkcg: blkcg of interest
*
- * This function is called when blkcg css is about to free and responsible for
- * destroying all blkgs associated with @blkcg.
- * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
* is nested inside q lock, this function performs reverse double lock dancing.
+ * Destroying the blkgs releases the reference held on the blkcg's css allowing
+ * blkcg_css_free to eventually be called.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
*/
-static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
spin_lock_irq(&blkcg->lock);
+
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
- struct blkcg_gq,
- blkcg_node);
+ struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
if (spin_trylock(q->queue_lock)) {
@@ -1117,6 +1111,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
spin_lock_irq(&blkcg->lock);
}
}
+
spin_unlock_irq(&blkcg->lock);
}
@@ -1125,8 +1120,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
struct blkcg *blkcg = css_to_blkcg(css);
int i;
- blkcg_destroy_all_blkgs(blkcg);
-
mutex_lock(&blkcg_pol_mutex);
list_del(&blkcg->all_blkcgs_node);
@@ -1189,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
+ refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
@@ -1480,11 +1474,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
list_for_each_entry(blkg, &q->blkg_list, q_node) {
if (blkg->pd[pol->plid]) {
- if (!blkg->pd[pol->plid]->offline &&
- pol->pd_offline_fn) {
+ if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
- blkg->pd[pol->plid]->offline = true;
- }
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
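
The blk-cgroup.c changes above (and the comment block describing the three stages) key blkg destruction off a cgwb reference count: the base reference is dropped in blkcg_css_offline(), and blkcg_destroy_blkgs() only runs once writeback has dropped its references too. A userspace sketch of that staged-teardown shape; blkcg_cgwb_put() and blkcg_destroy_blkgs() are the real names, everything else below is illustrative.

/*
 * Staged teardown: expensive cleanup runs only when the last reference
 * is dropped, whoever drops it.
 */
#include <stdatomic.h>
#include <stdio.h>

struct group {
	_Atomic int cgwb_refcnt;	/* 1 base ref + one per active writer */
};

static void destroy_groups(struct group *g)
{
	(void)g;
	printf("last reference gone: tearing down blkgs now\n");
}

static void group_cgwb_put(struct group *g)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref. */
	if (atomic_fetch_sub(&g->cgwb_refcnt, 1) == 1)
		destroy_groups(g);		/* stage 2 of the teardown */
}

int main(void)
{
	struct group g = { 1 };			/* base reference from alloc */

	atomic_fetch_add(&g.cgwb_refcnt, 1);	/* writeback still running */
	group_cgwb_put(&g);			/* css_offline drops base ref */
	printf("writeback finishes...\n");
	group_cgwb_put(&g);			/* now teardown actually runs */
	return 0;
}
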
diff --git a/block/blk-core.c b/block/blk-core.c
index dee56c282efb..4dbc93f43b38 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
const int op = bio_op(bio);
- if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+ if (part->policy && op_is_write(op)) {
char b[BDEVNAME_SIZE];
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return false;
+
WARN_ONCE(1,
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a3eede00d302..01d0620a4e4a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- if (bio->bi_css)
- bio_associate_blkg(bio, tg_to_blkg(tg));
+ /* fallback to root_blkg if we fail to get a blkg ref */
+ if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
+ bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
#endif
}
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 84507d3e9a98..8e20a0677dcf 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb)
}
}
-static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
+ enum wbt_flags wb_acct)
{
- struct rq_wb *rwb = RQWB(rqos);
- struct rq_wait *rqw;
int inflight, limit;
- if (!(wb_acct & WBT_TRACKED))
- return;
-
- rqw = get_rq_wait(rwb, wb_acct);
inflight = atomic_dec_return(&rqw->inflight);
/*
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
int diff = limit - inflight;
if (!inflight || diff >= rwb->wb_background / 2)
- wake_up(&rqw->wait);
+ wake_up_all(&rqw->wait);
}
}
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ struct rq_wait *rqw;
+
+ if (!(wb_acct & WBT_TRACKED))
+ return;
+
+ rqw = get_rq_wait(rwb, wb_acct);
+ wbt_rqw_done(rwb, rqw, wb_acct);
+}
+
/*
* Called on completion of a request. Note that it's also called when
* a request is merged, when the request gets freed.
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
return limit;
}
+struct wbt_wait_data {
+ struct wait_queue_entry wq;
+ struct task_struct *task;
+ struct rq_wb *rwb;
+ struct rq_wait *rqw;
+ unsigned long rw;
+ bool got_token;
+};
+
+static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+ int wake_flags, void *key)
+{
+ struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
+ wq);
+
+ /*
+ * If we fail to get a budget, return -1 to interrupt the wake up
+ * loop in __wake_up_common.
+ */
+ if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
+ return -1;
+
+ data->got_token = true;
+ list_del_init(&curr->entry);
+ wake_up_process(data->task);
+ return 1;
+}
+
/*
* Block if we will exceed our limit, or if we are currently waiting for
* the timer to kick off queuing again.
@@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
__acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
- DECLARE_WAITQUEUE(wait, current);
+ struct wbt_wait_data data = {
+ .wq = {
+ .func = wbt_wake_function,
+ .entry = LIST_HEAD_INIT(data.wq.entry),
+ },
+ .task = current,
+ .rwb = rwb,
+ .rqw = rqw,
+ .rw = rw,
+ };
bool has_sleeper;
has_sleeper = wq_has_sleeper(&rqw->wait);
if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
return;
- add_wait_queue_exclusive(&rqw->wait, &wait);
+ prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ if (data.got_token)
+ break;
- if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+ if (!has_sleeper &&
+ rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
+ finish_wait(&rqw->wait, &data.wq);
+
+ /*
+ * We raced with wbt_wake_function() getting a token,
+ * which means we now have two. Put our local token
+ * and wake anyone else potentially waiting for one.
+ */
+ if (data.got_token)
+ wbt_rqw_done(rwb, rqw, wb_acct);
break;
+ }
if (lock) {
spin_unlock_irq(lock);
@@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
spin_lock_irq(lock);
} else
io_schedule();
+
has_sleeper = false;
} while (1);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&rqw->wait, &wait);
+ finish_wait(&rqw->wait, &data.wq);
}
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
return;
}
- if (current_is_kswapd())
- flags |= WBT_KSWAPD;
- if (bio_op(bio) == REQ_OP_DISCARD)
- flags |= WBT_DISCARD;
-
__wbt_wait(rwb, flags, bio->bi_opf, lock);
if (!blk_stat_is_active(rwb->cb))
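
Both __wbt_wait() and the new wbt_wake_function() in the hunks above gate progress on the same budget check, rq_wait_inc_below(): take one unit of a limited in-flight budget atomically, or refuse without blocking. A userspace C11 sketch of that primitive; the limit value and names are illustrative.

/*
 * Atomically increment a counter only while it stays below a limit.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int inflight;

static bool inc_below(_Atomic int *counter, int limit)
{
	int cur = atomic_load(counter);

	while (cur < limit) {
		/* Only commit the increment if nobody raced us past the limit. */
		if (atomic_compare_exchange_weak(counter, &cur, cur + 1))
			return true;
	}
	return false;	/* over budget: caller goes (back) to sleep */
}

int main(void)
{
	const int limit = 2;

	printf("%d\n", inc_below(&inflight, limit));	/* 1 */
	printf("%d\n", inc_below(&inflight, limit));	/* 1 */
	printf("%d\n", inc_below(&inflight, limit));	/* 0: budget exhausted */
	return 0;
}
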
diff --git a/block/bsg.c b/block/bsg.c
index db588add6ba6..9a442c23a715 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@ struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
struct hlist_node dev_list;
- atomic_t ref_count;
+ refcount_t ref_count;
char name[20];
int max_queue;
};
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd)
mutex_lock(&bsg_mutex);
- if (!atomic_dec_and_test(&bd->ref_count)) {
+ if (!refcount_dec_and_test(&bd->ref_count)) {
mutex_unlock(&bsg_mutex);
return 0;
}
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bd->queue = rq;
- atomic_set(&bd->ref_count, 1);
+ refcount_set(&bd->ref_count, 1);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
- atomic_inc(&bd->ref_count);
+ refcount_inc(&bd->ref_count);
goto found;
}
}
diff --git a/block/elevator.c b/block/elevator.c
index 5ea6e7d600e4..6a06b5d040e5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e)
spin_lock(&elv_list_lock);
if (elevator_find(e->elevator_name, e->uses_mq)) {
spin_unlock(&elv_list_lock);
- if (e->icq_cache)
- kmem_cache_destroy(e->icq_cache);
+ kmem_cache_destroy(e->icq_cache);
return -EBUSY;
}
list_add_tail(&e->list, &elv_list);
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 9706613eecf9..bf64cfa30feb 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)
static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;
static void lpss_iosf_enter_d3_state(void)
{
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 292088fcc624..d2e29a19890d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -35,11 +35,11 @@
#include <linux/delay.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
+#include <linux/dmi.h>
#endif
#include <linux/acpi_iort.h>
#include <linux/pci.h>
#include <acpi/apei.h>
-#include <linux/dmi.h>
#include <linux/suspend.h>
#include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
},
{}
};
-#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
- {}
-};
#endif
/* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
+#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,
- * DSDT will be copied to memory
+ * DSDT will be copied to memory.
+ * Note that calling dmi_check_system() here on other architectures
+ * would not be OK because only x86 initializes dmi early enough.
+ * Thankfully only x86 systems need such quirks for now.
*/
dmi_check_system(dsdt_dmi_table);
+#endif
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 172e32840256..599e01bcdef2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(ata_host_put);
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 5d4b72e21161..569a4a662dcd 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
.qc_issue = ftide010_qc_issue,
};
-static struct ata_port_info ftide010_port_info[] = {
- {
- .flags = ATA_FLAG_SLAVE_POSS,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .pio_mask = ATA_PIO4,
- .port_ops = &pata_ftide010_port_ops,
- },
+static struct ata_port_info ftide010_port_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .pio_mask = ATA_PIO4,
+ .port_ops = &pata_ftide010_port_ops,
};
#if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
}
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+ struct ata_port_info *pi,
bool is_ata1)
{
struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
/* Flag port as SATA-capable */
if (gemini_sata_bridge_enabled(sg, is_ata1))
- ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+ pi->flags |= ATA_FLAG_SATA;
+
+ /* This device has broken DMA, only PIO works */
+ if (of_machine_is_compatible("itian,sq201")) {
+ pi->mwdma_mask = 0;
+ pi->udma_mask = 0;
+ }
/*
* We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
}
#else
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+ struct ata_port_info *pi,
bool is_ata1)
{
return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct ata_port_info pi = ftide010_port_info[0];
+ struct ata_port_info pi = ftide010_port_info;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ftide010 *ftide;
struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
* are ATA0. This will also set up the cable types.
*/
ret = pata_ftide010_gemini_init(ftide,
+ &pi,
(res->start == 0x63400000));
if (ret)
goto err_dis_clk;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c8a1cb0b6136..817320c7c4c1 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev,
int nid;
/*
- * The block contains more than one zone can not be offlined.
- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
- */
- if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
- return sprintf(buf, "none\n");
-
- start_pfn = valid_start_pfn;
- nr_pages = valid_end_pfn - start_pfn;
-
- /*
* Check the existing zone. Make sure that we do that only on the
* online nodes otherwise the page_zone is not reliable
*/
if (mem->state == MEM_ONLINE) {
+ /*
+ * A block that contains more than one zone cannot be offlined.
+ * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+ */
+ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+ &valid_start_pfn, &valid_end_pfn))
+ return sprintf(buf, "none\n");
+ start_pfn = valid_start_pfn;
strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
goto out;
}
- nid = pfn_to_nid(start_pfn);
+ nid = mem->nid;
default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
strcat(buf, default_zone->name);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 8e2e4757adcb..5a42ae4078c2 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
int of_pm_clk_add_clks(struct device *dev)
{
struct clk **clks;
- unsigned int i, count;
+ int i, count;
int ret;
if (!dev || !dev->of_node)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3863c00372bb..14a51254c3db 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
case NBD_SET_BLKSIZE:
+ if (!arg || !is_power_of_2(arg) || arg < 512 ||
+ arg > PAGE_SIZE)
+ return -EINVAL;
nbd_size_set(nbd, arg,
div_s64(config->bytesize, arg));
return 0;
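
The nbd hunk above rejects NBD_SET_BLKSIZE arguments that are zero, not a power of two, below 512 or above PAGE_SIZE. A standalone sketch of that predicate; PAGE_SIZE is assumed to be 4096 here purely for the example.

/*
 * Block-size validation: power of two, within [512, page size].
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL

static bool blksize_ok(unsigned long arg)
{
	if (!arg || arg < 512 || arg > EXAMPLE_PAGE_SIZE)
		return false;
	/* A power of two has exactly one bit set: x & (x - 1) clears it. */
	return (arg & (arg - 1)) == 0;
}

int main(void)
{
	printf("512  -> %d\n", blksize_ok(512));	/* 1 */
	printf("1000 -> %d\n", blksize_ok(1000));	/* 0: not a power of two */
	printf("8192 -> %d\n", blksize_ok(8192));	/* 0: larger than a page */
	return 0;
}
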
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7915f3b03736..73ed5f3a862d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
count += sprintf(&buf[count], "%s"
"pool_id %llu\npool_name %s\n"
+ "pool_ns %s\n"
"image_id %s\nimage_name %s\n"
"snap_id %llu\nsnap_name %s\n"
"overlap %llu\n",
!count ? "" : "\n", /* first? */
spec->pool_id, spec->pool_name,
+ spec->pool_ns ?: "",
spec->image_id, spec->image_name ?: "(unknown)",
spec->snap_id, spec->snap_name,
rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
&rbd_dev->header.features);
}
+struct parent_image_info {
+ u64 pool_id;
+ const char *pool_ns;
+ const char *image_id;
+ u64 snap_id;
+
+ bool has_overlap;
+ u64 overlap;
+};
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int decode_parent_image_spec(void **p, void *end,
+ struct parent_image_info *pii)
+{
+ u8 struct_v;
+ u32 struct_len;
+ int ret;
+
+ ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
+ &struct_v, &struct_len);
+ if (ret)
+ return ret;
+
+ ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
+ pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+ if (IS_ERR(pii->pool_ns)) {
+ ret = PTR_ERR(pii->pool_ns);
+ pii->pool_ns = NULL;
+ return ret;
+ }
+ pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+ if (IS_ERR(pii->image_id)) {
+ ret = PTR_ERR(pii->image_id);
+ pii->image_id = NULL;
+ return ret;
+ }
+ ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
+static int __get_parent_info(struct rbd_device *rbd_dev,
+ struct page *req_page,
+ struct page *reply_page,
+ struct parent_image_info *pii)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ size_t reply_len = PAGE_SIZE;
+ void *p, *end;
+ int ret;
+
+ ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ "rbd", "parent_get", CEPH_OSD_FLAG_READ,
+ req_page, sizeof(u64), reply_page, &reply_len);
+ if (ret)
+ return ret == -EOPNOTSUPP ? 1 : ret;
+
+ p = page_address(reply_page);
+ end = p + reply_len;
+ ret = decode_parent_image_spec(&p, end, pii);
+ if (ret)
+ return ret;
+
+ ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
+ req_page, sizeof(u64), reply_page, &reply_len);
+ if (ret)
+ return ret;
+
+ p = page_address(reply_page);
+ end = p + reply_len;
+ ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
+ if (pii->has_overlap)
+ ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+ struct page *req_page,
+ struct page *reply_page,
+ struct parent_image_info *pii)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ size_t reply_len = PAGE_SIZE;
+ void *p, *end;
+ int ret;
+
+ ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ "rbd", "get_parent", CEPH_OSD_FLAG_READ,
+ req_page, sizeof(u64), reply_page, &reply_len);
+ if (ret)
+ return ret;
+
+ p = page_address(reply_page);
+ end = p + reply_len;
+ ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
+ pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
+ if (IS_ERR(pii->image_id)) {
+ ret = PTR_ERR(pii->image_id);
+ pii->image_id = NULL;
+ return ret;
+ }
+ ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
+ pii->has_overlap = true;
+ ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
+static int get_parent_info(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
+{
+ struct page *req_page, *reply_page;
+ void *p;
+ int ret;
+
+ req_page = alloc_page(GFP_KERNEL);
+ if (!req_page)
+ return -ENOMEM;
+
+ reply_page = alloc_page(GFP_KERNEL);
+ if (!reply_page) {
+ __free_page(req_page);
+ return -ENOMEM;
+ }
+
+ p = page_address(req_page);
+ ceph_encode_64(&p, rbd_dev->spec->snap_id);
+ ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
+ if (ret > 0)
+ ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
+ pii);
+
+ __free_page(req_page);
+ __free_page(reply_page);
+ return ret;
+}
+
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
- size_t size;
- void *reply_buf = NULL;
- __le64 snapid;
- void *p;
- void *end;
- u64 pool_id;
- char *image_id;
- u64 snap_id;
- u64 overlap;
+ struct parent_image_info pii = { 0 };
int ret;
parent_spec = rbd_spec_alloc();
if (!parent_spec)
return -ENOMEM;
- size = sizeof (__le64) + /* pool_id */
- sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
- sizeof (__le64) + /* snap_id */
- sizeof (__le64); /* overlap */
- reply_buf = kmalloc(size, GFP_KERNEL);
- if (!reply_buf) {
- ret = -ENOMEM;
+ ret = get_parent_info(rbd_dev, &pii);
+ if (ret)
goto out_err;
- }
- snapid = cpu_to_le64(rbd_dev->spec->snap_id);
- ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
- &rbd_dev->header_oloc, "get_parent",
- &snapid, sizeof(snapid), reply_buf, size);
- dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
- if (ret < 0)
- goto out_err;
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+ pii.has_overlap, pii.overlap);
- p = reply_buf;
- end = reply_buf + ret;
- ret = -ERANGE;
- ceph_decode_64_safe(&p, end, pool_id, out_err);
- if (pool_id == CEPH_NOPOOL) {
+ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
+ *
+ * If !pii.has_overlap, the parent image spec is not
+ * applicable. It's there to avoid duplication in each
+ * snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
/* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
- if (pool_id > (u64)U32_MAX) {
+ if (pii.pool_id > (u64)U32_MAX) {
rbd_warn(NULL, "parent pool id too large (%llu > %u)",
- (unsigned long long)pool_id, U32_MAX);
+ (unsigned long long)pii.pool_id, U32_MAX);
goto out_err;
}
- image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
- if (IS_ERR(image_id)) {
- ret = PTR_ERR(image_id);
- goto out_err;
- }
- ceph_decode_64_safe(&p, end, snap_id, out_err);
- ceph_decode_64_safe(&p, end, overlap, out_err);
-
/*
* The parent won't change (except when the clone is
* flattened, already handled that). So we only need to
* record the parent spec we have not already done so.
*/
if (!rbd_dev->parent_spec) {
- parent_spec->pool_id = pool_id;
- parent_spec->image_id = image_id;
- parent_spec->snap_id = snap_id;
-
- /* TODO: support cloning across namespaces */
- if (rbd_dev->spec->pool_ns) {
- parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
- GFP_KERNEL);
- if (!parent_spec->pool_ns) {
- ret = -ENOMEM;
- goto out_err;
- }
+ parent_spec->pool_id = pii.pool_id;
+ if (pii.pool_ns && *pii.pool_ns) {
+ parent_spec->pool_ns = pii.pool_ns;
+ pii.pool_ns = NULL;
}
+ parent_spec->image_id = pii.image_id;
+ pii.image_id = NULL;
+ parent_spec->snap_id = pii.snap_id;
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
- } else {
- kfree(image_id);
}
/*
* We always update the parent overlap. If it's zero we issue
* a warning, as we will proceed as if there was no parent.
*/
- if (!overlap) {
+ if (!pii.overlap) {
if (parent_spec) {
/* refresh, careful to warn just once */
if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
}
}
- rbd_dev->parent_overlap = overlap;
+ rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
- kfree(reply_buf);
+ kfree(pii.pool_ns);
+ kfree(pii.image_id);
rbd_spec_put(parent_spec);
-
return ret;
}
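
The refactored rbd_dev_v2_parent_info() above hands the heap-allocated strings in struct parent_image_info over to the parent spec by NULLing the source pointers, so the single out_err cleanup path frees only what was not adopted. Below is a minimal userspace sketch of that ownership-transfer pattern; the struct and field names are invented for illustration and are not the kernel types.

/* Illustrative userspace sketch (not the kernel code): transfer ownership of heap
 * strings out of a temporary info struct by NULLing the source pointer, so one
 * cleanup path can free whatever was not handed off. All names are made up. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct parent_info { char *pool_ns; char *image_id; };
struct parent_spec { char *pool_ns; char *image_id; };

static void adopt(struct parent_spec *spec, struct parent_info *pii)
{
    if (pii->pool_ns && *pii->pool_ns) {
        spec->pool_ns = pii->pool_ns;   /* steal the string */
        pii->pool_ns = NULL;            /* cleanup below won't free it */
    }
    spec->image_id = pii->image_id;
    pii->image_id = NULL;
}

int main(void)
{
    struct parent_info pii = { strdup("ns1"), strdup("img1") };
    struct parent_spec spec = { 0 };

    adopt(&spec, &pii);
    printf("spec: %s/%s\n", spec.pool_ns, spec.image_id);

    /* single cleanup path, mirroring the out_err labels */
    free(pii.pool_ns);      /* NULL here; free(NULL) is a no-op */
    free(pii.image_id);
    free(spec.pool_ns);
    free(spec.image_id);
    return 0;
}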
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b55b245e8052..fd1e19f1a49f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants,
"Maximum number of grants to map persistently");
/*
+ * How long a persistent grant is allowed to remain allocated without being in
+ * use. The time is in seconds, 0 means indefinitely long.
+ */
+
+static unsigned int xen_blkif_pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+ uint, 0644);
+MODULE_PARM_DESC(persistent_grant_unused_seconds,
+ "Time in seconds an unused persistent grant is allowed to "
+ "remain allocated. Default is 60, 0 means unlimited.");
+
+/*
* Maximum number of rings/queues blkback supports, allow as many queues as there
* are CPUs if user has not specified a value.
*/
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10
+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+ return xen_blkif_pgrant_timeout &&
+ (jiffies - persistent_gnt->last_used >=
+ HZ * xen_blkif_pgrant_timeout);
+}
+
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
}
}
- bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
- set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ persistent_gnt->active = true;
/* Add new node and rebalance tree. */
rb_link_node(&(persistent_gnt->node), parent, new);
rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
else if (gref > data->gnt)
node = node->rb_right;
else {
- if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+ if (data->active) {
pr_alert_ratelimited("requesting a grant already in use\n");
return NULL;
}
- set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+ data->active = true;
atomic_inc(&ring->persistent_gnt_in_use);
return data;
}
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
static void put_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *persistent_gnt)
{
- if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ if (!persistent_gnt->active)
pr_alert_ratelimited("freeing a grant already unused\n");
- set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
- clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ persistent_gnt->last_used = jiffies;
+ persistent_gnt->active = false;
atomic_dec(&ring->persistent_gnt_in_use);
}
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
struct persistent_gnt *persistent_gnt;
struct rb_node *n;
unsigned int num_clean, total;
- bool scan_used = false, clean_used = false;
+ bool scan_used = false;
struct rb_root *root;
- if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
- (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
- !ring->blkif->vbd.overflow_max_grants)) {
- goto out;
- }
-
if (work_busy(&ring->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
goto out;
}
- num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
- num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
- num_clean = min(ring->persistent_gnt_c, num_clean);
- if ((num_clean == 0) ||
- (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
- goto out;
+ if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
+ (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+ !ring->blkif->vbd.overflow_max_grants)) {
+ num_clean = 0;
+ } else {
+ num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+ num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
+ num_clean;
+ num_clean = min(ring->persistent_gnt_c, num_clean);
+ pr_debug("Going to purge at least %u persistent grants\n",
+ num_clean);
+ }
/*
* At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
* number of grants.
*/
- total = num_clean;
-
- pr_debug("Going to purge %u persistent grants\n", num_clean);
+ total = 0;
BUG_ON(!list_empty(&ring->persistent_purge_list));
root = &ring->persistent_gnts;
@@ -412,46 +428,37 @@ purge_list:
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
- if (clean_used) {
- clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+ if (persistent_gnt->active)
continue;
- }
-
- if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
continue;
- if (!scan_used &&
- (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+ if (scan_used && total >= num_clean)
continue;
rb_erase(&persistent_gnt->node, root);
list_add(&persistent_gnt->remove_node,
&ring->persistent_purge_list);
- if (--num_clean == 0)
- goto finished;
+ total++;
}
/*
- * If we get here it means we also need to start cleaning
+ * Check whether we also need to start cleaning
* grants that were used since last purge in order to cope
* with the requested num
*/
- if (!scan_used && !clean_used) {
- pr_debug("Still missing %u purged frames\n", num_clean);
+ if (!scan_used && total < num_clean) {
+ pr_debug("Still missing %u purged frames\n", num_clean - total);
scan_used = true;
goto purge_list;
}
-finished:
- if (!clean_used) {
- pr_debug("Finished scanning for grants to clean, removing used flag\n");
- clean_used = true;
- goto purge_list;
- }
- ring->persistent_gnt_c -= (total - num_clean);
- ring->blkif->vbd.overflow_max_grants = 0;
+ if (total) {
+ ring->persistent_gnt_c -= total;
+ ring->blkif->vbd.overflow_max_grants = 0;
- /* We can defer this work */
- schedule_work(&ring->persistent_purge_work);
- pr_debug("Purged %u/%u\n", (total - num_clean), total);
+ /* We can defer this work */
+ schedule_work(&ring->persistent_purge_work);
+ pr_debug("Purged %u/%u\n", num_clean, total);
+ }
out:
return;
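
The new persistent_grant_unused_seconds logic above stamps a grant with jiffies when it is released and treats it as a purge candidate once it has sat unused longer than the timeout, with 0 meaning never expire. A rough userspace sketch of the same test, using CLOCK_MONOTONIC in place of jiffies (names are hypothetical):

/* Minimal sketch of the timeout predicate, with a monotonic clock standing in
 * for jiffies; a timeout of 0 means "never expire". Not the xen-blkback code. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned int pgrant_timeout = 60;        /* seconds, like the module param */

struct grant { struct timespec last_used; bool active; };

static bool grant_timed_out(const struct grant *g)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return pgrant_timeout &&
           (now.tv_sec - g->last_used.tv_sec) >= (time_t)pgrant_timeout;
}

int main(void)
{
    struct grant g = { .active = false };

    clock_gettime(CLOCK_MONOTONIC, &g.last_used);
    g.last_used.tv_sec -= 120;      /* pretend it was released two minutes ago */
    printf("timed out: %d\n", grant_timed_out(&g));
    return 0;
}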
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index ecb35fe8ca8d..1d3002d773f7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -233,16 +233,6 @@ struct xen_vbd {
struct backend_info;
-/* Number of available flags */
-#define PERSISTENT_GNT_FLAGS_SIZE 2
-/* This persistent grant is currently in use */
-#define PERSISTENT_GNT_ACTIVE 0
-/*
- * This persistent grant has been used, this flag is set when we remove the
- * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
- */
-#define PERSISTENT_GNT_WAS_ACTIVE 1
-
/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE 32
@@ -250,7 +240,8 @@ struct persistent_gnt {
struct page *page;
grant_ref_t gnt;
grant_handle_t handle;
- DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+ unsigned long last_used;
+ bool active;
struct rb_node node;
struct list_head remove_node;
};
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
wait_queue_head_t pending_free_wq;
/* Tree to store persistent grants. */
- spinlock_t pers_gnts_lock;
struct rb_root persistent_gnts;
unsigned int persistent_gnt_c;
atomic_t persistent_gnt_in_use;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8986adab9bf5..a71d817e900d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -46,6 +46,7 @@
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
+static struct delayed_work blkfront_work;
+static LIST_HEAD(info_list);
/*
* Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
/* Save uncomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
+ struct list_head info_list;
};
static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
return err;
}
+static void free_info(struct blkfront_info *info)
+{
+ list_del(&info->info_list);
+ kfree(info);
+}
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
destroy_blkring:
blkif_free(info, 0);
- kfree(info);
+ mutex_lock(&blkfront_mutex);
+ free_info(info);
+ mutex_unlock(&blkfront_mutex);
+
dev_set_drvdata(&dev->dev, NULL);
return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
+ mutex_lock(&blkfront_mutex);
+ list_add(&info->info_list, &info_list);
+ mutex_unlock(&blkfront_mutex);
+
return 0;
}
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
indirect_segments = 0;
info->max_indirect_segments = indirect_segments;
+
+ if (info->feature_persistent) {
+ mutex_lock(&blkfront_mutex);
+ schedule_delayed_work(&blkfront_work, HZ * 10);
+ mutex_unlock(&blkfront_mutex);
+ }
}
/*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
mutex_unlock(&info->mutex);
if (!bdev) {
- kfree(info);
+ mutex_lock(&blkfront_mutex);
+ free_info(info);
+ mutex_unlock(&blkfront_mutex);
return 0;
}
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
if (info && !bdev->bd_openers) {
xlvbd_release_gendisk(info);
disk->private_data = NULL;
- kfree(info);
+ mutex_lock(&blkfront_mutex);
+ free_info(info);
+ mutex_unlock(&blkfront_mutex);
}
mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
xlvbd_release_gendisk(info);
disk->private_data = NULL;
- kfree(info);
+ free_info(info);
}
out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
.is_ready = blkfront_is_ready,
};
+static void purge_persistent_grants(struct blkfront_info *info)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct grant *gnt_list_entry, *tmp;
+
+ spin_lock_irqsave(&rinfo->ring_lock, flags);
+
+ if (rinfo->persistent_gnts_c == 0) {
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ continue;
+ }
+
+ list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+ node) {
+ if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+ gnttab_query_foreign_access(gnt_list_entry->gref))
+ continue;
+
+ list_del(&gnt_list_entry->node);
+ gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+ rinfo->persistent_gnts_c--;
+ __free_page(gnt_list_entry->page);
+ kfree(gnt_list_entry);
+ }
+
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ }
+}
+
+static void blkfront_delay_work(struct work_struct *work)
+{
+ struct blkfront_info *info;
+ bool need_schedule_work = false;
+
+ mutex_lock(&blkfront_mutex);
+
+ list_for_each_entry(info, &info_list, info_list) {
+ if (info->feature_persistent) {
+ need_schedule_work = true;
+ mutex_lock(&info->mutex);
+ purge_persistent_grants(info);
+ mutex_unlock(&info->mutex);
+ }
+ }
+
+ if (need_schedule_work)
+ schedule_delayed_work(&blkfront_work, HZ * 10);
+
+ mutex_unlock(&blkfront_mutex);
+}
+
static int __init xlblk_init(void)
{
int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
+ if (!xen_has_pv_disk_devices())
+ return -ENODEV;
+
+ if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+ pr_warn("xen_blk: can't get major %d with name %s\n",
+ XENVBD_MAJOR, DEV_NAME);
+ return -ENODEV;
+ }
+
if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
xen_blkif_max_queues = nr_cpus;
}
- if (!xen_has_pv_disk_devices())
- return -ENODEV;
-
- if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
- printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
- XENVBD_MAJOR, DEV_NAME);
- return -ENODEV;
- }
+ INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
ret = xenbus_register_frontend(&blkfront_driver);
if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
+ cancel_delayed_work_sync(&blkfront_work);
+
xenbus_unregister_driver(&blkfront_driver);
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
kfree(minors);
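
blkfront now keeps a delayed work item alive only while at least one device in info_list still negotiates persistent grants, re-arming itself every 10 seconds and dropping no-longer-accessible grants under the ring lock. The loop below is a loose userspace analogue of that self-rescheduling cleanup, with a mutex and sleep() standing in for blkfront_mutex and the delayed work; nothing here is the actual driver code.

/* Loose analogue: walk a device list under a lock, purge devices using the
 * feature, and keep the periodic job alive only while someone still needs it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NDEVS 3

struct dev { bool feature_persistent; int cached; };

static struct dev devs[NDEVS] = { { true, 5 }, { false, 0 }, { true, 2 } };
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void purge(struct dev *d)
{
    d->cached = 0;                  /* drop cached grants/pages */
}

static void periodic_purge(void)
{
    bool rearm = true;

    while (rearm) {
        rearm = false;
        pthread_mutex_lock(&list_lock);
        for (int i = 0; i < NDEVS; i++) {
            if (devs[i].feature_persistent) {
                rearm = true;
                purge(&devs[i]);
                devs[i].feature_persistent = false; /* terminate the demo */
            }
        }
        pthread_mutex_unlock(&list_lock);
        if (rearm)
            sleep(1);               /* stands in for the 10 s re-arm */
    }
}

int main(void)
{
    periodic_purge();
    printf("all purged\n");
    return 0;
}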
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2df11cc08a46..845b0314ce3a 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on GPIOLIB
+ depends on ACPI
select BT_HCIUART_3WIRE
select BT_RTL
help
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index ed2a5c7cb77f..4593baff2bc9 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
fw_size = fw->size;
/* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30)
- return -EINVAL;
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
fw_size -= 30;
fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
fw_ptr += dlen;
}
+free_fw:
release_firmware(fw);
-
return err;
}
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index c9bac9dc4637..e4fe954e63a9 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)
/**
* syc_ioremap - ioremap register space for the interconnect target module
- * @ddata: deviec driver data
+ * @ddata: device driver data
*
* Note that the interconnect target module registers can be anywhere
- * within the first child device address space. For example, SGX has
- * them at offset 0x1fc00 in the 32MB module address space. We just
- * what we need around the interconnect target module registers.
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * the interconnect target module registers are at the beginning of
+ * the module range though.
*/
static int sysc_ioremap(struct sysc *ddata)
{
- u32 size = 0;
-
- if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
- size = ddata->offsets[SYSC_SYSSTATUS];
- else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
- size = ddata->offsets[SYSC_SYSCONFIG];
- else if (ddata->offsets[SYSC_REVISION] >= 0)
- size = ddata->offsets[SYSC_REVISION];
- else
- return -EINVAL;
+ int size;
- size &= 0xfff00;
- size += SZ_256;
+ size = max3(ddata->offsets[SYSC_REVISION],
+ ddata->offsets[SYSC_SYSCONFIG],
+ ddata->offsets[SYSC_SYSSTATUS]);
+
+ if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
+ return -EINVAL;
ddata->module_va = devm_ioremap(ddata->dev,
ddata->module_pa,
- size);
+ size + sizeof(u32));
if (!ddata->module_va)
return -EIO;
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
if (!pm_runtime_status_suspended(dev)) {
error = pm_generic_runtime_suspend(dev);
if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
+ dev_warn(dev, "%s busy at %i: %i\n",
+ __func__, __LINE__, error);
- return error;
+ return 0;
}
error = sysc_runtime_suspend(ddata->dev);
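
The reworked sysc_ioremap() above sizes the mapping from the largest known register offset plus one 32-bit register, rejecting it if that would overrun the module range. A small userspace sketch of the same calculation, with made-up offsets (missing registers are passed as -1):

/* Sketch of the size calculation: take the largest known register offset, add
 * room for one u32, and fail if it would not fit in the module range. */
#include <stdint.h>
#include <stdio.h>

static int max3(int a, int b, int c)
{
    int m = a > b ? a : b;
    return m > c ? m : c;
}

static int map_size(int rev_off, int cfg_off, int sts_off, size_t module_size)
{
    int size = max3(rev_off, cfg_off, sts_off);

    if (size < 0 || size + sizeof(uint32_t) > module_size)
        return -1;                  /* -EINVAL in the driver */
    return size + sizeof(uint32_t);
}

int main(void)
{
    /* SGX-style layout: registers at 0x1fc00 in a 32 MB module range */
    printf("map %d bytes\n", map_size(0x1fc00, 0x1fc10, -1, (size_t)32 << 20));
    return 0;
}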
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 113fc6edb2b0..a5d5a96479bf 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) ||
(arg == CDSL_CURRENT || arg == CDSL_NONE))
return cdi->ops->drive_status(cdi, CDSL_CURRENT);
- if (((int)arg >= cdi->capacity))
+ if (arg >= cdi->capacity)
return -EINVAL;
return cdrom_slot_status(cdi, arg);
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ce277ee0a28a..40728491f37b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
that CPU manufacturer (perhaps with the insistence or mandate
of a Nation State's intelligence or law enforcement agencies)
has not installed a hidden back door to compromise the CPU's
- random number generation facilities.
-
+ random number generation facilities. This can also be configured
+ at boot with "random.trust_cpu=on/off".
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bf5f99fc36f1..c75b6cdf0053 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
static void invalidate_batched_entropy(void);
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+ return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
static void crng_initialize(struct crng_state *crng)
{
int i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
}
crng->state[i] ^= rv;
}
-#ifdef CONFIG_RANDOM_TRUST_CPU
- if (arch_init) {
+ if (trust_cpu && arch_init) {
crng_init = 2;
pr_notice("random: crng done (trusting CPU's manufacturer)\n");
}
-#endif
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 740af90a9508..c5edf8f2fd19 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
if (!clk_base)
goto npcm7xx_init_error;
- npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
- NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
+ npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
+ NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
if (!npcm7xx_clk_data)
goto npcm7xx_init_np_err;
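
The npcm7xx fix above replaces an open-coded size calculation (which mixed up the pointer size and the structure size) with struct_size() for a header plus flexible-array allocation. The sketch below shows the shape of that calculation with a simplified macro; the kernel's struct_size() additionally guards against integer overflow.

/* Simplified struct_size()-style allocation: one block holding a header plus N
 * trailing elements of a flexible array member. Types here are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct clk_data {
    unsigned int num;
    void *hws[];                    /* flexible array member */
};

#define STRUCT_SIZE(type, member, n) \
    (sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
    const unsigned int nclocks = 47;
    struct clk_data *d = calloc(1, STRUCT_SIZE(struct clk_data, hws, nclocks));

    if (!d)
        return 1;
    d->num = nclocks;
    printf("allocated %zu bytes for %u clocks\n",
           STRUCT_SIZE(struct clk_data, hws, nclocks), d->num);
    free(d);
    return 0;
}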
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index fb62f3938008..3a0996f2d556 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
- clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk);
+ clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 110483f0e3fb..e26a40971b26 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx == -1)
idx = i; /* first enabled state */
if (s->target_residency > data->predicted_us) {
- if (!tick_nohz_tick_stopped())
+ if (data->predicted_us < TICK_USEC)
break;
+ if (!tick_nohz_tick_stopped()) {
+ /*
+ * If the state selected so far is shallow,
+ * waking up early won't hurt, so retain the
+ * tick in that case and let the governor run
+ * again in the next iteration of the loop.
+ */
+ expected_interval = drv->states[idx].target_residency;
+ break;
+ }
+
/*
* If the state selected so far is shallow and this
* state's target residency matches the time till the
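
The menu-governor hunk above stops descending to deeper idle states when the predicted idle period is shorter than a tick, and avoids stopping the tick just for a shallow state by retaining it and letting the governor run again. The loop below is only a simplified illustration of that selection idea, with invented state tables and thresholds, not the governor itself.

/* Simplified illustration: pick the deepest enabled state whose target residency
 * fits the predicted idle time, but stay shallower than the tick period while the
 * tick is still running. Numbers and names are invented. */
#include <stdbool.h>
#include <stdio.h>

#define TICK_USEC 4000u

struct state { unsigned int target_residency; bool disabled; };

static int pick_state(const struct state *s, int n,
                      unsigned int predicted_us, bool tick_stopped)
{
    int idx = -1;

    for (int i = 0; i < n; i++) {
        if (s[i].disabled)
            continue;
        if (idx == -1)
            idx = i;                            /* first enabled state */
        if (s[i].target_residency > predicted_us)
            break;
        if (!tick_stopped && s[i].target_residency > TICK_USEC)
            break;                              /* keep the tick, stay shallow */
        idx = i;
    }
    return idx;
}

int main(void)
{
    struct state states[] = { { 1, false }, { 100, false }, { 5000, false } };

    printf("tick running: state %d\n", pick_state(states, 3, 8000, false));
    printf("tick stopped: state %d\n", pick_state(states, 3, 8000, true));
    return 0;
}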
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
int ret = 0;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
- return -EINVAL;
+ goto badkey;
}
ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return 0;
+ return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
/* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_p;
}
- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_q;
}
- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
return 0;
unmap_tmp1:
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_dq;
}
- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_qinv;
}
- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
return 0;
unmap_tmp1:
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
/* Unmap just-run descriptor so we can post-process */
- dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+ dma_unmap_single(dev,
+ caam_dma_to_cpu(jrp->outring[hw_idx].desc),
jrp->entinfo[sw_idx].desc_size,
DMA_TO_DEVICE);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 9a476bb6d4c7..af596455b420 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
/* requests in backlog queues */
atomic_t backlog_count;
+ int write_idx;
/* command size 32B/64B */
u8 instr_size;
u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
struct bh_data *slc;
};
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
#define NITROX_UCODE_LOADED 0
#define NITROX_READY 1
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index ebe267379ac9..4d31df07777f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
cmdq->qsize = (qsize + PKT_IN_ALIGN);
+ cmdq->write_idx = 0;
spin_lock_init(&cmdq->response_lock);
spin_lock_init(&cmdq->cmdq_lock);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index deaefd532aaa..4a362fc22f62 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
* Invalid flag options in AES-CCM IV.
*/
+static inline int incr_index(int index, int count, int max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
+
/**
* dma_free_sglist - unmap and free the sg lists.
* @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = sr->ndev;
- union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
- u64 offset;
+ int idx;
u8 *ent;
spin_lock_bh(&cmdq->cmdq_lock);
- /* get the next write offset */
- offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
- pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+ idx = cmdq->write_idx;
/* copy the instruction */
- ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+ ent = cmdq->head + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
- /* flush the command queue updates */
- dma_wmb();
- sr->tstamp = jiffies;
atomic_set(&sr->status, REQ_POSTED);
response_list_add(sr, cmdq);
+ sr->tstamp = jiffies;
+ /* flush the command queue updates */
+ dma_wmb();
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
/* orders the doorbell rings */
mmiowb();
+ cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
spin_unlock_bh(&cmdq->cmdq_lock);
}
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
struct nitrox_softreq *sr, *tmp;
int ret = 0;
+ if (!atomic_read(&cmdq->backlog_count))
+ return 0;
+
spin_lock_bh(&cmdq->backlog_lock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* submit until space available */
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
- ret = -EBUSY;
+ ret = -ENOSPC;
break;
}
/* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
struct nitrox_cmdq *cmdq = sr->cmdq;
struct nitrox_device *ndev = sr->ndev;
- int ret = -EBUSY;
+
+ /* try to post backlog requests */
+ post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EAGAIN;
-
+ return -ENOSPC;
+ /* add to backlog list */
backlog_list_add(sr, cmdq);
- } else {
- ret = post_backlog_cmds(cmdq);
- if (ret) {
- backlog_list_add(sr, cmdq);
- return ret;
- }
- post_se_instr(sr, cmdq);
- ret = -EINPROGRESS;
+ return -EBUSY;
}
- return ret;
+ post_se_instr(sr, cmdq);
+
+ return -EINPROGRESS;
}
/**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
*/
sr->instr.fdata[0] = *((u64 *)&req->gph);
sr->instr.fdata[1] = 0;
- /* flush the soft_req changes before posting the cmd */
- wmb();
ret = nitrox_enqueue_request(sr);
- if (ret == -EAGAIN)
+ if (ret == -ENOSPC)
goto send_fail;
return ret;
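
post_se_instr() now maintains its own software write index instead of reading the doorbell register back, advancing it with incr_index() so it wraps at the queue length. A tiny standalone sketch of that wraparound arithmetic (it assumes count never exceeds the queue length):

/* Ring-index arithmetic: advance a write index by `count` slots and wrap at the
 * queue length without a modulo on every enqueue. Assumes count <= max. */
#include <stdio.h>

static int incr_index(int index, int count, int max)
{
    if (index + count >= max)
        return index + count - max;
    return index + count;
}

int main(void)
{
    int idx = 0, qlen = 8;

    for (int i = 0; i < 20; i++) {
        printf("%d ", idx);
        idx = incr_index(idx, 1, qlen);
    }
    printf("\n");                   /* prints 0..7 repeating */
    return 0;
}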
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6ba024..7725b6ee14ef 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -96,6 +96,10 @@ enum csk_flags {
CSK_CONN_INLINE, /* Connection on HW */
};
+enum chtls_cdev_state {
+ CHTLS_CDEV_STATE_UP = 1
+};
+
struct listen_ctx {
struct sock *lsk;
struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
unsigned int send_page_order;
int max_host_sndbuf;
struct key_map kmap;
+ unsigned int cdev_state;
};
struct chtls_hws {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 9b07f9165658..f59b044ebd25 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
tls_register_device(&cdev->tlsdev);
+ cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
struct chtls_dev *cdev, *tmp;
mutex_lock(&cdev_mutex);
- list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
- chtls_free_uld(cdev);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+ if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+ chtls_free_uld(cdev);
+ }
mutex_unlock(&cdev_mutex);
}
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->enc_key, walk.iv, 1);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->dec_key, walk.iv, 0);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
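
The VMX AES changes above shrink the preempt/pagefault-disabled VSX window to each walk chunk, because blkcipher_walk_done() may fault pages in and must not run with preemption disabled. The userspace analogue below models that window with a mutex; it only illustrates the structure of the fix, not the crypto.

/* Structure-only analogue: hold the "no preemption / no faults" window (a mutex
 * here) around each chunk of work, and release it before anything that may sleep. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t fpu_ctx = PTHREAD_MUTEX_INITIALIZER;

static void crypt_chunk(int i)
{
    printf("chunk %d encrypted\n", i);  /* stands in for one walk chunk */
}

static void may_sleep(void)
{
    usleep(1000);       /* stands in for blkcipher_walk_done() faulting in pages */
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        pthread_mutex_lock(&fpu_ctx);   /* enable VSX, preemption off */
        crypt_chunk(i);
        pthread_mutex_unlock(&fpu_ctx); /* restore before code that can sleep */
        may_sleep();
    }
    return 0;
}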
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ ret = blkcipher_walk_virt(desc, &walk);
+
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
- blkcipher_walk_init(&walk, dst, src, nbytes);
-
- ret = blkcipher_walk_virt(desc, &walk);
iv = walk.iv;
memset(tweak, 0, AES_BLOCK_SIZE);
aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
if (enc)
aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
else
aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
}
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 6fd46083e629..bbe4d72ca105 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
{
struct file *filp = vmf->vma->vm_file;
unsigned long fault_size;
- int rc, id;
+ vm_fault_t rc = VM_FAULT_SIGBUS;
+ int id;
pfn_t pfn;
struct dev_dax *dev_dax = filp->private_data;
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 721e6c57beae..64342944d917 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
- dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
+ if (!dom_info->sustained_freq_khz ||
+ !dom_info->sustained_perf_level)
+ /* CPUFreq converts to kHz, hence default 1000 */
+ dom_info->mult_factor = 1000;
+ else
+ dom_info->mult_factor =
+ (dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
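
The SCMI perf change above avoids a divide-by-zero when firmware reports a zero sustained frequency or performance level, falling back to a 1000x factor so performance levels still scale to kHz. A short sketch of that guarded calculation with example numbers:

/* Guarded scale-factor calculation: default to 1000 (level -> kHz) when either
 * firmware-reported value is zero, instead of dividing by zero. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mult_factor(uint32_t sustained_freq_khz, uint32_t sustained_perf_level)
{
    if (!sustained_freq_khz || !sustained_perf_level)
        return 1000;                /* CPUFreq works in kHz */
    return (sustained_freq_khz * 1000) / sustained_perf_level;
}

int main(void)
{
    printf("%u\n", mult_factor(2000000, 2000)); /* 1000000 */
    printf("%u\n", mult_factor(0, 0));          /* default 1000 */
    return 0;
}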
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3530ccd17e04..da9781a2ef4a 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@ struct adp5588_gpio {
uint8_t int_en[3];
uint8_t irq_mask[3];
uint8_t irq_stat[3];
+ uint8_t int_input_en[3];
+ uint8_t int_lvl_cached[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+ if (dev->int_input_en[i]) {
+ mutex_lock(&dev->lock);
+ dev->dir[i] &= ~dev->int_input_en[i];
+ dev->int_input_en[i] = 0;
+ adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+ dev->dir[i]);
+ mutex_unlock(&dev->lock);
+ }
+
+ if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+ dev->int_lvl_cached[i] = dev->int_lvl[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+ dev->int_lvl[i]);
+ }
+
if (dev->int_en[i] ^ dev->irq_mask[i]) {
dev->int_en[i] = dev->irq_mask[i];
adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
dev->int_en[i]);
}
+ }
mutex_unlock(&dev->irq_lock);
}
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
else
return -EINVAL;
- adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
- adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
- dev->int_lvl[bank]);
+ dev->int_input_en[bank] |= bit;
return 0;
}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 28da700f5f52..044888fd96a1 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
out_unregister:
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
+ clk_disable_unprepare(gpio->clk);
return err;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c48ed9d89ff5..8b9d7e42c600 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,7 +25,6 @@
struct acpi_gpio_event {
struct list_head node;
- struct list_head initial_sync_list;
acpi_handle handle;
unsigned int pin;
unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
struct mutex conn_lock;
struct gpio_chip *chip;
struct list_head events;
+ struct list_head deferred_req_irqs_list_entry;
};
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains gpiochips
+ * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin);
}
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
- mutex_lock(&acpi_gpio_initial_sync_list_lock);
- list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
- mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
- mutex_lock(&acpi_gpio_initial_sync_list_lock);
- if (!list_empty(&event->initial_sync_list))
- list_del_init(&event->initial_sync_list);
- mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
- value = gpiod_get_value(desc);
+ value = gpiod_get_value_cansleep(desc);
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
event->irq = irq;
event->pin = pin;
event->desc = desc;
- INIT_LIST_HEAD(&event->initial_sync_list);
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
* may refer to OperationRegions from other (builtin) drivers which
* may be probed after us.
*/
- if (handler == acpi_gpio_irq_handler &&
- (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
- ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
- acpi_gpio_add_to_initial_sync_list(event);
+ if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+ handler(event->irq, event);
return AE_OK;
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
struct acpi_gpio_chip *acpi_gpio;
acpi_handle handle;
acpi_status status;
+ bool defer;
if (!chip->parent || !chip->to_irq)
return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ defer = !acpi_gpio_deferred_req_irqs_done;
+ if (defer)
+ list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+ &acpi_gpio_deferred_req_irqs_list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ if (defer)
+ return;
+
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_request_interrupt, acpi_gpio);
}
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+ list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
- acpi_gpio_del_from_initial_sync_list(event);
-
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq);
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
acpi_gpio->chip = chip;
INIT_LIST_HEAD(&acpi_gpio->events);
+ INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
{
- struct acpi_gpio_event *event, *ep;
+ struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ list_for_each_entry_safe(acpi_gpio, tmp,
+ &acpi_gpio_deferred_req_irqs_list,
+ deferred_req_irqs_list_entry) {
+ acpi_handle handle;
- mutex_lock(&acpi_gpio_initial_sync_list_lock);
- list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
- initial_sync_list) {
- acpi_evaluate_object(event->handle, NULL, NULL, NULL);
- list_del_init(&event->initial_sync_list);
+ handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+ acpi_walk_resources(handle, "_AEI",
+ acpi_gpiochip_request_interrupt, acpi_gpio);
+
+ list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
}
- mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+ acpi_gpio_deferred_req_irqs_done = true;
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
return 0;
}
/* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
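
The comment block above describes the deferral scheme: GpioInt event registration requested by builtin drivers before late_init is parked on a list and replayed from a late_initcall_sync, after which new requests run immediately. A compact userspace sketch of that "defer until flushed, then run inline" pattern follows; the list handling and locking are simplified and all names are invented.

/* Sketch of deferred registration: early requests are queued, a one-time flush
 * replays them, and later requests run immediately. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEFERRED 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int deferred[MAX_DEFERRED];
static int ndeferred;
static bool flush_done;

static void do_request(int id)
{
    printf("wiring up interrupts for chip %d\n", id);
}

static void request_interrupts(int id)
{
    bool defer;

    pthread_mutex_lock(&lock);
    defer = !flush_done;
    if (defer && ndeferred < MAX_DEFERRED)
        deferred[ndeferred++] = id;     /* park it until the late flush */
    pthread_mutex_unlock(&lock);

    if (!defer)
        do_request(id);
}

static void late_flush(void)            /* plays the role of the late_initcall_sync */
{
    pthread_mutex_lock(&lock);
    for (int i = 0; i < ndeferred; i++)
        do_request(deferred[i]);
    ndeferred = 0;
    flush_done = true;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    request_interrupts(1);              /* parked: flush has not run yet */
    request_interrupts(2);              /* parked */
    late_flush();                       /* replays 1 and 2 */
    request_interrupts(3);              /* runs immediately */
    return 0;
}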
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a4f1157d6aa0..d4e7a09598fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
struct of_phandle_args *gpiospec = data;
return chip->gpiodev->dev.of_node == gpiospec->np &&
+ chip->of_xlate &&
chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 502b94fb116a..b6e9df11115d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r)
return r;
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
- if (!parser->ctx->preamble_presented) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
- parser->ctx->preamble_presented = true;
- }
- }
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ parser->job->preamble_status |=
+ AMDGPU_PREAMBLE_IB_PRESENT;
if (parser->ring && parser->ring != ring)
return -EINVAL;
@@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
int r;
+ job = p->job;
+ p->job = NULL;
+
+ r = drm_sched_job_init(&job->base, entity, p->filp);
+ if (r)
+ goto error_unlock;
+
+ /* No memory allocation is allowed while holding the mn lock */
amdgpu_mn_lock(p->mn);
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
+ r = -ERESTARTSYS;
+ goto error_abort;
}
}
- job = p->job;
- p->job = NULL;
-
- r = drm_sched_job_init(&job->base, entity, p->filp);
- if (r) {
- amdgpu_job_free(job);
- amdgpu_mn_unlock(p->mn);
- return r;
- }
-
job->owner = p->filp;
p->fence = dma_fence_get(&job->base.s_fence->finished);
@@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_cs_post_dependencies(p);
+ if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ !p->ctx->preamble_presented) {
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ p->ctx->preamble_presented = true;
+ }
+
cs->out.handle = seq;
job->uf_sequence = seq;
@@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_mn_unlock(p->mn);
return 0;
+
+error_abort:
+ dma_fence_put(&job->base.s_fence->finished);
+ job->base.s_fence = NULL;
+
+error_unlock:
+ amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
+ return r;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 5518e623fed2..51b5e977ca88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
+ need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+ (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
skip_preamble = ring->current_ctx == fence_ctx;
- need_ctx_switch = ring->current_ctx != fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8f98629fbe59..7b4e657a95c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- mutex_lock(&adev->pm.mutex);
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.ac_power = true;
- else
- adev->pm.ac_power = false;
- mutex_unlock(&adev->pm.mutex);
-
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ece0ac703e27..b17771dd5ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* is validated on next vm use to avoid fault.
* */
list_move_tail(&base->vm_status, &vm->evicted);
+ base->moved = true;
}
/**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
uint64_t addr;
int r;
- addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
+ addr = amdgpu_bo_gpu_offset(bo);
if (ats_entries) {
uint64_t ats_value;
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
* @fragment_size_default: Default PTE fragment size
* @max_level: max VMPT level
* @max_bits: max address space size in bits
*
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits)
{
+ unsigned int max_size = 1 << (max_bits - 30);
+ unsigned int vm_size;
uint64_t tmp;
/* adjust vm size first */
if (amdgpu_vm_size != -1) {
- unsigned max_size = 1 << (max_bits - 30);
-
vm_size = amdgpu_vm_size;
if (vm_size > max_size) {
dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
amdgpu_vm_size, max_size);
vm_size = max_size;
}
+ } else {
+ struct sysinfo si;
+ unsigned int phys_ram_gb;
+
+ /* Optimal VM size depends on the amount of physical
+ * RAM available. Underlying requirements and
+ * assumptions:
+ *
+ * - Need to map system memory and VRAM from all GPUs
+ * - VRAM from other GPUs not known here
+ * - Assume VRAM <= system memory
+ * - On GFX8 and older, VM space can be segmented for
+ * different MTYPEs
+ * - Need to allow room for fragmentation, guard pages etc.
+ *
+ * This adds up to a rough guess of system memory x3.
+ * Round up to power of two to maximize the available
+ * VM size with the given page table size.
+ */
+ si_meminfo(&si);
+ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+ (1 << 30) - 1) >> 30;
+ vm_size = roundup_pow_of_two(
+ min(max(phys_ram_gb * 3, min_vm_size), max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
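
The comment added to amdgpu_vm_adjust_size() above explains the heuristic: aim for roughly three times physical RAM in GB, clamp between the requested minimum and what the page tables can address, then round up to a power of two. A standalone sketch of that arithmetic (simplified, and not the driver code):

/* Sizing heuristic sketch: ~3x physical RAM in GB, clamped to [min, max], rounded
 * up to a power of two. Inputs in main() are example values only. */
#include <stdint.h>
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int v)
{
    unsigned int p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

static unsigned int vm_size_gb(uint64_t phys_ram_bytes,
                               unsigned int min_vm_size, unsigned int max_bits)
{
    unsigned int max_size = 1u << (max_bits - 30);
    unsigned int phys_ram_gb = (phys_ram_bytes + (1ull << 30) - 1) >> 30;
    unsigned int want = phys_ram_gb * 3;

    if (want < min_vm_size)
        want = min_vm_size;
    if (want > max_size)
        want = max_size;
    return roundup_pow_of_two(want);
}

int main(void)
{
    /* 16 GB of RAM, 32 GB minimum, 48-bit addressable VM space -> 64 GB */
    printf("%u GB\n", vm_size_gb(16ull << 30, 32, 48));
    return 0;
}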
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 67a15d439ac0..9fa9df0c5e7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5cd45210113f..5a9534a82d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
default:
break;
}
-
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 75317f283c69..ad151fefa41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
amdgpu_gart_table_vram_unpin(adev);
}
-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
u32 status, u32 addr, u32 mc_client)
{
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v6_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 36dc367c4b45..f8d8a3a73e42 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
}
/**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
-/**
* gmc_v7_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
kfree(adev->gmc.vm_fault_info);
- gmc_v7_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 70fc97b59b4f..9333109b210d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
}
/**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
-/**
* gmc_v8_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
kfree(adev->gmc.vm_fault_info);
- gmc_v8_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 399a5db27649..72f8018fa2a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
return 0;
}
-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v9_0_gart_fini(adev);
/*
* TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
*/
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3f57f6463dc8..cb79a93c2eb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
return ret;
}
- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
if (adev->irq.installed &&
amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
/* powerup blocks */
kv_dpm_powergate_acp(adev, false);
kv_dpm_powergate_samu(adev, false);
- kv_dpm_powergate_vce(adev, false);
- kv_dpm_powergate_uvd(adev, false);
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ if (pi->caps_uvd_pg) /* power on the UVD block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
kv_enable_smc_cac(adev, false);
kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
int ret;
if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
- kv_dpm_powergate_vce(adev, false);
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
kv_enable_vce_dpm(adev, true);
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
kv_enable_vce_dpm(adev, false);
- kv_dpm_powergate_vce(adev, true);
}
return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
}
}
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->vce_power_gated == gate)
- return;
+ int ret;
pi->vce_power_gated = gate;
- if (!pi->caps_vce_pg)
- return;
-
- if (gate)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
- else
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ if (gate) {
+ /* stop the VCE block */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ kv_enable_vce_dpm(adev, false);
+ if (pi->caps_vce_pg) /* power off the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+ } else {
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ kv_enable_vce_dpm(adev, true);
+ /* re-init the VCE block */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ }
}
+
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
case AMD_IP_BLOCK_TYPE_UVD:
kv_dpm_powergate_uvd(handle, gate);
break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ kv_dpm_powergate_vce(handle, gate);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index db327b412562..1de96995e690 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
- ni_update_current_ps(adev, boot_ps);
return 0;
}
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index fbe878ae1e8c..4ba0003a9d32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
{
struct dc_context *ctx = pp->ctx;
struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct pp_display_clock_request clock = {0};
- if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return;
- amdgpu_dpm_display_configuration_changed(adev);
+ clock.clock_type = amd_pp_dcf_clock;
+ clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+ clock.clock_type = amd_pp_f_clock;
+ clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
}
void pp_rv_set_wm_ranges(struct pp_smu *pp,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 567867915d32..37eaf72ace54 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* fail-safe mode
*/
if (dc_is_hdmi_signal(link->connector_signal) ||
- dc_is_dvi_signal(link->connector_signal))
+ dc_is_dvi_signal(link->connector_signal)) {
+ if (prev_sink != NULL)
+ dc_sink_release(prev_sink);
+
return false;
+ }
default:
break;
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6e3f56684f4e..51ed99a37803 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
unsigned int tiling_mode = 0;
unsigned int stride = 0;
- switch (info->drm_format_mod << 10) {
- case PLANE_CTL_TILED_LINEAR:
+ switch (info->drm_format_mod) {
+ case DRM_FORMAT_MOD_LINEAR:
tiling_mode = I915_TILING_NONE;
break;
- case PLANE_CTL_TILED_X:
+ case I915_FORMAT_MOD_X_TILED:
tiling_mode = I915_TILING_X;
stride = info->stride;
break;
- case PLANE_CTL_TILED_Y:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
tiling_mode = I915_TILING_Y;
stride = info->stride;
break;
default:
- gvt_dbg_core("not supported tiling mode\n");
+ gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+ info->drm_format_mod);
}
obj->tiling_and_stride = tiling_mode | stride;
} else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->height = p.height;
info->stride = p.stride;
info->drm_format = p.drm_format;
- info->drm_format_mod = p.tiled;
+
+ switch (p.tiled) {
+ case PLANE_CTL_TILED_LINEAR:
+ info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+ break;
+ case PLANE_CTL_TILED_X:
+ info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF:
+ info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+ break;
+ default:
+ gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+ }
+
info->size = (((p.stride * p.height * p.bpp) / 8) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
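
The two gvt/dmabuf.c hunks above stop passing raw PLANE_CTL tiling bits around as a format modifier: vgpu_get_plane_info() now translates the decoded tiling field into a standard DRM format modifier, and vgpu_create_gem() switches on those modifiers. A minimal sketch of that translation, assuming the same PLANE_CTL_TILED_* and I915_FORMAT_MOD_* definitions (illustrative helper, not a function in the driver):

	/* Map the decoded PLANE_CTL tiling field to the DRM format modifier
	 * that userspace expects; anything unrecognised falls back to linear.
	 */
	static u64 skl_tiling_to_drm_format_mod(u32 tiled)
	{
		switch (tiled) {
		case PLANE_CTL_TILED_X:
			return I915_FORMAT_MOD_X_TILED;
		case PLANE_CTL_TILED_Y:
			return I915_FORMAT_MOD_Y_TILED;
		case PLANE_CTL_TILED_YF:
			return I915_FORMAT_MOD_Yf_TILED;
		case PLANE_CTL_TILED_LINEAR:
		default:
			return DRM_FORMAT_MOD_LINEAR;
		}
	}
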
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index face664be3e8..481896fb712a 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) {
- plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
- _PLANE_CTL_TILED_SHIFT;
+ plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
return -EINVAL;
}
- plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+ plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
(IS_SKYLAKE(dev_priv)
|| IS_KABYLAKE(dev_priv)
|| IS_BROXTON(dev_priv)) ?
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index cb055f3c81a2..60c155085029 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -101,7 +101,7 @@ struct intel_gvt;
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */
- u8 tiled; /* X-tiled */
+ u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */
u8 bpp; /* bits per pixel */
u32 hw_format; /* format field in the PRI_CTL register */
u32 drm_format; /* format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 7a58ca555197..72afa518edd9 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+ vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+ else
+ vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+ return 0;
+}
+
static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
u32 v = *(u32 *)p_data;
u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
- vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
- vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
- vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+ switch (offset) {
+ case _PHY_CTL_FAMILY_EDP:
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+ break;
+ case _PHY_CTL_FAMILY_DDI:
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+ vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+ break;
+ }
vgpu_vreg(vgpu, offset) = v;
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
skl_power_well_ctl_write);
+ MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
- NULL, NULL);
+ NULL, NULL);
+ MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL);
MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2c..e872f4847fbe 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 * performance for batch mmio read/write, so we need to
 * handle forcewake manually.
*/
- intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 09d7bb72b4ff..c32e7d5e8629 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
return false;
}
+/* A newly started vGPU gets higher priority for its first 2 seconds */
+#define GVT_SCHED_VGPU_PRI_TIME 2
+
struct vgpu_sched_data {
struct list_head lru_list;
struct intel_vgpu *vgpu;
bool active;
-
+ bool pri_sched;
+ ktime_t pri_time;
ktime_t sched_in_time;
ktime_t sched_time;
ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
+ if (vgpu_data->pri_sched) {
+ if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+ vgpu = vgpu_data->vgpu;
+ break;
+ } else
+ vgpu_data->pri_sched = false;
+ }
+
/* Return the vGPU only if it has time slice left */
if (vgpu_data->left_ts > 0) {
vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
+
/* no active vgpu or has already had a target */
if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
vgpu = find_busy_vgpu(sched_data);
if (vgpu) {
scheduler->next_vgpu = vgpu;
-
- /* Move the last used vGPU to the tail of lru_list */
vgpu_data = vgpu->sched_data;
- list_del_init(&vgpu_data->lru_list);
- list_add_tail(&vgpu_data->lru_list,
- &sched_data->lru_runq_head);
+ if (!vgpu_data->pri_sched) {
+ /* Move the last used vGPU to the tail of lru_list */
+ list_del_init(&vgpu_data->lru_list);
+ list_add_tail(&vgpu_data->lru_list,
+ &sched_data->lru_runq_head);
+ }
} else {
scheduler->next_vgpu = gvt->idle_vgpu;
}
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ ktime_t now;
if (!list_empty(&vgpu_data->lru_list))
return;
- list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+ now = ktime_get();
+ vgpu_data->pri_time = ktime_add(now,
+ ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+ vgpu_data->pri_sched = true;
+
+ list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
if (!hrtimer_active(&sched_data->timer))
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
&vgpu->gvt->scheduler;
int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (!vgpu_data->active)
return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
+ intel_runtime_pm_get(dev_priv);
spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 08ec7446282e..9e63cd47b60f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 11d834f94220..98358b4b36de 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
list_add_tail(&vma->obj_link, &obj->vma_list);
}
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
if (vma->obj)
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
GEM_BUG_ON(i915_gem_active_isset(&iter->base));
kfree(iter);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b725835b47ef..769f3f586661 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
- return;
-
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8761513f3532..c9af34861d9e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
- intel_ddi_enable_pipe_clock(crtc_state);
+ if (!is_mst)
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
- intel_ddi_disable_pipe_clock(old_crtc_state);
-
- /*
- * Power down sink before disabling the port, otherwise we end
- * up getting interrupts from the sink on detecting link loss.
- */
- if (!is_mst)
+ if (!is_mst) {
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
intel_disable_ddi_buf(encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ed3fa1c8a983..4a3c8ee9a973 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
int dst_x = plane_state->base.dst.x1;
+ int dst_w = drm_rect_width(&plane_state->base.dst);
int pipe_src_w = crtc_state->pipe_src_w;
int max_width = skl_max_plane_width(fb, 0, rotation);
int max_height = 4096;
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
* screen may cause FIFO underflow and display corruption.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+ (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- dst_x + w < 4 ? "end" : "start",
- dst_x + w < 4 ? dst_x + w : dst_x,
+ dst_x + dst_w < 4 ? "end" : "start",
+ dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
4, pipe_src_w - 4);
return -ERANGE;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cd0f649b57a5..1193202766a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
*/
status = connector_status_disconnected;
goto out;
+ } else {
+ /*
+ * If display is now connected check links status,
+ * there has been known issues of link loss triggering
+ * long pulse.
+ *
+ * Some sinks (eg. ASUS PB287Q) seem to perform some
+ * weird HPD ping pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to intermittent HPD dip.
+ */
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp_retrain_link(encoder, ctx);
}
/*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
return ret;
}
- status = intel_dp_long_pulse(intel_dp->attached_connector);
+ status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
}
intel_dp->detect_done = false;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7e3e01607643..4ecd65375603 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
I915_WRITE(DP_TP_STATUS(port), temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+ intel_ddi_enable_pipe_clock(pipe_config);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a9076402dcb0..192972a7d287 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
ret = i2c_transfer(adapter, &msg, 1);
if (ret == 1)
- return 0;
- return ret >= 0 ? -EIO : ret;
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ kfree(write_buf);
+ return ret;
}
static
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5dae16ccd9f1..3e085c5f2b81 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
if (current_mode != mode)
DRM_ERROR("LSPCON mode hasn't settled\n");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 978782a77629..28d191192945 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
writel(0x0, comp->regs + DISP_REG_OVL_RST);
}
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 4;
+}
+
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
+ /* The return values in this switch, MEM_MODE_INPUT_FORMAT_XXX,
+ * are defined in the MediaTek hardware data sheet.
+ * The alphabetical order of XXX bears no relation to the data
+ * arrangement in memory.
+ */
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
.layer_config = mtk_ovl_layer_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 585943c81e1f..b0a5cffe345a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -31,14 +31,31 @@
#define RDMA_REG_UPDATE_INT BIT(0)
#define DISP_REG_RDMA_GLOBAL_CON 0x0010
#define RDMA_ENGINE_EN BIT(0)
+#define RDMA_MODE_MEMORY BIT(1)
#define DISP_REG_RDMA_SIZE_CON_0 0x0014
+#define RDMA_MATRIX_ENABLE BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
#define DISP_REG_RDMA_SIZE_CON_1 0x0018
#define DISP_REG_RDMA_TARGET_LINE 0x001c
+#define DISP_RDMA_MEM_CON 0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
+#define MEM_MODE_INPUT_SWAP BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH 0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
#define DISP_REG_RDMA_FIFO_CON 0x0040
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR 0x0f00
+
+#define RDMA_MEM_GMC 0x40402020
struct mtk_disp_rdma_data {
unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+ unsigned int fmt)
+{
+ /* The return values in this switch, MEM_MODE_INPUT_FORMAT_XXX,
+ * are defined in the MediaTek hardware data sheet.
+ * The alphabetical order of XXX bears no relation to the data
+ * arrangement in memory.
+ */
+ switch (fmt) {
+ default:
+ case DRM_FORMAT_RGB565:
+ return MEM_MODE_INPUT_FORMAT_RGB565;
+ case DRM_FORMAT_BGR565:
+ return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGB888:
+ return MEM_MODE_INPUT_FORMAT_RGB888;
+ case DRM_FORMAT_BGR888:
+ return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_UYVY:
+ return MEM_MODE_INPUT_FORMAT_UYVY;
+ case DRM_FORMAT_YUYV:
+ return MEM_MODE_INPUT_FORMAT_YUYV;
+ }
+}
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_plane_pending_state *pending = &state->pending;
+ unsigned int addr = pending->addr;
+ unsigned int pitch = pending->pitch & 0xffff;
+ unsigned int fmt = pending->format;
+ unsigned int con;
+
+ con = rdma_fmt_convert(rdma, fmt);
+ writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+ if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL,
+ RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ } else {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, 0);
+ }
+
+ writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+ writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+ writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+ rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+ RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
.config = mtk_rdma_config,
.start = mtk_rdma_start,
.stop = mtk_rdma_stop,
.enable_vblank = mtk_rdma_enable_vblank,
.disable_vblank = mtk_rdma_disable_vblank,
+ .layer_nr = mtk_rdma_layer_nr,
+ .layer_config = mtk_rdma_layer_config,
};
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 2d6aa150a9ff..0b976dfd04df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
bool pending_needs_vblank;
struct drm_pending_vblank_event *event;
- struct drm_plane planes[OVL_LAYER_NR];
+ struct drm_plane *planes;
+ unsigned int layer_nr;
bool pending_planes;
void __iomem *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+ mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
return 0;
}
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_disable_vblank(ovl);
+ mtk_ddp_comp_disable_vblank(comp);
}
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
}
/* Initially configure all planes */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
/*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
* queue update module registers on vblank.
*/
if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
+ mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
state->pending_vrefresh, 0);
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
}
if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ mtk_ddp_comp_layer_config(comp, i, plane_state);
plane_state->pending.config = false;
}
}
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = mtk_smi_larb_get(ovl->larb_dev);
+ ret = mtk_smi_larb_get(comp->larb_dev);
if (ret) {
DRM_ERROR("Failed to get larb: %d\n", ret);
return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
return;
}
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
return;
/* Set all pending plane state to disabled */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
mtk_crtc->enabled = false;
}
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -516,7 +517,7 @@ err_cleanup_crtc:
return ret;
}
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+ mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+ mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+ sizeof(struct drm_plane),
+ GFP_KERNEL);
+
+ for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- &mtk_crtc->planes[1], pipe);
+ mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+ NULL, pipe);
if (ret < 0)
goto unprepare;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
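
With the hunk above the CRTC no longer assumes OVL_LAYER_NR planes; it asks the first component in the display path via mtk_ddp_comp_layer_nr() and sizes the plane array at runtime. A sketch of that allocation with an explicit out-of-memory check added (the check is not part of the hunk above, and devm_kcalloc() is used here purely as an illustration of an overflow-safe variant):

	mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
	/* One drm_plane per hardware layer reported by the component. */
	mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
					sizeof(*mtk_crtc->planes), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;
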
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_plane.h"
-#define OVL_LAYER_NR 4
#define MTK_LUT_SIZE 512
#define MTK_MAX_BPC 10
#define MTK_MIN_BPC 3
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
#define RDMA0_SOUT_DSI2 0x4
#define RDMA0_SOUT_DSI3 0x5
#define RDMA1_SOUT_DPI0 0x2
@@ -122,6 +124,8 @@
#define DPI0_SEL_IN_RDMA2 0x3
#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
#define DSI1_SEL_IN_RDMA1 0x1
#define DSI1_SEL_IN_RDMA2 0x4
#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->layer_nr)
+ return comp->funcs->layer_nr(comp);
+
+ return 0;
+}
+
static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
unsigned int idx)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
struct mtk_drm_private *private = dev_get_drvdata(dev);
drm_dev_unregister(private->drm);
- drm_dev_unref(private->drm);
+ drm_dev_put(private->drm);
private->drm = NULL;
}
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_kms_helper_poll_disable(drm);
-
- private->suspend_state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(private->suspend_state)) {
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(private->suspend_state);
- }
-
+ ret = drm_mode_config_helper_suspend(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
- return 0;
+
+ return ret;
}
static int mtk_drm_sys_resume(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_atomic_helper_resume(drm, private->suspend_state);
- drm_kms_helper_poll_enable(drm);
-
+ ret = drm_mode_config_helper_resume(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
- return 0;
+
+ return ret;
}
#endif
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 90837f7c7d0f..f4c7516eb989 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
{
- u16 val;
+ int val1, val2;
- val = i2c_smbus_read_byte_data(client, reg);
- val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+ val1 = i2c_smbus_read_byte_data(client, reg);
+ if (val1 < 0)
+ return val1;
+ val2 = i2c_smbus_read_byte_data(client, reg + 1);
+ if (val2 < 0)
+ return val2;
- return val;
+ return val1 | (val2 << 8);
}
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
{
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- int i = clamp_val(data->range[sattr->index] & 0xf, 0,
- ARRAY_SIZE(pwmfreq_table) - 1);
+ int idx;
if (IS_ERR(data))
return PTR_ERR(data);
+ idx = clamp_val(data->range[sattr->index] & 0xf, 0,
+ ARRAY_SIZE(pwmfreq_table) - 1);
- return sprintf(buf, "%d\n", pwmfreq_table[i]);
+ return sprintf(buf, "%d\n", pwmfreq_table[idx]);
}
static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
}
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e9e6aeabbf84..71d3445ba869 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
return 0;
}
+static ssize_t ina2xx_show_shunt(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_store_shunt,
+ ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c6bd61e4695a..944f5b63aecd 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -63,6 +63,7 @@
#include <linux/bitops.h>
#include <linux/dmi.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include "lm75.h"
#define USE_ALTERNATE
@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
return err;
if (val > NUM_TEMP)
return -EINVAL;
+ val = array_index_nospec(val, NUM_TEMP + 1);
if (val && (!(data->have_temp & BIT(val - 1)) ||
!data->temp_src[val - 1]))
return -EINVAL;
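
The new array_index_nospec() call above addresses Spectre v1: val arrives from userspace through a sysfs store, and the bounds check alone does not stop the CPU from speculatively indexing the driver's arrays with an out-of-range value. A generic sketch of the pattern, assuming <linux/nospec.h> and a caller-supplied table (names are illustrative):

	#include <linux/nospec.h>

	/* Clamp a user-controlled index after the bounds check so speculative
	 * execution cannot run past the check and leak out-of-bounds data.
	 */
	static int pick_entry(const int *table, size_t nr, unsigned long idx)
	{
		if (idx >= nr)
			return -EINVAL;
		idx = array_index_nospec(idx, nr);
		return table[idx];
	}
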
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index fb4e4a6bb1f6..be5ba4690895 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 6ec65adaba49..c33dcfb87993 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap)
}
#ifdef DEBUG
if (jiffies != start && i2c_debug >= 3)
- pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
- "high\n", jiffies - start);
+ pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n",
+ jiffies - start);
#endif
done:
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
setsda(adap, sb);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timed out */
- bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
- "timeout at bit #%d\n", (int)c, i);
+ bit_dbg(1, &i2c_adap->dev,
+ "i2c_outb: 0x%02x, timeout at bit #%d\n",
+ (int)c, i);
return -ETIMEDOUT;
}
/* FIXME do arbitration here:
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
}
sdahi(adap);
if (sclhi(adap) < 0) { /* timeout */
- bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
- "timeout at ack\n", (int)c);
+ bit_dbg(1, &i2c_adap->dev,
+ "i2c_outb: 0x%02x, timeout at ack\n", (int)c);
return -ETIMEDOUT;
}
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
sdahi(adap);
for (i = 0; i < 8; i++) {
if (sclhi(adap) < 0) { /* timeout */
- bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
- "#%d\n", 7 - i);
+ bit_dbg(1, &i2c_adap->dev,
+ "i2c_inb: timeout at bit #%d\n",
+ 7 - i);
return -ETIMEDOUT;
}
indata *= 2;
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!scl) {
- printk(KERN_WARNING "%s: SCL unexpected low "
- "while pulling SDA low!\n", name);
+ printk(KERN_WARNING
+ "%s: SCL unexpected low while pulling SDA low!\n",
+ name);
goto bailout;
}
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!scl) {
- printk(KERN_WARNING "%s: SCL unexpected low "
- "while pulling SDA high!\n", name);
+ printk(KERN_WARNING
+ "%s: SCL unexpected low while pulling SDA high!\n",
+ name);
goto bailout;
}
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!sda) {
- printk(KERN_WARNING "%s: SDA unexpected low "
- "while pulling SCL low!\n", name);
+ printk(KERN_WARNING
+ "%s: SDA unexpected low while pulling SCL low!\n",
+ name);
goto bailout;
}
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!sda) {
- printk(KERN_WARNING "%s: SDA unexpected low "
- "while pulling SCL high!\n", name);
+ printk(KERN_WARNING
+ "%s: SDA unexpected low while pulling SCL high!\n",
+ name);
goto bailout;
}
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap,
i2c_start(adap);
}
if (i && ret)
- bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
- "0x%02x: %s\n", i + 1,
+ bit_dbg(1, &i2c_adap->dev,
+ "Used %d tries to %s client at 0x%02x: %s\n", i + 1,
addr & 1 ? "read from" : "write to", addr >> 1,
ret == 1 ? "success" : "failed, timeout?");
return ret;
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
if (!(flags & I2C_M_NO_RD_ACK))
acknak(i2c_adap, 0);
- dev_err(&i2c_adap->dev, "readbytes: invalid "
- "block length (%d)\n", inval);
+ dev_err(&i2c_adap->dev,
+ "readbytes: invalid block length (%d)\n",
+ inval);
return -EPROTO;
}
/* The original count value accounts for the extra
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
return -ENXIO;
}
if (flags & I2C_M_RD) {
- bit_dbg(3, &i2c_adap->dev, "emitting repeated "
- "start condition\n");
+ bit_dbg(3, &i2c_adap->dev,
+ "emitting repeated start condition\n");
i2c_repstart(adap);
/* okay, now switch into reading mode */
addr |= 0x01;
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
}
ret = bit_doAddress(i2c_adap, pmsg);
if ((ret != 0) && !nak_ok) {
- bit_dbg(1, &i2c_adap->dev, "NAK from "
- "device addr 0x%02x msg #%d\n",
+ bit_dbg(1, &i2c_adap->dev,
+ "NAK from device addr 0x%02x msg #%d\n",
msgs[i].addr, i);
goto bailout;
}
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e18442b9973a..94d94b4a9a0d 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
i2c_set_adapdata(adap, dev);
if (dev->pm_disabled) {
- dev_pm_syscore_device(dev->dev, true);
irq_flags = IRQF_NO_SUSPEND;
} else {
irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 1a8d2da5b000..b5750fd85125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ if (i_dev->pm_disabled)
+ return 0;
+
i_dev->disable(i_dev);
i2c_dw_prepare_clk(i_dev, false);
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- i2c_dw_prepare_clk(i_dev, true);
+ if (!i_dev->pm_disabled)
+ i2c_dw_prepare_clk(i_dev, true);
+
i_dev->init(i_dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 941c223f6491..c91e145ef5a5 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -140,6 +140,7 @@
#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
+#define SBREG_SMBCTRL_DNV 0xcf000c
/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
spin_unlock(&p2sb_spinlock);
res = &tco_res[ICH_RES_MEM_OFF];
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ else
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;
@@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv)
}
#ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+ acpi_physical_address address)
+{
+ return address >= priv->smba &&
+ address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)
@@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
*/
mutex_lock(&priv->acpi_lock);
- if (!priv->acpi_reserved) {
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 6d975f5221ca..06c4c767af32 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
static const struct of_device_id lpi2c_imx_of_match[] = {
{ .compatible = "fsl,imx7ulp-lpi2c" },
- { .compatible = "fsl,imx8dv-lpi2c" },
{ },
};
MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 439e8778f849..818cab14e87c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data)
pd->pos = pd->msg->len;
pd->stop_after_dma = true;
- i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf);
-
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
dma_async_issue_pending(chan);
}
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
- bool do_init)
+static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+ bool do_init)
{
if (do_init) {
/* Initialize channel registers */
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
/* Enable all interrupts to begin with */
iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
- return 0;
}
static int poll_dte(struct sh_mobile_i2c_data *pd)
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
pd->stop_after_dma = false;
- err = start_ch(pd, msg, do_start);
- if (err)
- break;
+ start_ch(pd, msg, do_start);
if (do_start)
i2c_op(pd, OP_START, 0);
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
timeout = wait_event_timeout(pd->wait,
pd->sr & (ICSR_TACK | SW_DONE),
adapter->timeout);
+
+ /* 'stop_after_dma' tells if DMA transfer was complete */
+ i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+
if (!timeout) {
dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 9918bdd81619..a403e8579b65 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index bb181b088291..454f914ae66d 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
if (ret)
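
Both UniPhier hunks replace the "skip the STOP before a read" heuristic with a simpler rule: emit STOP only after the last message of a transfer, or when a message carries I2C_M_STOP, so a repeated START is used between all other messages. For example, a transfer of two write messages now keeps the bus between them (S W ... Sr W ... P) instead of issuing a STOP after the first write. Usage sketch through the core (buffers and client are illustrative):

	/* Two write messages in one transfer: the adapter emits a repeated
	 * START between them and a single STOP at the end.
	 */
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0, .len = sizeof(buf1), .buf = buf1 },
		{ .addr = client->addr, .flags = 0, .len = sizeof(buf2), .buf = buf2 },
	};

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
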
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 9a71e50d21f1..0c51c0ffdda9 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
{
u8 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+ unsigned long flags;
/* Clear and enable Rx full interrupt. */
xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = IIC_RX_FIFO_DEPTH;
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
+ local_irq_save(flags);
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+ local_irq_restore(flags);
+
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
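
In dynamic mode the controller expects the address write and the length/stop write to arrive back to back, so the hunk above brackets them with local_irq_save()/local_irq_restore(), presumably so a local interrupt cannot delay the second write. The general shape of the pattern (sketch; first_word/second_word are placeholders, and masking interrupts only protects the local CPU — cross-CPU exclusion would still need a lock):

	unsigned long flags;

	local_irq_save(flags);
	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, first_word);	/* e.g. address */
	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, second_word);	/* e.g. length | stop */
	local_irq_restore(flags);
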
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f15737763608..9ee9a15e7134 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf);
/**
- * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
- * @msg: the message to be synced with
+ * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
* @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL.
+ * @msg: the message which the buffer corresponds to
+ * @xferred: bool saying if the message was transferred
*/
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf)
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred)
{
if (!buf || buf == msg->buf)
return;
- if (msg->flags & I2C_M_RD)
+ if (xferred && msg->flags & I2C_M_RD)
memcpy(msg->buf, buf, msg->len);
kfree(buf);
}
-EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf);
+EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf);
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
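The rename above pairs i2c_get_dma_safe_msg_buf() with i2c_put_dma_safe_msg_buf() and adds an 'xferred' flag so a read buffer is only copied back into msg->buf when the transfer actually completed. A minimal sketch of how a bus driver might use the pair follows; the example_* helpers and the 8-byte threshold are hypothetical, only the two i2c-core helpers come from this patch.

#include <linux/i2c.h>

/* Sketch: DMA-safe bounce buffer handling in a hypothetical transfer path. */
static int example_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	u8 *dma_buf;
	int ret;

	/* Bounce to a DMA-safe buffer for messages of 8 bytes or more;
	 * NULL means the message is too short (or allocation failed),
	 * so fall back to PIO on msg->buf directly.
	 */
	dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
	if (!dma_buf)
		return example_pio_transfer(adap, msg);	/* hypothetical */

	ret = example_dma_transfer(adap, msg, dma_buf);	/* hypothetical */

	/* Copy read data back to msg->buf only if the transfer succeeded,
	 * then free the bounce buffer (a no-op if msg->buf itself was used).
	 */
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);
	return ret;
}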
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 316a57530f6d..c2df341ff6fa 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
* The consequence of the above is that allocation cost is low, but
* freeing is expensive. We assume that freeing rarely occurs.
*/
+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
- lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
+ lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+ ITS_MAX_LPI_NRBITS);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 94329e03001e..0b2af6e74fc3 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
static int resync_finish(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
- dlm_unlock_sync(cinfo->resync_lockres);
/*
* If resync thread is interrupted so we can't say resync is finished,
* another node will launch resync thread to continue.
*/
- if (test_bit(MD_CLOSING, &mddev->flags))
- return 0;
- else
- return resync_info_update(mddev, 0, 0);
+ if (!test_bit(MD_CLOSING, &mddev->flags))
+ ret = resync_info_update(mddev, 0, 0);
+ dlm_unlock_sync(cinfo->resync_lockres);
+ return ret;
}
static int area_resyncing(struct mddev *mddev, int direction,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 981898049491..d6f7978b4449 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
allow_barrier(conf);
}
+ raise_barrier(conf, 0);
read_more:
/* Now schedule reads for blocks from sector_nr to last */
r10_bio = raid10_alloc_init_r10buf(conf);
r10_bio->state = 0;
- raise_barrier(conf, sectors_done != 0);
+ raise_barrier(conf, 1);
atomic_set(&r10_bio->remaining, 0);
r10_bio->mddev = mddev;
r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
if (sector_nr <= last)
goto read_more;
+ lower_barrier(conf);
+
/* Now that we have done the whole section we can
* update reshape_progress
*/
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index a001808a2b77..bfb811407061 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
extern void ppl_quiesce(struct r5conf *conf, int quiesce);
extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
+static inline bool raid5_has_log(struct r5conf *conf)
+{
+ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+}
+
static inline bool raid5_has_ppl(struct r5conf *conf)
{
return test_bit(MD_HAS_PPL, &conf->mddev->flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4ce0d7502fad..e4e98f47865d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
- if (conf->log || raid5_has_ppl(conf))
+ if (raid5_has_log(conf) || raid5_has_ppl(conf))
return false;
return test_bit(STRIPE_BATCH_READY, &sh->state) &&
!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
sector_t newsize;
struct r5conf *conf = mddev->private;
- if (conf->log || raid5_has_ppl(conf))
+ if (raid5_has_log(conf) || raid5_has_ppl(conf))
return -EINVAL;
sectors &= ~((sector_t)conf->chunk_sectors - 1);
newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
{
struct r5conf *conf = mddev->private;
- if (conf->log || raid5_has_ppl(conf))
+ if (raid5_has_log(conf) || raid5_has_ppl(conf))
return -EINVAL;
if (mddev->delta_disks == 0 &&
mddev->new_layout == mddev->layout &&
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 31112f622b88..475e5b3790ed 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
}
- } else {
+ } else if (pdata) {
for (i = 0; i < pdata->num_sub_devices; i++) {
pdata->sub_devices[i].dev.parent = dev;
ret = platform_device_register(&pdata->sub_devices[i]);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 648eb6743ed5..6edffeed9953 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
mmc_exit_request(mq->queue, req);
}
-/*
- * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
- * will not be dispatched in parallel.
- */
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_lock_irq(q->queue_lock);
- if (mq->recovery_needed) {
+ if (mq->recovery_needed || mq->busy) {
spin_unlock_irq(q->queue_lock);
return BLK_STS_RESOURCE;
}
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
break;
}
+ /* Parallel dispatch of requests is not supported at the moment */
+ mq->busy = true;
+
mq->in_flight[issue_type] += 1;
get_card = (mmc_tot_in_flight(mq) == 1);
cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
+ mq->busy = false;
spin_unlock_irq(q->queue_lock);
if (put_card)
mmc_put_card(card, &mq->ctx);
+ } else {
+ WRITE_ONCE(mq->busy, false);
}
return ret;
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 17e59d50b496..9bf3c9245075 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -81,6 +81,7 @@ struct mmc_queue {
unsigned int cqe_busy;
#define MMC_CQE_DCMD_BUSY BIT(0)
#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool busy;
bool use_cqe;
bool recovery_needed;
bool in_recovery;
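The new 'busy' flag above lets mmc_mq_queue_rq() return BLK_STS_RESOURCE instead of issuing a second request while one is already being dispatched, since this queue has a single hardware queue and cannot dispatch in parallel. A condensed sketch of the guard, assuming the driver's queue_lock and struct mmc_queue fields (the error handling of the real function is omitted):

static blk_status_t example_queue_rq(struct mmc_queue *mq, struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (mq->recovery_needed || mq->busy) {
		/* Another request is in flight or recovery is pending: back off. */
		spin_unlock_irq(q->queue_lock);
		return BLK_STS_RESOURCE;
	}
	mq->busy = true;			/* parallel dispatch not supported */
	spin_unlock_irq(q->queue_lock);

	/* ... issue the request to the card here (omitted) ... */

	WRITE_ONCE(mq->busy, false);		/* allow the next request in */
	return BLK_STS_OK;
}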
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 294de177632c..61e4e2a213c9 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
* We don't really have DMA, so we need
* to copy from our platform driver buffer
*/
- sg_copy_to_buffer(data->sg, 1, host->virt_base,
+ sg_copy_from_buffer(data->sg, 1, host->virt_base,
data->sg->length);
}
host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
* We don't really have DMA, so we need to copy to our
* platform driver buffer
*/
- sg_copy_from_buffer(data->sg, 1, host->virt_base,
+ sg_copy_to_buffer(data->sg, 1, host->virt_base,
data->sg->length);
}
}
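The two swapped calls above follow the scatterlist helper naming: sg_copy_to_buffer() copies data out of the sg list into a linear buffer, while sg_copy_from_buffer() copies a linear buffer into the sg list. A small sketch of the intended directions for a PIO-style transfer, with 'virt_base' standing in for the device window as in this driver:

#include <linux/scatterlist.h>

/* Read completion: device data sitting in virt_base goes into the sg list. */
static void example_read_done(struct scatterlist *sg, void *virt_base, size_t len)
{
	sg_copy_from_buffer(sg, 1, virt_base, len);	/* buffer -> sg */
}

/* Write preparation: data in the sg list is copied out to virt_base. */
static void example_write_prepare(struct scatterlist *sg, void *virt_base, size_t len)
{
	sg_copy_to_buffer(sg, 1, virt_base, len);	/* sg -> buffer */
}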
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 5aa2c9404e92..be53044086c7 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
do {
value = atmci_readl(host, ATMCI_RDR);
if (likely(offset + 4 <= sg->length)) {
- sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
+ sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
offset += 4;
nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
} else {
unsigned int remaining = sg->length - offset;
- sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
+ sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
goto done;
offset = 4 - remaining;
- sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
+ sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
nbytes += offset;
}
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
do {
if (likely(offset + 4 <= sg->length)) {
- sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
+ sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
atmci_writel(host, ATMCI_TDR, value);
offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
unsigned int remaining = sg->length - offset;
value = 0;
- sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
+ sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
}
offset = 4 - remaining;
- sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
+ sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
atmci_writel(host, ATMCI_TDR, value);
nbytes += offset;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 35cc0de6be67..ca0b43973769 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -45,14 +45,16 @@
/* DM_CM_RST */
#define RST_DTRANRST1 BIT(9)
#define RST_DTRANRST0 BIT(8)
-#define RST_RESERVED_BITS GENMASK_ULL(32, 0)
+#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
#define INFO1_CLEAR 0
+#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO1_DTRANEND1 BIT(17)
#define INFO1_DTRANEND0 BIT(16)
/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
+#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO2_DTRANERR1 BIT(17)
#define INFO2_DTRANERR0 BIT(16)
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
{
struct renesas_sdhi *priv = host_to_priv(host);
+ /* Disable DMAC interrupts, we don't use them */
+ renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
+ INFO1_MASK_CLEAR);
+ renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
+ INFO2_MASK_CLEAR);
+
/* Each value is set to non-zero so each DMA channel is assumed "enabled" */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index ca18612c4201..67b2065e7a19 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali)
denali_enable_irq(denali);
denali_reset_banks(denali);
+ if (!denali->max_banks) {
+ /* Error out early if no chip is found for some reason. */
+ ret = -ENODEV;
+ goto disable_irq;
+ }
denali->active_bank = DENALI_INVALID_BANK;
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index a3f04315c05c..427fcbc1b71c 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev)
return 0;
}
-static void __init init_mtd_structs(struct mtd_info *mtd)
+static void init_mtd_structs(struct mtd_info *mtd)
{
/* initialize mtd and nand data structures */
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
}
-static int __init read_id_reg(struct mtd_info *mtd)
+static int read_id_reg(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct docg4_priv *doc = nand_get_controller_data(nand);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bb1e38b1681..cecbb1d1f587 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
return bp->hw_resc.max_cp_rings;
}
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
- bp->hw_resc.max_cp_rings = max;
+ return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
}
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
}
return rc;
}
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
- *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+ *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
+ hw_resc->max_irqs);
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (bp->tx_nr_rings)
return 0;
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
- return rc;
+ goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
if (rc)
- return rc;
+ goto init_dflt_ring_err;
+
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
bp->dev->features |= NETIF_F_NTUPLE;
}
- return 0;
+init_dflt_ring_err:
+ bnxt_ulp_irq_restart(bp, rc);
+ return rc;
}
int bnxt_restore_pf_fw_resources(struct bnxt *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index fefa011320e0..bde384630a75 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 6d583bcd2a81..fcd085a9853a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
- vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
max_stat_ctxs = hw_resc->max_stat_ctxs;
/* Remaining rings are distributed equally among VFs for now */
- vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+ vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
+ bp->cp_nr_rings) / num_vfs;
vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
avail_cp = min_t(int, avail_cp, avail_stat);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 139d96c5a023..092c817f8f11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
struct tcf_exts *tc_exts)
{
const struct tc_action *tc_act;
- LIST_HEAD(tc_actions);
- int rc;
+ int i, rc;
if (!tcf_exts_has_actions(tc_exts)) {
netdev_info(bp->dev, "no actions");
return -EINVAL;
}
- tcf_exts_to_list(tc_exts, &tc_actions);
- list_for_each_entry(tc_act, &tc_actions, list) {
+ tcf_exts_for_each_action(i, tc_act, tc_exts) {
/* Drop action */
if (is_tcf_gact_shot(tc_act)) {
actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index c37b2842f972..beee61292d5e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
- bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return avail_msix;
}
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
- int max_cp_rings, msix_requested;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return 0;
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
- bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
return 0;
}
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
-{
- ASSERT_RTNL();
- if (bnxt_ulp_registered(bp->edev, ulp_id)) {
- struct bnxt_en_dev *edev = bp->edev;
- unsigned int msix_req, max;
-
- msix_req = edev->ulp_tbl[ulp_id].msix_requested;
- max = bnxt_get_max_func_cp_rings(bp);
- bnxt_set_max_func_cp_rings(bp, max - msix_req);
- max = bnxt_get_max_func_stat_ctxs(bp);
- bnxt_set_max_func_stat_ctxs(bp, max - 1);
- }
-}
-
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index df48ac71729f..d9bea37cd211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b773bc07edf7..14b49612aa86 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_MODE 0x44
+#define MODE_LINK_STATUS (1 << 5)
+
#define UMAC_EEE_CTRL 0x064
#define EN_LPI_RX_PAUSE (1 << 0)
#define EN_LPI_TX_PFC (1 << 1)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5333274a283c..4241ae928d4a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
struct fixed_phy_status *status)
{
- if (dev && dev->phydev && status)
- status->link = dev->phydev->link;
+ struct bcmgenet_priv *priv;
+ u32 reg;
+
+ if (dev && dev->phydev && status) {
+ priv = netdev_priv(dev);
+ reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+ status->link = !!(reg & MODE_LINK_STATUS);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dc09f9a8a49b..16e4ef7d7185 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
if (np) {
if (of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification\n");
- return -ENODEV;
- }
bp->phy_node = of_node_get(np);
} else {
bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
struct device_node *np;
- int err;
+ int err = -ENXIO;
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
+ if (np && of_phy_is_fixed_link(np)) {
+ if (of_phy_register_fixed_link(np) < 0) {
+ dev_err(&bp->pdev->dev,
+ "broken fixed-link specification %pOF\n", np);
+ goto err_out_free_mdiobus;
+ }
+
+ err = mdiobus_register(bp->mii_bus);
+ } else {
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = of_mdiobus_register(bp->mii_bus, np);
+ }
- err = of_mdiobus_register(bp->mii_bus, np);
if (err)
- goto err_out_free_mdiobus;
+ goto err_out_free_fixed_link;
err = macb_mii_probe(bp->dev);
if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
err_out_unregister_bus:
mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
if (np && of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
@@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
if (!(status & MACB_BIT(TGO)))
return 0;
- usleep_range(10, 250);
+ udelay(250);
} while (time_before(halt_time, timeout));
return -ETIMEDOUT;
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
{
struct macb_queue *queue;
unsigned int q;
+ u32 ctrl = macb_readl(bp, NCR);
/* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
- macb_writel(bp, NCR, 0);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
/* Clear the stats registers (XXX: Update stats first?) */
- macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+ ctrl |= MACB_BIT(CLRSTAT);
+
+ macb_writel(bp, NCR, ctrl);
/* Clear all status flags */
macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
}
/* Enable TX and RX */
- macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
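The macb_reset_hw()/macb_init_hw() changes above replace absolute writes to the network control register with read-modify-write, so bits set elsewhere (such as MPE for the management port) are preserved. A sketch of the pattern, mirroring the hunk:

/* Disable RX/TX and clear stats without clobbering other NCR bits. */
static void example_macb_quiesce(struct macb *bp)
{
	u32 ctrl = macb_readl(bp, NCR);		/* read current control bits */

	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));	/* drop receive/transmit enable */
	ctrl |= MACB_BIT(CLRSTAT);		/* request a statistics clear */

	macb_writel(bp, NCR, ctrl);		/* single read-modify-write */
}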
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 623f73dd7738..c116f96956fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
struct ch_filter_specification *fs)
{
const struct tc_action *a;
- LIST_HEAD(actions);
+ int i;
- tcf_exts_to_list(cls->exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, cls->exts) {
if (is_tcf_gact_ok(a)) {
fs->action = FILTER_PASS;
} else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
- LIST_HEAD(actions);
+ int i;
- tcf_exts_to_list(cls->exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, cls->exts) {
if (is_tcf_gact_ok(a)) {
/* Do nothing */
} else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 18eb2aedd4cb..c7d2b4dc7568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
unsigned int num_actions = 0;
const struct tc_action *a;
struct tcf_exts *exts;
- LIST_HEAD(actions);
+ int i;
exts = cls->knode.exts;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
/* Don't allow more than one action per rule. */
if (num_actions)
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index fa5b30f547f6..08a750fb60c4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
/* priv data for the desc, e.g. skb when used with ip stack */
void *priv;
- u16 page_offset;
- u16 reuse_flag;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
- u16 length; /* length of the buffer */
+ u16 reuse_flag;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
u8 *auto_neg, u16 *speed, u8 *duplex);
void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+ bool (*need_adjust_link)(struct hnae_handle *handle,
+ int speed, int duplex);
int (*set_loopback)(struct hnae_handle *handle,
enum hnae_loop loop_mode, int en);
void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e6aad30e7e69..b52029e26d15 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
}
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+ struct dsaf_device *dsaf_dev;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_vf_cb *vf_cb;
+ int ret;
+ int i;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+ if (ret)
+ return ret;
+ }
+
+ ppe_cb = hns_get_ppe_cb(handle);
+ ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+ if (ret)
+ return ret;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return -EINVAL;
+ ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+ if (ret)
+ return ret;
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+ if (ret)
+ return ret;
+
+ mdelay(10);
+ return 0;
+}
+
static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
{
int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
}
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
int duplex)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
- hns_mac_adjust_link(mac_cb, speed, duplex);
+ switch (mac_cb->dsaf_dev->dsaf_ver) {
+ case AE_VERSION_1:
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+ break;
+
+ case AE_VERSION_2:
+ /* the chip needs to clear all packets inside first */
+ hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+ if (hns_ae_wait_flow_down(handle)) {
+ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+ break;
+ }
+
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+ break;
+
+ default:
+ break;
+ }
+
+ return;
}
static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_status = hns_ae_get_link_status,
.get_info = hns_ae_get_mac_info,
.adjust_link = hns_ae_adjust_link,
+ .need_adjust_link = hns_ae_need_adjust_link,
.set_loopback = hns_ae_config_loopback,
.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
.get_pauseparam = hns_ae_get_pauseparam,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 5488c6e89f21..09e4061d1fa6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
*tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
}
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+ int duplex)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+ return (mac_cb->speed != speed) ||
+ (mac_cb->half_duplex == duplex);
+}
+
static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
u32 full_duplex)
{
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
hns_gmac_set_uc_match(mac_drv, en);
}
+int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ int wait_cnt;
+ u32 val;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+ /* bits 5~0: packets whose transmission is not yet complete */
+ if ((val & 0x3f) == 0)
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(drv->dev,
+ "hns ge %d fifo was not idle.\n", drv->mac_id);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static void hns_gmac_init(void *mac_drv)
{
u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->mac_disable = hns_gmac_disable;
mac_drv->mac_free = hns_gmac_free;
mac_drv->adjust_link = hns_gmac_adjust_link;
+ mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->get_strings = hns_gmac_get_strings;
mac_drv->update_stats = hns_gmac_update_stats;
mac_drv->set_promiscuous = hns_gmac_set_promisc;
+ mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
return (void *)mac_drv;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 1c2326bd76e2..6ed6f142427e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
return 0;
}
+/**
+ * hns_mac_need_adjust_link - check whether the mac speed and duplex registers need updating
+ * @mac_cb: mac device
+ * @speed: phy device speed
+ * @duplex: phy device duplex
+ *
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ if (mac_ctrl_drv->need_adjust_link)
+ return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ else
+ return true;
+}
+
void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
{
int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
return 0;
}
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->wait_fifo_clean)
+ return drv->wait_fifo_clean(drv);
+
+ return 0;
+}
+
void hns_mac_reset(struct hns_mac_cb *mac_cb)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
return DSAF_MAX_PORT_NUM;
}
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
/**
* hns_mac_init - init mac
* @dsaf_dev: dsa fabric device struct pointer
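hns_mac_need_adjust_link() above only calls into the MAC driver when the new need_adjust_link hook is populated and otherwise reports true, so MAC drivers that do not implement it keep the old always-adjust behaviour. A generic sketch of this optional-callback-with-default pattern (the ops structure here is illustrative, not the driver's):

struct example_ops {
	/* optional: may be left NULL by backends that always want an update */
	bool (*need_adjust)(void *priv, int speed, int duplex);
};

static bool example_need_adjust(const struct example_ops *ops, void *priv,
				int speed, int duplex)
{
	if (ops->need_adjust)
		return ops->need_adjust(priv, speed, duplex);
	return true;	/* default: assume the registers need reprogramming */
}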
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index bbc0a98e7ca3..fbc75341bef7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -356,6 +356,9 @@ struct mac_driver {
/*adjust mac mode of port,include speed and duplex*/
int (*adjust_link)(void *mac_drv, enum mac_speed speed,
u32 full_duplex);
+ /* check whether speed/duplex need adjusting */
+ bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+ int duplex);
/* config autonegotiation mode of port */
void (*set_an_mode)(void *mac_drv, u8 enable);
/* config loopback mode */
@@ -394,6 +397,7 @@ struct mac_driver {
void (*get_info)(void *mac_drv, struct mac_info *mac_info);
void (*update_stats)(void *mac_drv);
+ int (*wait_fifo_clean)(void *mac_drv);
enum mac_mode mac_mode;
u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
int hns_mac_init(struct dsaf_device *dsaf_dev);
void mac_adjust_link(struct net_device *net_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
const unsigned char *addr);
int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ca50c2553a9c..e557a4ef5996 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
}
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+ u32 val, val_tmp;
+ int wait_cnt;
+
+ if (port >= DSAF_SERVICE_NW_NUM)
+ return 0;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+ (port + DSAF_XGE_NUM) * 0x40);
+ val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+ (port + DSAF_XGE_NUM) * 0x40);
+ if (val == val_tmp)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+ val, val_tmp);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/**
* dsaf_probe - probe dsaf dev
* @pdev: dasf platform device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 4507e8222683..0e1cd99831a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -44,6 +44,8 @@ struct hns_mac_cb;
#define DSAF_ROCE_CREDIT_CHN 8
#define DSAF_ROCE_CHAN_MODE 3
+#define HNS_MAX_WAIT_CNT 10000
+
enum dsaf_roce_port_mode {
DSAF_ROCE_6PORT_MODE,
DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index d160d8c9e45b..0942e4916d9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
}
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+ int wait_cnt;
+ u32 val;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+ if (!val)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+ val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/**
* ppe_init_hw - init ppe
* @ppe_cb: ppe device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 9d8e643e8aa6..f670e63a5a01 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -100,6 +100,7 @@ struct ppe_common_cb {
};
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
int hns_ppe_init(struct dsaf_device *dsaf_dev);
void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 9d76e2e54f9d..5d64519b9b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+ u32 head, tail;
+ int wait_cnt;
+
+ tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+ if (tail == head)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/**
*hns_rcb_reset_ring_hw - ring reset
*@q: ring struct pointer
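hns_gmac_wait_fifo_clean(), hns_dsaf_wait_pkt_clean(), hns_ppe_wait_tx_fifo_clean() and hns_rcb_wait_tx_ring_clean() above all share the same bounded-polling shape: re-read a status register up to HNS_MAX_WAIT_CNT times with a short sleep and give up with -EBUSY. A distilled sketch with the condition passed in as a callback (the callback and context are hypothetical):

#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_WAIT_CNT	10000	/* mirrors HNS_MAX_WAIT_CNT */

/* Sketch: poll until busy(ctx) reports idle, or give up after roughly 1-2 seconds. */
static int example_wait_idle(bool (*busy)(void *ctx), void *ctx)
{
	int wait_cnt = 0;

	while (wait_cnt++ < EXAMPLE_MAX_WAIT_CNT) {
		if (!busy(ctx))
			return 0;
		usleep_range(100, 200);	/* give the hardware time to drain */
	}

	return -EBUSY;
}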
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 602816498c8d..2319b772a271 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
u32 hns_rcb_get_rx_coalesced_frames(
struct rcb_common_cb *rcb_common, u32 port_idx);
u32 hns_rcb_get_tx_coalesced_frames(
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 886cbbf25761..74d935d82cbc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -464,6 +464,7 @@
#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
+#define GMAC_FIFO_STATE_REG 0x0000UL
#define GMAC_DUPLEX_TYPE_REG 0x0008UL
#define GMAC_FD_FC_TYPE_REG 0x000CUL
#define GMAC_TX_WATER_LINE_REG 0x0010UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 9f2b552aee33..f56855e63c96 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -406,113 +406,13 @@ out_net_tx_busy:
return NETDEV_TX_BUSY;
}
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
- unsigned int max_size)
-{
- unsigned char *network;
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_size < ETH_HLEN)
- return max_size;
-
- /* initialize network frame pointer */
- network = data;
-
- /* set first protocol and move network header forward */
- network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
- == HNS_RX_FLAG_VLAN_PRESENT) {
- if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
- return max_size;
-
- network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
- == HNS_RX_FLAG_L3ID_IPV4) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct iphdr)))
- return max_size;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return network - data;
-
- /* record next protocol if header is present */
- } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
- == HNS_RX_FLAG_L3ID_IPV6) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct ipv6hdr)))
- return max_size;
-
- /* record next protocol */
- hlen = sizeof(struct ipv6hdr);
- } else {
- return network - data;
- }
-
- /* relocate pointer to start of L4 header */
- network += hlen;
-
- /* finally sort out TCP/UDP */
- if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
- == HNS_RX_FLAG_L4ID_TCP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct tcphdr)))
- return max_size;
-
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (network[12] & 0xF0) >> 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return network - data;
-
- network += hlen;
- } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
- == HNS_RX_FLAG_L4ID_UDP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct udphdr)))
- return max_size;
-
- network += sizeof(struct udphdr);
- }
-
- /* If everything has gone correctly network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((typeof(max_size))(network - data) < max_size)
- return network - data;
- else
- return max_size;
-}
-
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
struct hnae_ring *ring, int pull_len,
struct hnae_desc_cb *desc_cb)
{
struct hnae_desc *desc;
- int truesize, size;
+ u32 truesize;
+ int size;
int last_offset;
bool twobufs;
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
}
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
- size - pull_len, truesize - pull_len);
+ size - pull_len, truesize);
/* avoid re-using remote pages,flag default unreuse */
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
} else {
ring->stats.seg_pkt_cnt++;
- pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
@@ -1212,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
struct hnae_handle *h = priv->ae_handle;
int state = 1;
+ /* If there is no phy, there is no need to adjust the link */
if (ndev->phydev) {
- h->dev->ops->adjust_link(h, ndev->phydev->speed,
- ndev->phydev->duplex);
- state = ndev->phydev->link;
+ /* When the phy link is down, do nothing */
+ if (ndev->phydev->link == 0)
+ return;
+
+ if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex)) {
+ /* The Hi161X chip does not support changing gmac
+ * speed and duplex while traffic is flowing. Delay 200ms
+ * to make sure there is no more data in the chip FIFO.
+ */
+ netif_carrier_off(ndev);
+ msleep(200);
+ h->dev->ops->adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex);
+ netif_carrier_on(ndev);
+ }
}
+
state = state && h->dev->ops->get_status(h);
if (state != priv->link) {
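The adjust-link rework above only reprograms the MAC when need_adjust_link() reports a speed or duplex change, and brackets the reprogramming with netif_carrier_off()/netif_carrier_on() plus a 200ms pause so the Hi161X FIFOs can drain first. Roughly, the sequence looks like the sketch below; the ops calls mirror the hunk, the wrapper itself is hypothetical:

/* Sketch: stop the stack, let hardware drain, then reprogram speed/duplex. */
static void example_adjust_link(struct net_device *ndev, struct hnae_handle *h,
				int speed, int duplex)
{
	if (!h->dev->ops->need_adjust_link(h, speed, duplex))
		return;			/* nothing changed, leave the MAC alone */

	netif_carrier_off(ndev);	/* stop the stack from queueing more TX */
	msleep(200);			/* give the chip FIFOs time to empty */
	h->dev->ops->adjust_link(h, speed, duplex);
	netif_carrier_on(ndev);
}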
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 08f3c4743f74..774beda040a1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
}
if (h->dev->ops->adjust_link) {
+ netif_carrier_off(net_dev);
h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+ netif_carrier_on(net_dev);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3554dca7a680..955c4ab18b03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_desc_cb *desc_cb)
{
struct hns3_desc *desc;
- int truesize, size;
+ u32 truesize;
+ int size;
int last_offset;
bool twobufs;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a02a96aee2a2..cb450d7ec8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
/* priv data for the desc, e.g. skb when used with ip stack */
void *priv;
- u16 page_offset;
- u16 reuse_flag;
-
+ u32 page_offset;
u32 length; /* length of the buffer */
+ u16 reuse_flag;
+
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 354c0982847b..372664686309 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
- case 8192:
- ret |= EMAC4_MR1_RFS_8K;
- break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_RFS_4K;
break;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index dafdd4ade705..4f0daf67b18d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->map_id = 1;
release_rx_pools(adapter);
release_tx_pools(adapter);
- init_rx_pools(netdev);
- init_tx_pools(netdev);
+ rc = init_rx_pools(netdev);
+ if (rc)
+ return rc;
+ rc = init_tx_pools(netdev);
+ if (rc)
+ return rc;
release_napi(adapter);
- init_napi(adapter);
+ rc = init_napi(adapter);
+ if (rc)
+ return rc;
} else {
rc = reset_tx_pools(adapter);
if (rc)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index bdb3f8e65ed4..2569a168334c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
+ kfree(tx_old);
+ kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
err_alloc_rx:
kfree(txdr);
err_alloc_tx:
- e1000_up(adapter);
+ if (netif_running(adapter->netdev))
+ e1000_up(adapter);
err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
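The e1000_set_ringparam() fix above frees the old descriptor rings only after the new ones have been brought up, and re-runs e1000_up() in the error path only if the interface was running to begin with. The general shape of that swap is sketched below with hypothetical alloc/activate/free helpers:

/* Sketch: replace a resource only once its successor is known to work. */
static int example_swap_ring(struct example_adapter *ad)
{
	struct example_ring *old_ring = ad->ring;
	struct example_ring *new_ring = example_alloc_ring(ad);	/* hypothetical */

	if (!new_ring)
		return -ENOMEM;

	ad->ring = new_ring;
	if (example_activate_ring(ad)) {	/* hypothetical bring-up */
		ad->ring = old_ring;		/* roll back; keep the old ring alive */
		example_free_ring(new_ring);
		return -EIO;
	}

	example_free_ring(old_ring);	/* drop it only after the new ring is live */
	return 0;
}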
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index abcd096ede14..5ff6caa83948 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
- WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f2c622e78802..ac685ad4d877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ struct i40e_pf *pf = vsi->back;
i40e_status ret;
int i;
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+ /* There is no need to reset BW when mqprio mode is on. */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
return 0;
- if (!vsi->mqprio_qopt.qopt.hw) {
+ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Failed to reset tx rate for vsi->seid %u\n",
vsi->seid);
return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
- ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
- NULL);
+ ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"AQ command Config VSI BW allocation per TC failed = %d\n",
- vsi->back->hw.aq.asq_last_status);
+ pf->hw.aq.asq_last_status);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d8b5fff581e7..868f4a1d0f72 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
+/* Macros to iterate over each allocated tx/rx ring in a VSI, whether used or not */
+#define ice_for_each_alloc_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
struct list_head tmp_sync_list; /* MAC filters to be synced */
struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
- bool irqs_ready;
- bool current_isup; /* Sync 'link up' logging */
- bool stat_offsets_loaded;
+ u8 irqs_ready;
+ u8 current_isup; /* Sync 'link up' logging */
+ u8 stat_offsets_loaded;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
- bool stat_prev_loaded; /* has previous stats been loaded */
+ u8 stat_prev_loaded; /* has previous stats been loaded */
char int_name[ICE_INT_NAME_STR_LEN];
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7541ec2270b3..a0614f472658 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
/* VLAN section */
__le16 pvid; /* VLANS include priority bits */
u8 pvlan_reserved[2];
- u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S 0
-#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3
+ u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S 3
-#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S 3
+#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
u8 pvlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
/* Action = 7 - Set Stat count */
#define ICE_LG_ACT_STAT_COUNT 0x7
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 71d032cc5fa7..661beea6af79 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
*/
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
struct ice_phy_info *phy_info;
enum ice_status status = 0;
- if (!pi)
+ if (!pi || !link_up)
return ICE_ERR_PARAM;
phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
}
/* LUT size is only valid for Global and PF table types */
- if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+ switch (lut_size) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
- (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- } else {
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ }
+ /* fall-through */
+ default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
}
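The reworked validation above only reaches the default: error path for the 2K size when the table type is not PF; for readers who find a conditional fall-through surprising, here is a small stand-alone sketch of the same shape (all names and flag values are placeholders, not the driver's):

#include <stdio.h>

enum { SIZE_128, SIZE_512, SIZE_2K };
enum { TYPE_VSI, TYPE_PF };

/* Returns 0 and sets a size flag on success, -1 for unsupported combos. */
static int validate_lut_size(int size, int type, unsigned int *flags)
{
	switch (size) {
	case SIZE_128:
		break;				/* nothing extra to set */
	case SIZE_512:
		*flags |= 1u << 2;		/* placeholder "512" flag */
		break;
	case SIZE_2K:
		if (type == TYPE_PF) {
			*flags |= 2u << 2;	/* placeholder "2K" flag */
			break;
		}
		/* fall through */
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = 0;

	printf("%d\n", validate_lut_size(SIZE_2K, TYPE_PF, &flags));	/* 0 */
	printf("%d\n", validate_lut_size(SIZE_2K, TYPE_VSI, &flags));	/* -1 */
	return 0;
}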
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 7c511f144ed6..62be72fdc8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- ice_shutdown_rq(hw, cq);
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
+ if (cq->rq.head) {
+ ice_shutdown_rq(hw, cq);
+ mutex_destroy(&cq->rq_lock);
+ }
+ if (cq->sq.head) {
+ ice_shutdown_sq(hw, cq);
+ mutex_destroy(&cq->sq_lock);
+ }
return status;
}
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return;
}
- ice_shutdown_sq(hw, cq);
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
+ if (cq->sq.head) {
+ ice_shutdown_sq(hw, cq);
+ mutex_destroy(&cq->sq_lock);
+ }
+ if (cq->rq.head) {
+ ice_shutdown_rq(hw, cq);
+ mutex_destroy(&cq->rq_lock);
+ }
}
/**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
clean_rq_elem_out:
/* Set pending if needed, unlock and return */
- if (pending)
+ if (pending) {
+ /* re-read HW head to calculate actual pending messages */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+ }
clean_rq_elem_err:
mutex_unlock(&cq->rq_lock);
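The pending count recomputed in the hunk above handles the case where the hardware head has wrapped past next-to-clean; a tiny user-space check of that formula (ring size and index values are made up for illustration):

#include <stdio.h>

/* Same expression as above: if ntc is ahead of ntu the head has wrapped,
 * so add a full ring's worth before taking the difference.
 */
static unsigned short pending(unsigned short ntc, unsigned short ntu,
			      unsigned short count)
{
	return (unsigned short)((ntc > ntu ? count : 0) + (ntu - ntc));
}

int main(void)
{
	printf("%u\n", pending(5, 3, 16));	/* 14: head wrapped past ntc */
	printf("%u\n", pending(3, 5, 16));	/* 2: no wrap */
	return 0;
}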
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1db304c01d10..c71a9b528d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- return ((np->vsi->num_txq + np->vsi->num_rxq) *
+ return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
(sizeof(struct ice_q_stats) / sizeof(u64)));
}
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
- ice_for_each_txq(vsi, i) {
+ ice_for_each_alloc_txq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
"tx-queue-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
- ice_for_each_rxq(vsi, i) {
+ ice_for_each_alloc_rxq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
"rx-queue-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
+ /* The number (and order) of strings reported *must* remain
+ * constant for a given netdevice. This function must not
+ * report a different number based on run time parameters
+ * (such as the number of queues in use, or the setting of
+ * a private ethtool flag). This is due to the nature of the
+ * ethtool stats API.
+ *
+ * User space programs such as ethtool must make 3 separate
+ * ioctl requests, one for size, one for the strings, and
+ * finally one for the stats. Since these cross into
+ * user space, changes to the number or size could result in
+ * undefined memory access or incorrect string<->value
+ * correlations for statistics.
+ *
+ * Even if it appears to be safe, changes to the size or
+ * order of strings will suffer from race conditions and are
+ * not safe.
+ */
return ICE_ALL_STATS_LEN(netdev);
default:
return -EOPNOTSUPP;
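The comment added above refers to the three-step exchange ethtool performs; a minimal user-space sketch of that sequence follows (interface name, error handling and buffer sizing are assumptions, not part of the driver):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed netdev */

	/* 1) size: how many ETH_SS_STATS strings the driver reports */
	struct {
		struct ethtool_sset_info hdr;
		__u32 buf[1];
	} sset = { .hdr = { .cmd = ETHTOOL_GSSET_INFO,
			    .sset_mask = 1ULL << ETH_SS_STATS } };
	ifr.ifr_data = (void *)&sset;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	__u32 n = sset.buf[0];

	/* 2) strings: one ETH_GSTRING_LEN entry per statistic */
	struct ethtool_gstrings *strs =
		calloc(1, sizeof(*strs) + (size_t)n * ETH_GSTRING_LEN);
	if (!strs)
		return 1;
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = n;
	ifr.ifr_data = (void *)strs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* 3) values: the count from step 1 must still match here */
	struct ethtool_stats *stats =
		calloc(1, sizeof(*stats) + (size_t)n * sizeof(__u64));
	if (!stats)
		return 1;
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (__u32 i = 0; i < n; i++)
		printf("%.*s: %llu\n", ETH_GSTRING_LEN,
		       (char *)strs->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);
	return 0;
}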
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
/* populate per queue stats */
rcu_read_lock();
- ice_for_each_txq(vsi, j) {
+ ice_for_each_alloc_txq(vsi, j) {
ring = READ_ONCE(vsi->tx_rings[j]);
- if (!ring)
- continue;
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ if (ring) {
+ data[i++] = ring->stats.pkts;
+ data[i++] = ring->stats.bytes;
+ } else {
+ data[i++] = 0;
+ data[i++] = 0;
+ }
}
- ice_for_each_rxq(vsi, j) {
+ ice_for_each_alloc_rxq(vsi, j) {
ring = READ_ONCE(vsi->rx_rings[j]);
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ if (ring) {
+ data[i++] = ring->stats.pkts;
+ data[i++] = ring->stats.bytes;
+ } else {
+ data[i++] = 0;
+ data[i++] = 0;
+ }
}
rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
goto done;
}
- for (i = 0; i < vsi->num_txq; i++) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
goto done;
}
- for (i = 0; i < vsi->num_rxq; i++) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 499904874b3f..6076fc87df9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
#define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_HLP_RDY_S 14
-#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S 15
-#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
#define PFINT_OICR_ECC_ERR_S 16
#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
#define PFINT_OICR_MAL_DETECT_S 19
@@ -133,10 +129,6 @@
#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
#define PFINT_OICR_PCI_EXCEPTION_S 21
#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S 22
-#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S 24
-#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
#define PFINT_OICR_HMC_ERR_S 26
#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
#define PFINT_OICR_PE_CRITERR_S 28
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d23a91665b46..068dbc740b76 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
struct ice_rlan_ctx {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..f1e80eed2fd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf))
dev_err(&pf->pdev->dev,
- "Could not handle link event");
+ "Could not handle link event\n");
break;
default:
dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
/**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ u16 ntu;
+
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+ return cq->rq.next_to_clean != ntu;
+}
+
+/**
* ice_clean_adminq_subtask - clean the AdminQ rings
* @pf: board private structure
*/
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- u32 val;
if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
- /* re-enable Admin queue interrupt causes */
- val = rd32(hw, PFINT_FW_CTL);
- wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+ /* There might be a situation where new messages arrive to a control
+ * queue between processing the last message and clearing the
+ * EVENT_PENDING bit. So before exiting, check queue head again (using
+ * ice_ctrlq_pending) and process new messages if any.
+ */
+ if (ice_ctrlq_pending(hw, &hw->adminq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
ice_flush(hw);
}
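The comment above describes a classic "re-check after clearing the pending flag" pattern; a stand-alone sketch of the idea (placeholder names, single-threaded for brevity):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	_Atomic unsigned int head;	/* producer index (hardware side) */
	unsigned int next_to_clean;	/* consumer index (driver side) */
};

static _Atomic bool event_pending;

static bool ring_pending(struct ring *r)
{
	return r->next_to_clean != atomic_load(&r->head);
}

static void drain(struct ring *r)
{
	while (ring_pending(r))
		r->next_to_clean++;	/* "process" one message */
}

static void clean_subtask(struct ring *r)
{
	if (!atomic_load(&event_pending))
		return;

	drain(r);
	atomic_store(&event_pending, false);

	/* a message may have landed between the drain and the clear above,
	 * so look at the head once more instead of waiting for the next
	 * interrupt to set the pending flag again
	 */
	if (ring_pending(r))
		drain(r);
}

int main(void)
{
	struct ring r = { .head = 5, .next_to_clean = 0 };

	atomic_store(&event_pending, true);
	clean_subtask(&r);
	printf("cleaned up to index %u\n", r.next_to_clean);	/* 5 */
	return 0;
}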
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
qcount = numq_tc;
}
- /* find higher power-of-2 of qcount */
- pow = ilog2(qcount);
-
- if (!is_power_of_2(qcount))
- pow++;
+ /* find the (rounded up) power-of-2 of qcount */
+ pow = order_base_2(qcount);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
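The order_base_2() change a few lines up replaces the open-coded "ilog2, plus one unless already a power of two"; a small user-space sketch of the rounded-up exponent it produces (helper names are mine, not the kernel's):

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int n)
{
	return 31 - (unsigned int)__builtin_clz(n);	/* floor(log2(n)), n > 0 */
}

static unsigned int order_base_2_u32(unsigned int n)
{
	if (n <= 1)
		return 0;
	return ilog2_u32(n - 1) + 1;			/* ceil(log2(n)) */
}

int main(void)
{
	for (unsigned int q = 1; q <= 9; q++)
		printf("qcount=%u -> pow=%u (2^pow=%u)\n",
		       q, order_base_2_u32(q), 1u << order_base_2_u32(q));
	return 0;
}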
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
- /* Allow all packets untagged/tagged */
- ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
- ICE_AQ_VSI_PVLAN_MODE_M) >>
- ICE_AQ_VSI_PVLAN_MODE_S);
- /* Show VLAN/UP from packets in Rx descriptors */
- ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
- ICE_AQ_VSI_PVLAN_EMOD_M) >>
- ICE_AQ_VSI_PVLAN_EMOD_S);
+
+ /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+ * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+ * packets untagged/tagged.
+ */
+ ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+ ICE_AQ_VSI_VLAN_MODE_M) >>
+ ICE_AQ_VSI_VLAN_MODE_S);
+
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
- val = (PFINT_OICR_HLP_RDY_M |
- PFINT_OICR_CPM_RDY_M |
- PFINT_OICR_ECC_ERR_M |
+ val = (PFINT_OICR_ECC_ERR_M |
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
- PFINT_OICR_GPIO_M |
- PFINT_OICR_STORM_DETECT_M |
- PFINT_OICR_HMC_ERR_M);
+ PFINT_OICR_HMC_ERR_M |
+ PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
skip_req_irq:
ice_ena_misc_vector(pf);
- val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
- (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
- PFINT_OICR_CTL_CAUSE_ENA_M;
+ val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
- val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
- (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
- PFINT_FW_CTL_CAUSE_ENA_M;
+ val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf);
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
- pf->irq_tracker = NULL;
+ if (pf->irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ pf->irq_tracker = NULL;
+ }
}
/**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
- dev_err(&pdev->dev, "I/O map error %d\n", err);
+ dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
return err;
}
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
enum ice_status status;
/* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
- * tag insertion happens in the Tx hot path, in ice_tx_map.
+ * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+ * insertion happens in the Tx hot path, in ice_tx_map.
*/
- ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
return -EIO;
}
- vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
return 0;
}
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
*/
if (ena) {
/* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
} else {
/* Disable stripping. Leave tag in packet */
- ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
}
+ /* Allow all packets untagged/tagged */
+ ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_num;
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
return -EIO;
}
- vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
return 0;
}
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
/* clear the context structure first */
memset(&rlan_ctx, 0, sizeof(rlan_ctx));
- rlan_ctx.base = ring->dma >> 7;
+ rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
- ice_set_rx_mode(vsi->netdev);
-
- err = ice_restore_vlan(vsi);
- if (err)
- return err;
+ if (vsi->netdev) {
+ ice_set_rx_mode(vsi->netdev);
+ err = ice_restore_vlan(vsi);
+ if (err)
+ return err;
+ }
err = ice_vsi_cfg_txqs(vsi);
if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
*/
static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
- int i, err;
+ int i, err = 0;
if (!vsi->num_txq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
*/
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
- int i, err;
+ int i, err = 0;
if (!vsi->num_rxq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
u8 count = 0;
if (new_mtu == netdev->mtu) {
- netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+ netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 92da0a626ce0..295a8cd87fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* This function will request NVM ownership.
*/
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2e6c1d92cc88..eeae199469b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
return status;
}
- if (owner == ICE_SCHED_NODE_OWNER_LAN)
- vsi->max_lanq[tc] = new_numqs;
+ vsi->max_lanq[tc] = new_numqs;
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 723d15f1e90b..6b7ec2ae5ad6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
- act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+ ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
/* Third action Marker value */
act |= ICE_LG_ACT_GENERIC;
act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
ICE_LG_ACT_GENERIC_VALUE_M;
- act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup tx rx structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 6f4a0d159dbf..9b8ec128ee31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
- bool alloc_from_pool;
+ u8 alloc_from_pool;
};
enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
u8 qgrp_size;
/* Rule creations populate these indicators based on the switch type */
- bool lb_en; /* Indicate if packet can be looped back */
- bool lan_en; /* Indicate if packet can be forwarded to the uplink */
+ u8 lb_en; /* Indicate if packet can be looped back */
+ u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 567067b650c4..31bc998fe200 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
- bool ring_active; /* is ring online or not */
+ u8 ring_active; /* is ring online or not */
/* stats structs */
struct ice_q_stats stats;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 99c8a9a71b5e..97c366e0ca59 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
- bool lse_ena; /* Link Status Event notification */
+ u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
struct ice_link_status link_info_old;
u64 phy_type_low;
enum ice_media_type media_type;
- bool get_link_info;
+ u8 get_link_info;
};
/* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
u32 oem_ver; /* OEM version info */
u16 sr_words; /* Shadow RAM size in words */
u16 ver; /* NVM package version */
- bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
/* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
u16 vsi_id;
- bool in_use; /* suspended or in use */
+ u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
u8 num_children;
u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
struct ice_sched_tx_policy {
u16 max_num_vsis;
u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
- bool rdma_ena;
+ u8 rdma_ena;
};
struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
struct list_head agg_list; /* lists all aggregator */
u8 lport;
#define ICE_LPORT_MASK 0xff
- bool is_vf;
+ u8 is_vf;
};
struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
u8 max_cgds;
u8 sw_entry_point_layer;
- bool evb_veb; /* true for VEB, false for VEPA */
+ u8 evb_veb; /* true for VEB, false for VEPA */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
u8 itr_gran_100;
u8 itr_gran_50;
u8 itr_gran_25;
- bool ucast_shared; /* true if VSIs can share unicast addr */
+ u8 ucast_shared; /* true if VSIs can share unicast addr */
};
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f92f7918112d..5acf3b743876 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
- mdelay(500);
+ msleep(500);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d03c2f0d7592..a32c576c1e65 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
sizeof(struct igb_mac_addr),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!adapter->mac_table)
return -ENOMEM;
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!adapter->shadow_vfta)
return -ENOMEM;
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
- if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+ if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
+ !tx_ring->launchtime_enable)
return;
goto no_csum;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 43664adf7a3c..d3e72d0f66ef 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
return -ENOMEM;
}
- memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_clean = 0;
rxdr->next_to_use = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 94b3165ff543..ccd852ad62a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
/* alloc the udl from per cpu ddp pool */
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
return 0;
/* Extra buffer to be shared by all DDPs for HW work around */
- buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 447098005490..9a23d33a47ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
sizeof(struct ixgbe_mac_addr),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!adapter->mac_table)
return -ENOMEM;
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (adapter->xdp_prog) {
- e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
- return -EPERM;
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
+ }
+ }
}
/*
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
#ifdef CONFIG_IXGBE_DCB
if (tc) {
+ if (adapter->xdp_prog) {
+ e_warn(probe, "DCB is not supported with XDP\n");
+
+ ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+ return -EINVAL;
+ }
+
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
struct tcf_exts *exts, u64 *action, u8 *queue)
{
const struct tc_action *a;
- LIST_HEAD(actions);
+ int i;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
-
+ tcf_exts_for_each_action(i, a, exts) {
/* Drop action */
if (is_tcf_gact_shot(a)) {
*action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
+ if (adapter->xdp_prog) {
+ e_warn(probe, "L2FW offload is not supported with XDP\n");
+ return ERR_PTR(-EINVAL);
+ }
+
/* The hardware supported by ixgbe only filters on the destination MAC
* address. In order to avoid issues we only support offloading modes
* where the hardware can actually provide the functionality.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 6f59933cdff7..3c6f01c41b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ if (adapter->xdp_prog) {
+ e_warn(probe, "SRIOV is not supported with XDP\n");
+ return -EINVAL;
+ }
+
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
u8 num_tcs = adapter->hw_tcs;
+ u32 reg_val;
+ u32 queue;
+ u32 word;
/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+ /* Restart each queue for given VF */
+ for (queue = 0; queue < q_per_pool; queue++) {
+ unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+ /* Re-enabling only configured queues */
+ if (reg_val) {
+ reg_val |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+ }
+ }
+
+ /* Clear VF's mailbox memory */
+ for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+ IXGBE_WRITE_FLUSH(hw);
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 44cfb2021145..41bcbb337e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
/* Translated register #defines */
#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 32d785b616e1..28500417843e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ dev->dev.of_node = port_node;
/* Phylink isn't used w/ ACPI as of now */
if (port_node) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9131a1376e7d..9fed54017659 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
goto out_ok;
modify_ip_header = false;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
+ int k;
+
if (!is_tcf_pedit(a))
continue;
nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
+ for (k = 0; k < nkeys; k++) {
+ htype = tcf_pedit_htype(a, k);
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
modify_ip_header = true;
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
const struct tc_action *a;
LIST_HEAD(actions);
u32 action = 0;
- int err;
+ int err, i;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
if (is_tcf_gact_shot(a)) {
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
- int err;
+ int err, i;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->in_rep = rpriv->rep;
attr->in_mdev = priv->mdev;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
if (is_tcf_gact_shot(a)) {
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 86478a6b99c5..c8c315eb5128 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
struct mlx5_wq_ctrl *wq_ctrl)
{
u32 sq_strides_offset;
+ u32 rq_pg_remainder;
int err;
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
MLX5_GET(qpc, qpc, log_rq_size),
&wq->rq.fbc);
- sq_strides_offset =
- ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+ rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
+ sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
MLX5_GET(qpc, qpc, log_sq_size),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6070d1591d1e..930700413b1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mall_tc_entry->cookie = f->cookie;
- tcf_exts_to_list(f->exts, &actions);
- a = list_first_entry(&actions, struct tc_action, list);
+ a = tcf_exts_first_action(f->exts);
if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3ae930196741..3cdb7aca90b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ebd1b24ebaa5..8d211972c5e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
const struct tc_action *a;
- LIST_HEAD(actions);
- int err;
+ int err, i;
if (!tcf_exts_has_actions(exts))
return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
if (is_tcf_gact_ok(a)) {
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3a96307f51b0..2ab9cf25a08a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return;
+ mlxsw_sp_rif_destroy(rif);
+}
+
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8444aaba01..db715da7bab7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+ void *data)
+{
+ struct mlxsw_sp *mlxsw_sp = data;
+
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+ return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+ netdev_walk_all_upper_dev_rcu(dev,
+ mlxsw_sp_bridge_device_upper_rif_destroy,
+ mlxsw_sp);
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
+ mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+ bridge_device->dev);
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0ba0356ec4e6..9044496803e6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err, tun_out_cnt, out_cnt;
+ int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
u32 csum_updated = 0;
- LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
@@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
- tcf_exts_to_list(flow->exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, flow->exts) {
err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index a8b9fbab5f73..253bdaef1505 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -229,29 +229,16 @@ done:
spin_unlock_bh(&nn->reconfig_lock);
}
-/**
- * nfp_net_reconfig() - Reconfigure the firmware
- * @nn: NFP Net device to reconfigure
- * @update: The value for the update field in the BAR config
- *
- * Write the update word to the BAR and ping the reconfig queue. Then
- * poll until the firmware has acknowledged the update by zeroing the
- * update word.
- *
- * Return: Negative errno on error, 0 on success
- */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
bool cancelled_timer = false;
u32 pre_posted_requests;
- int ret;
spin_lock_bh(&nn->reconfig_lock);
nn->reconfig_sync_present = true;
if (nn->reconfig_timer_active) {
- del_timer(&nn->reconfig_timer);
nn->reconfig_timer_active = false;
cancelled_timer = true;
}
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
spin_unlock_bh(&nn->reconfig_lock);
- if (cancelled_timer)
+ if (cancelled_timer) {
+ del_timer_sync(&nn->reconfig_timer);
nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+ }
/* Run the posted reconfigs which were issued before we started */
if (pre_posted_requests) {
nfp_net_reconfig_start(nn, pre_posted_requests);
nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
+}
+
+static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
+{
+ nfp_net_reconfig_sync_enter(nn);
+
+ spin_lock_bh(&nn->reconfig_lock);
+ nn->reconfig_sync_present = false;
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig() - Reconfigure the firmware
+ * @nn: NFP Net device to reconfigure
+ * @update: The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue. Then
+ * poll until the firmware has acknowledged the update by zeroing the
+ * update word.
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+ int ret;
+
+ nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
+ WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
+ nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d9ab5add27a8..34193c2f1699 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
if (i == QED_INIT_MAX_POLL_COUNT) {
DP_ERR(p_hwfn,
- "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
addr, le32_to_cpu(cmd->expected_val),
val, le32_to_cpu(cmd->op_data));
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index d89a0e22f6e4..5d37ec7e9b0b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,7 +48,7 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-#define CHIP_MCP_RESP_ITER_US 10
+#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define QED_MCP_SHMEM_RDY_ITER_MS 50
+
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
- if (!p_info->public_base)
- return 0;
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn,
+ "The address of the MCP scratch-pad is not configured\n");
+ return -EINVAL;
+ }
p_info->public_base |= GRCBASE_MCP;
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb,
+ sup_msgs));
+
+ /* The driver can notify that there was an MCP reset, and might read the
+ * SHMEM values before the MFW has completed initializing them.
+ * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+ * data ready indication.
+ */
+ while (!p_info->mfw_mb_length && --cnt) {
+ msleep(msec);
+ p_info->mfw_mb_length =
+ (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb, sup_msgs));
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return -EBUSY;
+ }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
"drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
- /* Set the MFW MB address */
- mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_info->public_base,
- PUBLIC_MFW_MB));
- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
- p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
* the first command
*/
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return -EBUSY;
+ }
+
/* Ensure that only a single thread is accessing the mailbox */
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
(p_mb_params->cmd | seq_num), p_mb_params->param);
}
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+ u32 delay = QED_MCP_RESP_ITER_US;
+
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ udelay(delay);
+ cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ udelay(delay);
+ cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_mb_params *p_mb_params,
- u32 max_retries, u32 delay)
+ u32 max_retries, u32 usecs)
{
+ u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
- u32 cnt = 0;
u16 seq_num;
int rc = 0;
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
goto err;
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
- udelay(delay);
+
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+ msleep(msecs);
+ else
+ udelay(usecs);
} while (++cnt < max_retries);
if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
* The spinlock stays locked until the list element is removed.
*/
- udelay(delay);
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+ msleep(msecs);
+ else
+ udelay(usecs);
+
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
+ qed_mcp_print_cpu_info(p_hwfn, p_ptt);
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
return -EAGAIN;
}
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
p_mb_params->mcp_resp,
p_mb_params->mcp_param,
- (cnt * delay) / 1000, (cnt * delay) % 1000);
+ (cnt * usecs) / 1000, (cnt * usecs) % 1000);
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
{
size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = QED_DRV_MB_MAX_RETRIES;
- u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 usecs = QED_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EBUSY;
+ }
+
if (p_mb_params->data_src_size > union_data_size ||
p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+ max_retries = DIV_ROUND_UP(max_retries, 1000);
+ usecs *= 1000;
+ }
+
return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- delay);
+ usecs);
}
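For scale, both paths above keep roughly the same overall budget: without CAN_SLEEP the loop is QED_DRV_MB_MAX_RETRIES (500 * 1000) iterations of a 10 us busy-wait, about 5 seconds, and after the CAN_SLEEP scaling (retries / 1000, delay * 1000) it becomes 500 iterations of 10 ms sleeps, still about 5 seconds but without spinning on the CPU.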
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = &load_rsp;
mb_params.data_dst_size = sizeof(load_rsp);
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 wol_param, mcp_resp, mcp_param;
+ struct qed_mcp_mb_params mb_params;
+ u32 wol_param;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
- return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
- &mcp_resp, &mcp_param);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+ mb_params.param = wol_param;
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
+/* Maximum of 100 msec to wait for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS 10
+#define QED_MCP_HALT_MAX_RETRIES 10
+
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 resp = 0, param = 0;
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
- if (rc)
+ if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ msleep(QED_MCP_HALT_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return 0;
}
+#define QED_MCP_RESUME_SLEEP_MS 10
+
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+ msleep(QED_MCP_RESUME_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+ return 0;
}
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 047976d5c6e9..85e6b3989e7a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -635,11 +635,14 @@ struct qed_mcp_info {
*/
spinlock_t cmd_lock;
+ /* Flag to indicate whether sending a MFW mailbox command is blocked */
+ bool b_block_cmd;
+
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
spinlock_t link_lock;
- bool block_mb_sending;
+
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
};
struct qed_mcp_mb_params {
- u32 cmd;
- u32 param;
- void *p_data_src;
- u8 data_src_size;
- void *p_data_dst;
- u8 data_dst_size;
- u32 mcp_resp;
- u32 mcp_param;
+ u32 cmd;
+ u32 param;
+ void *p_data_src;
+ void *p_data_dst;
+ u8 data_src_size;
+ u8 data_dst_size;
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+ ({ typeof(params) __params = (params); \
+ (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
};
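QED_MB_FLAGS_IS_SET above uses a GNU statement expression with typeof so the params argument is evaluated exactly once and a NULL pointer simply reads as "flag not set"; a compilable user-space sketch of the same pattern (all names are placeholders):

#include <stdio.h>

struct mb_params {
	unsigned int flags;
};

#define FLAG_CAN_SLEEP		(0x1 << 0)
#define FLAG_AVOID_BLOCK	(0x1 << 1)

/* Evaluate 'p' once; a NULL pointer yields 0 instead of a dereference. */
#define FLAGS_IS_SET(p, flag)					\
	({ typeof(p) __p = (p);					\
	   (__p && (__p->flags & FLAG_ ## flag)); })

int main(void)
{
	struct mb_params mb = { .flags = FLAG_CAN_SLEEP };

	printf("%d\n", FLAGS_IS_SET(&mb, CAN_SLEEP));			/* 1 */
	printf("%d\n", FLAGS_IS_SET(&mb, AVOID_BLOCK));			/* 0 */
	printf("%d\n", FLAGS_IS_SET((struct mb_params *)NULL, CAN_SLEEP)); /* 0 */
	return 0;
}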
struct qed_drv_tlv_hdr {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d8ad2dcad8d5..f736f70956fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -562,8 +562,10 @@
0
#define MCP_REG_CPU_STATE \
0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK \
0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
#define PGLUE_B_REG_PF_BAR0_SIZE \
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9673d19308e6..b16ce7d93caf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -2006,18 +2006,16 @@ unlock:
static int qede_parse_actions(struct qede_dev *edev,
struct tcf_exts *exts)
{
- int rc = -EINVAL, num_act = 0;
+ int rc = -EINVAL, num_act = 0, i;
const struct tc_action *a;
bool is_drop = false;
- LIST_HEAD(actions);
if (!tcf_exts_has_actions(exts)) {
DP_NOTICE(edev, "No tc actions received\n");
return rc;
}
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
num_act++;
if (is_tcf_gact_shot(a))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 353f1c129af1..059ba9429e51 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
return status;
}
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
- netdev_features_t features)
-{
- int err;
-
- /* Update the behavior of vlan accel in the adapter */
- err = qlge_update_hw_vlan_features(ndev, features);
- if (err)
- return err;
-
- return features;
-}
-
static int qlge_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
+ int err;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
+ }
return 0;
}
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
- .ndo_fix_features = qlge_fix_features,
.ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0efa977c422d..b08d51bf7a20 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_DLINK, 0x4300,
PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
@@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_hw_reset(tp);
}
-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
/* Set DMA burst size and Interframe Gap Time */
RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
- rtl_set_rx_tx_config_registers(tp);
+ rtl_set_tx_config_registers(tp);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_init_rxcfg(tp);
+
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b81f4faf7b10..1470fc12282b 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Renesas Ethernet AVB device driver
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#ifndef __RAVB_H__
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c06f2df895c2..aff5516b781e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/cache.h>
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5573199c4536..f27a0dc8c563 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* SuperH Ethernet device driver
*
* Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
* Copyright (C) 2008-2014 Renesas Solutions Corp.
* Copyright (C) 2013-2017 Cogent Embedded, Inc.
* Copyright (C) 2014 Codethink Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
#include <linux/module.h>
@@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
.magic = 1,
.cexcr = 1,
};
+
+/* R7S9210 */
+static struct sh_eth_cpu_data r7s9210_data = {
+ .soft_reset = sh_eth_soft_reset,
+
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate_rcar,
+
+ .register_type = SH_ETH_REG_FAST_SH4,
+
+ .edtrr_trns = EDTRR_TRNS_ETHER,
+ .ecsr_value = ECSR_ICD,
+ .ecsipr_value = ECSIPR_ICDIP,
+ .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
+ EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
+ EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
+ EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
+ EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
+ EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
+
+ .fdr_value = 0x0000070f,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .no_ade = 1,
+ .xdfar_rw = 1,
+};
#endif /* CONFIG_OF */
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+ { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
{ }
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f94be99cf400..0c18650bbfe6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
#ifndef __SH_ETH_H__
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index edf20361ea5f..324049eebb9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
select PHYLIB
select CRC32
select MII
- depends on OF && COMMON_CLK && HAS_DMA
+ depends on OF && HAS_DMA
help
Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
@@ -57,7 +57,7 @@ config DWMAC_ANARION
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
- depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
select MFD_SYSCON
help
Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
- depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
select MFD_SYSCON
help
Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
- default ARCH_SOCFPGA
+ default (ARCH_SOCFPGA || ARCH_STRATIX10)
depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
select MFD_SYSCON
help
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
config DWMAC_STI
tristate "STi GMAC support"
default ARCH_STI
- depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+ depends on OF && (ARCH_STI || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
- depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
---help---
Support for Allwinner A20/A31 GMAC ethernet controllers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 76649adf8fb0..c0a855b7ab3b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -112,7 +112,6 @@ struct stmmac_priv {
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
- bool tx_timer_armed;
int tx_coalesce;
int hwts_tx_en;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff1ffb46198a..9f458bb16f2a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
- !priv->tx_timer_armed) {
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
mod_timer(&priv->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer));
- priv->tx_timer_armed = true;
} else {
priv->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- priv->tx_timer_armed = false;
}
skb_tx_timestamp(skb);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1a96dd9c1091..531294f4978b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
struct stmmac_tc_entry *action_entry = entry;
const struct tc_action *act;
struct tcf_exts *exts;
- LIST_HEAD(actions);
+ int i;
exts = cls->knode.exts;
if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
if (frag)
action_entry = frag;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(act, &actions, list) {
+ tcf_exts_for_each_action(i, act, exts) {
/* Accept */
if (is_tcf_gact_ok(act)) {
action_entry->val.af = 1;
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0c1adad7415d..396e1cd10667 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
struct device_node *node;
struct cpsw_phy_sel_priv *priv;
- node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+ node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
if (!node) {
- dev_err(dev, "Phy mode driver DT not found\n");
- return;
+ node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+ if (!node) {
+ dev_err(dev, "Phy mode driver DT not found\n");
+ return;
+ }
}
dev = bus_find_device(&platform_bus_type, NULL, node, match);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 507f68190cb1..70921bbe0e28 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
+#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct net_device_context *net_device_ctx;
+ struct device *pdev = vf_netdev->dev.parent;
struct netvsc_device *netvsc_dev;
int ret;
if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
+ if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
+ return NOTIFY_DONE;
+
/*
* We will use the MAC address to locate the synthetic interface to
* associate with the VF interface. If we don't find a matching
@@ -2201,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ /* We must get rtnl lock before scheduling nvdev->subchan_work,
+ * otherwise netvsc_subchan_work() can get rtnl lock first and wait for
+ * all subchannels to show up, but that may not happen because
+ * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+ * -> ... -> device_add() -> ... -> __device_attach() can't get
+ * the device lock, so all the subchannels can't be processed --
+ * finally netvsc_subchan_work() hangs forever.
+ */
+ rtnl_lock();
+
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
@@ -2219,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev,
else
net->max_mtu = ETH_DATA_LEN;
- rtnl_lock();
ret = register_netdevice(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4637d980310e..52fffb98fde9 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
switch (type) {
case hwmon_temp:
switch (attr) {
- case hwmon_temp_input:
case hwmon_temp_min_alarm:
case hwmon_temp_max_alarm:
case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_temp_max:
case hwmon_temp_lcrit:
case hwmon_temp_crit:
+ if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+ return 0;
+ /* fall through */
+ case hwmon_temp_input:
return 0444;
default:
return 0;
}
case hwmon_in:
switch (attr) {
- case hwmon_in_input:
case hwmon_in_min_alarm:
case hwmon_in_max_alarm:
case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_in_max:
case hwmon_in_lcrit:
case hwmon_in_crit:
+ if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+ return 0;
+ /* fall through */
+ case hwmon_in_input:
return 0444;
default:
return 0;
}
case hwmon_curr:
switch (attr) {
- case hwmon_curr_input:
case hwmon_curr_min_alarm:
case hwmon_curr_max_alarm:
case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_curr_max:
case hwmon_curr_lcrit:
case hwmon_curr_crit:
+ if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+ return 0;
+ /* fall through */
+ case hwmon_curr_input:
return 0444;
default:
return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
channel == 1)
return 0;
switch (attr) {
- case hwmon_power_input:
case hwmon_power_min_alarm:
case hwmon_power_max_alarm:
case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
case hwmon_power_max:
case hwmon_power_lcrit:
case hwmon_power_crit:
+ if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+ return 0;
+ /* fall through */
+ case hwmon_power_input:
return 0444;
default:
return 0;
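The sfp change above gates the alarm/warning attributes on module support while keeping the plain input attribute always visible, using a guarded fall-through in the switch. A small userspace sketch of that visibility pattern follows; the names are illustrative, not the driver's.

/* Userspace sketch of the visibility rule, not the driver itself:
 * alarm/warning attributes are hidden unless the module advertises
 * alarm support, while the plain input attribute is always readable. */
#include <stdbool.h>
#include <stdio.h>

enum attr { ATTR_INPUT, ATTR_MIN_ALARM, ATTR_MAX_ALARM };

static unsigned int temp_attr_mode(enum attr attr, bool has_alarms)
{
	switch (attr) {
	case ATTR_MIN_ALARM:
	case ATTR_MAX_ALARM:
		if (!has_alarms)
			return 0;	/* attribute not visible */
		/* fall through */
	case ATTR_INPUT:
		return 0444;		/* read-only, as in the driver */
	default:
		return 0;
	}
}

int main(void)
{
	printf("input=%o alarm(no support)=%o alarm(support)=%o\n",
	       temp_attr_mode(ATTR_INPUT, false),
	       temp_attr_mode(ATTR_MIN_ALARM, false),
	       temp_attr_mode(ATTR_MIN_ALARM, true));
	return 0;
}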
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 97742708460b..2cd71bdb6484 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
- udev->serial && !strcmp(udev->serial, "000001000000")) {
+ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
+ (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b4c3a957c102..73969dbeb5c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd, *copy_rd;
- int size_of_regd, regd_to_copy, wmms_to_copy;
- int size_of_wmms = 0;
+ int size_of_regd, regd_to_copy;
struct ieee80211_reg_rule *rule;
- struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
struct regdb_ptrs *regdb_ptrs;
enum nl80211_band band;
int center_freq, prev_center_freq = 0;
- int valid_rules = 0, n_wmms = 0;
- int i;
+ int valid_rules = 0;
bool new_rule;
int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
sizeof(struct ieee80211_regdomain) +
num_of_ch * sizeof(struct ieee80211_reg_rule);
- if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
- size_of_wmms =
- num_of_ch * sizeof(struct ieee80211_wmm_rule);
-
- regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+ regd = kzalloc(size_of_regd, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd->alpha2[0] = fw_mcc >> 8;
regd->alpha2[1] = fw_mcc & 0xff;
- wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
band == NL80211_BAND_2GHZ)
continue;
- if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
- &regdb_ptrs[n_wmms].token, wmm_rule)) {
- /* Add only new rules */
- for (i = 0; i < n_wmms; i++) {
- if (regdb_ptrs[i].token ==
- regdb_ptrs[n_wmms].token) {
- rule->wmm_rule = regdb_ptrs[i].rule;
- break;
- }
- }
- if (i == n_wmms) {
- rule->wmm_rule = wmm_rule;
- regdb_ptrs[n_wmms++].rule = wmm_rule;
- wmm_rule++;
- }
- }
+ reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
}
regd->n_reg_rules = valid_rules;
- regd->n_wmm_rules = n_wmms;
/*
* Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd_to_copy = sizeof(struct ieee80211_regdomain) +
valid_rules * sizeof(struct ieee80211_reg_rule);
- wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
-
- copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+ copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
if (!copy_rd) {
copy_rd = ERR_PTR(-ENOMEM);
goto out;
}
memcpy(copy_rd, regd, regd_to_copy);
- memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
- wmms_to_copy);
-
- d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
- s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
- for (i = 0; i < regd->n_reg_rules; i++) {
- if (!regd->reg_rules[i].wmm_rule)
- continue;
-
- copy_rd->reg_rules[i].wmm_rule = d_wmm +
- (regd->reg_rules[i].wmm_rule - s_wmm);
- }
out:
kfree(regdb_ptrs);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 998dfac0fcff..1068757ec42e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -34,6 +34,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
+#include <linux/nospec.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ if (param.channels < 1) {
+ GENL_SET_ERR_MSG(info, "must have at least one channel");
+ return -EINVAL;
+ }
+
if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
GENL_SET_ERR_MSG(info, "too many channels specified");
return -EINVAL;
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
kfree(hwname);
return -EINVAL;
}
+
+ idx = array_index_nospec(idx,
+ ARRAY_SIZE(hwsim_world_regdom_custom));
param.regd = hwsim_world_regdom_custom[idx];
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1b9951d2067e..d668682f91df 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
old_value = *dbbuf_db;
*dbbuf_db = value;
+ /*
+ * Ensure that the doorbell is updated before reading the event
+ * index from memory. The controller needs to provide similar
+ * ordering to ensure the event index is updated before reading
+ * the doorbell.
+ */
+ mb();
+
if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
return false;
}
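The new mb() above orders the shadow-doorbell write against the following event-index read. A hedged userspace sketch of the same ordering is shown below, using C11 atomics and a wrap-safe comparison in the style of the dbbuf code; all names here are invented for the example, not the nvme driver's.

/* Sketch: store the shadow doorbell, full barrier, then read the event index. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
	/* wrap-safe: did new_idx move past event_idx since old_idx? */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}

static bool update_and_check(uint16_t value, _Atomic uint16_t *dbbuf_db,
			     _Atomic uint16_t *dbbuf_ei)
{
	uint16_t old_value = atomic_load_explicit(dbbuf_db, memory_order_relaxed);

	atomic_store_explicit(dbbuf_db, value, memory_order_relaxed);

	/* make the doorbell update visible before reading the event index */
	atomic_thread_fence(memory_order_seq_cst);

	return need_event(atomic_load_explicit(dbbuf_ei, memory_order_relaxed),
			  value, old_value);
}

int main(void)
{
	_Atomic uint16_t db = 0, ei = 4;

	/* doorbell moves 0 -> 8 past event index 4, so an event is needed */
	return update_and_check(8, &db, &ei) ? 0 : 1;
}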
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ebf3e7a6c49e..b5ec96abd048 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void)
error = nvmet_init_discovery();
if (error)
- goto out;
+ goto out_free_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
+out_free_work_queue:
+ destroy_workqueue(buffered_io_wq);
out:
return error;
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 34712def81b1..5251689a1d9a 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
struct fcloop_tport *tport = tls_req->tport;
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
- if (tport->remoteport)
+ if (!tport || tport->remoteport)
lsreq->done(lsreq, tls_req->status);
}
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
+ tls_req->tport = NULL;
schedule_work(&tls_req->work);
return ret;
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 466e3c8582f0..9095b8290150 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex);
*/
DEFINE_RAW_SPINLOCK(devtree_lock);
+bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+ const char *node_name;
+ size_t len;
+
+ if (!np)
+ return false;
+
+ node_name = kbasename(np->full_name);
+ len = strchrnul(node_name, '@') - node_name;
+
+ return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
+}
+
+bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+ if (!np)
+ return false;
+
+ return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
+}
+
int of_n_addr_cells(struct device_node *np)
{
u32 cells;
@@ -720,6 +742,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_compatible_child - Find compatible child node
+ * @parent: parent node
+ * @compatible: compatible string
+ *
+ * Look up the child node whose compatible property contains the given compatible
+ * string.
+ *
+ * Returns a node pointer with refcount incremented, use of_node_put() on it
+ * when done; or NULL if not found.
+ */
+struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(parent, child) {
+ if (of_device_is_compatible(child, compatible))
+ break;
+ }
+
+ return child;
+}
+EXPORT_SYMBOL(of_get_compatible_child);
+
+/**
* of_get_child_by_name - Find the child node by name for a given parent
* @node: parent node
* @name: child name to look for.
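The new of_node_name_eq() helper added above compares only the node-name part before the '@' unit address. The standalone sketch below models that comparison, with strrchr()/strchr() from libc standing in for the kernel's kbasename() and strchrnul().

/* Sketch of the "name@unit-address" comparison, not the kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool node_name_eq(const char *full_name, const char *name)
{
	const char *base = strrchr(full_name, '/');
	const char *at;
	size_t len;

	base = base ? base + 1 : full_name;		/* basename of the path */
	at = strchr(base, '@');
	len = at ? (size_t)(at - base) : strlen(base);	/* length before '@' */

	return strlen(name) == len && strncmp(base, name, len) == 0;
}

int main(void)
{
	printf("%d %d\n",
	       node_name_eq("/soc/ethernet@ff0e0000", "ethernet"),	/* 1 */
	       node_name_eq("/soc/ethernet@ff0e0000", "ether"));	/* 0 */
	return 0;
}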
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7ba90c290a42..6c59673933e9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
if (!dev)
goto err_clear_flag;
+ /* AMBA devices only support a single DMA mask */
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+
/* setup generic device info */
dev->dev.of_node = of_node_get(node);
dev->dev.fwnode = &node->fwnode;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8fc851a9e116..7c097006c54d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
default y
depends on SCSI
---help---
- This option enables the new blk-mq based I/O path for SCSI
- devices by default. With the option the scsi_mod.use_blk_mq
- module/boot option defaults to Y, without it to N, but it can
- still be overridden either way.
+ This option enables the blk-mq based I/O path for SCSI devices by
+ default. With this option the scsi_mod.use_blk_mq module/boot
+ option defaults to Y, without it to N, but it can still be
+ overridden either way.
- If unsure say N.
+ If unsure say Y.
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 29bf1e60f542..39eb415987fc 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1346,7 +1346,7 @@ struct fib {
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
u8 devtype; /* device type */
- u8 reset_state; /* 0 - no reset, 1..x - */
+ s8 reset_state; /* 0 - no reset, 1..x - */
/* after xth TM LUN reset */
u16 qd_limit;
u32 scan_counter;
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 23d07e9f87d0..e51923886475 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
}
/**
+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ * @caps32: a 32-bit Port Capabilities value
+ *
+ * Returns the equivalent 16-bit Port Capabilities value. Note that
+ * not all 32-bit Port Capabilities can be represented in the 16-bit
+ * Port Capabilities and some fields/values may not make it.
+ */
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+ fw_port_cap16_t caps16 = 0;
+
+ #define CAP32_TO_CAP16(__cap) \
+ do { \
+ if (caps32 & FW_PORT_CAP32_##__cap) \
+ caps16 |= FW_PORT_CAP_##__cap; \
+ } while (0)
+
+ CAP32_TO_CAP16(SPEED_100M);
+ CAP32_TO_CAP16(SPEED_1G);
+ CAP32_TO_CAP16(SPEED_10G);
+ CAP32_TO_CAP16(SPEED_25G);
+ CAP32_TO_CAP16(SPEED_40G);
+ CAP32_TO_CAP16(SPEED_100G);
+ CAP32_TO_CAP16(FC_RX);
+ CAP32_TO_CAP16(FC_TX);
+ CAP32_TO_CAP16(802_3_PAUSE);
+ CAP32_TO_CAP16(802_3_ASM_DIR);
+ CAP32_TO_CAP16(ANEG);
+ CAP32_TO_CAP16(FORCE_PAUSE);
+ CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(MDISTRAIGHT);
+ CAP32_TO_CAP16(FEC_RS);
+ CAP32_TO_CAP16(FEC_BASER_RS);
+
+ #undef CAP32_TO_CAP16
+
+ return caps16;
+}
+
+/**
* lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
* @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
*
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
val = 1;
csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
- hw->pfn, 0, 1, &param, &val, false,
+ hw->pfn, 0, 1, &param, &val, true,
NULL);
if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
return -EINVAL;
}
- csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
- &val);
- if (retval != FW_SUCCESS) {
- csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
- portid, retval);
- mempool_free(mbp, hw->mb_mempool);
- return -EINVAL;
- }
-
- fw_caps = val;
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ 0, NULL);
+ fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
}
/* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
}
/*
- * Returns -EINVAL if attempts to flash the firmware failed
- * else returns 0,
+ * Returns -EINVAL if attempts to flash the firmware failed,
+ * -ENOMEM if memory allocation failed, else returns 0;
* if flashing was not attempted because the card had the
* latest firmware ECANCELED is returned
*/
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
return -EINVAL;
}
+ /* allocate memory to read the header of the firmware on the
+ * card
+ */
+ card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+ if (!card_fw)
+ return -ENOMEM;
+
if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
fw_bin_file = FW_FNAME_T5;
else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
fw_size = fw->size;
}
- /* allocate memory to read the header of the firmware on the
- * card
- */
- card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
-
/* upgrade FW logic */
ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
hw->fw_state, reset);
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9e73ef771eb7..e351af6e7c81 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
int csio_hw_start(struct csio_hw *);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index c026417269c3..6f13673d6aa0 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
if (fw_caps == FW_CAPS16)
- cmdp->u.l1cfg.rcap = cpu_to_be32(fc);
+ cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
else
cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
}
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
*pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
*acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
} else {
- *pcaps = ntohs(rsp->u.info32.pcaps32);
- *acaps = ntohs(rsp->u.info32.acaps32);
+ *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
+ *acaps = be32_to_cpu(rsp->u.info32.acaps32);
}
}
}
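The csio_mb.c hunk above replaces a 16-bit byte swap with a 32-bit one for the 32-bit capability fields. The short userspace illustration below shows why converting a big-endian 32-bit value with a 16-bit helper loses data; htonl()/ntohl()/ntohs() from arpa/inet.h stand in for the kernel's cpu_to_be32()/be32_to_cpu()/ntohs().

/* Illustration only: 16-bit vs 32-bit conversion of a big-endian field. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t caps_cpu = 0x12345678;
	uint32_t caps_be32 = htonl(caps_cpu);		/* as sent by the firmware */

	uint32_t wrong = ntohs((uint16_t)caps_be32);	/* 16-bit conversion drops bits */
	uint32_t right = ntohl(caps_be32);		/* 32-bit conversion */

	printf("wrong=0x%08x right=0x%08x\n",
	       (unsigned int)wrong, (unsigned int)right);
	return 0;
}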
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f02dcc875a09..ea4b0bb0c1cd 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
-struct scsi_host_mq_in_flight {
- int cnt;
-};
-
-static void scsi_host_check_in_flight(struct request *rq, void *data,
- bool reserved)
-{
- struct scsi_host_mq_in_flight *in_flight = data;
-
- if (blk_mq_request_started(rq))
- in_flight->cnt++;
-}
-
/**
* scsi_host_busy - Return the host busy counter
* @shost: Pointer to Scsi_Host to inc.
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
- struct scsi_host_mq_in_flight in_flight = {
- .cnt = 0,
- };
-
- if (!shost->use_blk_mq)
- return atomic_read(&shost->host_busy);
-
- blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
- &in_flight);
- return in_flight.cnt;
+ return atomic_read(&shost->host_busy);
}
EXPORT_SYMBOL(scsi_host_busy);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 58bb70b886d7..c120929d4ffe 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
- .max_sectors = 1024,
+ .max_sectors = 2048,
.no_write_same = 1,
};
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0d0da5f43d6..43732e8d1347 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -672,7 +672,7 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
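The lpfc.h change above turns LS_MDS_LOOPBACK into a proper single-bit flag: 0x16 (0b10110) overlaps the existing 0x2 and 0x4 bits, while 0x10 is the next free bit. A small sketch of the difference, reusing the flag values from the hunk in an illustrative check:

/* Sketch: why a multi-bit "flag" value makes unrelated tests succeed. */
#include <stdio.h>

#define LS_NPIV_FAB_SUPPORTED	0x2
#define LS_IGNORE_ERATT		0x4
#define LS_MDS_LINK_DOWN	0x8
#define LS_MDS_LOOPBACK_OLD	0x16	/* buggy: overlaps 0x2 and 0x4 */
#define LS_MDS_LOOPBACK_NEW	0x10	/* fixed: a dedicated bit */

static void report(const char *tag, unsigned int flags)
{
	printf("%s: NPIV=%d IGNORE_ERATT=%d MDS_LINK_DOWN=%d\n", tag,
	       !!(flags & LS_NPIV_FAB_SUPPORTED),
	       !!(flags & LS_IGNORE_ERATT),
	       !!(flags & LS_MDS_LINK_DOWN));
}

int main(void)
{
	report("old 0x16", LS_MDS_LOOPBACK_OLD);	/* unrelated flags read as set */
	report("new 0x10", LS_MDS_LOOPBACK_NEW);	/* no overlap */
	return 0;
}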
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5a25553415f8..057a60abe664 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
/*
# lpfc_fdmi_on: Controls FDMI support.
-# 0 No FDMI support (default)
-# 1 Traditional FDMI support
+# 0 No FDMI support
+# 1 Traditional FDMI support (default)
# Traditional FDMI support means the driver will assume FDMI-2 support;
# however, if that fails, it will fallback to FDMI-1.
# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value of
# lpfc_fdmi_on.
-# Value range [0,1]. Default value is 0.
+# Value range [0,1]. Default value is 1.
*/
-LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0adfb3bce0fd..eb97d2dd3651 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
unsigned long flags;
rcu_read_lock();
- if (!shost->use_blk_mq)
- atomic_dec(&shost->host_busy);
+ atomic_dec(&shost->host_busy);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- /*
- * blk-mq can handle host queue busy efficiently via host-wide driver
- * tag allocation
- */
-
- if (!shost->use_blk_mq && shost->can_queue > 0 &&
+ if (shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue)
return true;
if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
if (scsi_host_in_recovery(shost))
return 0;
- if (!shost->use_blk_mq)
- busy = atomic_inc_return(&shost->host_busy) - 1;
- else
- busy = 0;
+ busy = atomic_inc_return(&shost->host_busy) - 1;
if (atomic_read(&shost->host_blocked) > 0) {
if (busy)
goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
+ if (shost->can_queue > 0 && busy >= shost->can_queue)
goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
* with the locks as normal issue path does.
*/
atomic_inc(&sdev->device_busy);
-
- if (!shost->use_blk_mq)
- atomic_inc(&shost->host_busy);
+ atomic_inc(&shost->host_busy);
if (starget->can_queue > 0)
atomic_inc(&starget->target_busy);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 768cce0ccb80..76a262674c8d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
sgl->offset = sg_offset;
if (!ret) {
- pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
- __func__, 0, xferlen, sgcnt);
+ pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+ __func__, 0, xferlen, sgcnt);
goto rel_ppods;
}
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
if (ret < 0) {
- pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
- csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+ pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+ csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
ttinfo->sgl = NULL;
ttinfo->nents = 0;
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 977a8307fbb1..4f2816559205 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
mutex_lock(&tz->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED) {
tz->polling_delay = data->polling_delay;
- else
+ tz->passive_delay = data->passive_delay;
+ } else {
tz->polling_delay = 0;
+ tz->passive_delay = 0;
+ }
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index c866cc165960..450ed66edf58 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -1,16 +1,6 @@
-/*
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2016 Freescale Semiconductor, Inc.
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
int ret;
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
- u32 site = 0;
+ u32 site;
if (!np) {
dev_err(&pdev->dev, "Device OF-Node is NULL");
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
if (ret < 0)
goto err_tmu;
- data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
- data, &tmu_tz_ops);
+ data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ data->sensor_id,
+ data, &tmu_tz_ops);
if (IS_ERR(data->tz)) {
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
}
/* Enable monitoring */
- site |= 0x1 << (15 - data->sensor_id);
+ site = 0x1 << (15 - data->sensor_id);
tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
return 0;
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
{
struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
- thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
-
/* Disable monitoring */
tmu_write(data, TMR_DISABLE, &data->regs->tmr);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 766521eb7071..7aed5337bdd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen3 THS thermal sensor driver
* Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
*
* Copyright (C) 2016 Renesas Electronics Corporation.
* Copyright (C) 2016 Sang Engineering
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#include <linux/delay.h>
#include <linux/err.h>
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index e77e63070e99..78f932822d38 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car THS/TSC thermal sensor driver
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/delay.h>
#include <linux/err.h>
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = {
};
module_platform_driver(rcar_thermal_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 96c1d8400822..b13c6b4b2c66 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
list_for_each_entry_safe(node, n, &d->pending_list, node) {
struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
if (msg->iova <= vq_msg->iova &&
- msg->iova + msg->size - 1 > vq_msg->iova &&
+ msg->iova + msg->size - 1 >= vq_msg->iova &&
vq_msg->type == VHOST_IOTLB_MISS) {
vhost_poll_queue(&node->vq->poll);
list_del(&node->node);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index f2088838f690..5b471889d723 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n",
+ xenbus_strstate(to_xenbus_device(dev)->state));
+}
+static DEVICE_ATTR_RO(state);
+
static struct attribute *xenbus_dev_attrs[] = {
&dev_attr_nodename.attr,
&dev_attr_devtype.attr,
&dev_attr_modalias.attr,
+ &dev_attr_state.attr,
NULL,
};
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 0c3285c8db95..476dcbb79713 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
goto inval;
args = strchr(name, ' ');
- if (!args)
- goto inval;
- do {
- *args++ = 0;
- } while(*args == ' ');
- if (!*args)
- goto inval;
+ if (args) {
+ do {
+ *args++ = 0;
+ } while(*args == ' ');
+ if (!*args)
+ goto inval;
+ }
/* determine command to perform */
_debug("cmd=%s name=%s args=%s", buf, name, args);
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
afs_put_cell(net, cell);
- printk("kAFS: Added new cell '%s'\n", name);
} else {
goto inval;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 53af9f5253f4..2cddfe7806a4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1280,6 +1280,7 @@ struct btrfs_root {
int send_in_progress;
struct btrfs_subvolume_writers *subv_writers;
atomic_t will_be_snapshotted;
+ atomic_t snapshot_force_cow;
/* For qgroup metadata reserved space */
spinlock_t qgroup_meta_rsv_lock;
@@ -3390,9 +3391,9 @@ do { \
#define btrfs_debug(fs_info, fmt, args...) \
btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+ btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
- btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+ btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#endif
@@ -3404,6 +3405,13 @@ do { \
rcu_read_unlock(); \
} while (0)
+#define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \
+do { \
+ rcu_read_lock(); \
+ btrfs_no_printk(fs_info, fmt, ##args); \
+ rcu_read_unlock(); \
+} while (0)
+
#define btrfs_printk_ratelimited(fs_info, fmt, args...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5124c15705ce..05dc3c17cb62 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
atomic_set(&root->log_batch, 0);
refcount_set(&root->refs, 1);
atomic_set(&root->will_be_snapshotted, 0);
+ atomic_set(&root->snapshot_force_cow, 0);
root->log_transid = 0;
root->log_transid_committed = -1;
root->last_log_commit = 0;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index de6f75f5547b..2d9074295d7f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
* root: the root of the parent directory
* rsv: block reservation
* items: the number of items that we need to reserve space for
- * qgroup_reserved: used to return the reserved size in qgroup
+ * use_global_rsv: allow fallback to the global block reservation
*
* This function is used to reserve the space for snapshot/subvolume
* creation and deletion. Those operations are different with the
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
* the space reservation mechanism in start_transaction().
*/
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv,
- int items,
+ struct btrfs_block_rsv *rsv, int items,
bool use_global_rsv)
{
+ u64 qgroup_num_bytes = 0;
u64 num_bytes;
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
/* One for parent inode, two for dir entries */
- num_bytes = 3 * fs_info->nodesize;
- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+ qgroup_num_bytes = 3 * fs_info->nodesize;
+ ret = btrfs_qgroup_reserve_meta_prealloc(root,
+ qgroup_num_bytes, true);
if (ret)
return ret;
- } else {
- num_bytes = 0;
}
num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
if (ret == -ENOSPC && use_global_rsv)
ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
- if (ret && num_bytes)
- btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+ if (ret && qgroup_num_bytes)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9357a19d2bff..3ea5339603cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 disk_num_bytes;
u64 ram_bytes;
int extent_type;
- int ret, err;
+ int ret;
int type;
int nocow;
int check_prev = 1;
@@ -1403,11 +1403,8 @@ next_slot:
* if there are pending snapshots for this root,
* we fall into common COW way.
*/
- if (!nolock) {
- err = btrfs_start_write_no_snapshotting(root);
- if (!err)
- goto out_check;
- }
+ if (!nolock && atomic_read(&root->snapshot_force_cow))
+ goto out_check;
/*
* force cow if csum exists in the range.
* this ensure that csum for a given extent are
@@ -1416,9 +1413,6 @@ next_slot:
ret = csum_exist_in_range(fs_info, disk_bytenr,
num_bytes);
if (ret) {
- if (!nolock)
- btrfs_end_write_no_snapshotting(root);
-
/*
* ret could be -EIO if the above fails to read
* metadata.
@@ -1431,11 +1425,8 @@ next_slot:
WARN_ON_ONCE(nolock);
goto out_check;
}
- if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
- if (!nolock)
- btrfs_end_write_no_snapshotting(root);
+ if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
goto out_check;
- }
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
@@ -1448,8 +1439,6 @@ next_slot:
out_check:
if (extent_end <= start) {
path->slots[0]++;
- if (!nolock && nocow)
- btrfs_end_write_no_snapshotting(root);
if (nocow)
btrfs_dec_nocow_writers(fs_info, disk_bytenr);
goto next_slot;
@@ -1471,8 +1460,6 @@ out_check:
end, page_started, nr_written, 1,
NULL);
if (ret) {
- if (!nolock && nocow)
- btrfs_end_write_no_snapshotting(root);
if (nocow)
btrfs_dec_nocow_writers(fs_info,
disk_bytenr);
@@ -1492,8 +1479,6 @@ out_check:
ram_bytes, BTRFS_COMPRESS_NONE,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
- if (!nolock && nocow)
- btrfs_end_write_no_snapshotting(root);
if (nocow)
btrfs_dec_nocow_writers(fs_info,
disk_bytenr);
@@ -1532,8 +1517,6 @@ out_check:
EXTENT_CLEAR_DATA_RESV,
PAGE_UNLOCK | PAGE_SET_PRIVATE2);
- if (!nolock && nocow)
- btrfs_end_write_no_snapshotting(root);
cur_offset = extent_end;
/*
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
drop_inode = 1;
} else {
struct dentry *parent = dentry->d_parent;
+ int ret;
+
err = btrfs_update_inode(trans, root, inode);
if (err)
goto fail;
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
goto fail;
}
d_instantiate(dentry, inode);
- btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
+ ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
+ true, NULL);
+ if (ret == BTRFS_NEED_TRANS_COMMIT) {
+ err = btrfs_commit_transaction(trans);
+ trans = NULL;
+ }
}
fail:
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir,
u64 new_idx = 0;
u64 root_objectid;
int ret;
- int ret2;
bool root_log_pinned = false;
bool dest_log_pinned = false;
+ struct btrfs_log_ctx ctx_root;
+ struct btrfs_log_ctx ctx_dest;
+ bool sync_log_root = false;
+ bool sync_log_dest = false;
+ bool commit_transaction = false;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
+ btrfs_init_log_ctx(&ctx_root, old_inode);
+ btrfs_init_log_ctx(&ctx_dest, new_inode);
+
/* close the race window with snapshot create/destroy ioctl */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&fs_info->subvol_sem);
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
if (root_log_pinned) {
parent = new_dentry->d_parent;
- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
- parent);
+ ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+ BTRFS_I(old_dir), parent,
+ false, &ctx_root);
+ if (ret == BTRFS_NEED_LOG_SYNC)
+ sync_log_root = true;
+ else if (ret == BTRFS_NEED_TRANS_COMMIT)
+ commit_transaction = true;
+ ret = 0;
btrfs_end_log_trans(root);
root_log_pinned = false;
}
if (dest_log_pinned) {
- parent = old_dentry->d_parent;
- btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
- parent);
+ if (!commit_transaction) {
+ parent = old_dentry->d_parent;
+ ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
+ BTRFS_I(new_dir), parent,
+ false, &ctx_dest);
+ if (ret == BTRFS_NEED_LOG_SYNC)
+ sync_log_dest = true;
+ else if (ret == BTRFS_NEED_TRANS_COMMIT)
+ commit_transaction = true;
+ ret = 0;
+ }
btrfs_end_log_trans(dest);
dest_log_pinned = false;
}
@@ -9583,8 +9594,26 @@ out_fail:
dest_log_pinned = false;
}
}
- ret2 = btrfs_end_transaction(trans);
- ret = ret ? ret : ret2;
+ if (!ret && sync_log_root && !commit_transaction) {
+ ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
+ &ctx_root);
+ if (ret)
+ commit_transaction = true;
+ }
+ if (!ret && sync_log_dest && !commit_transaction) {
+ ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
+ &ctx_dest);
+ if (ret)
+ commit_transaction = true;
+ }
+ if (commit_transaction) {
+ ret = btrfs_commit_transaction(trans);
+ } else {
+ int ret2;
+
+ ret2 = btrfs_end_transaction(trans);
+ ret = ret ? ret : ret2;
+ }
out_notrans:
if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
int ret;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
bool log_pinned = false;
+ struct btrfs_log_ctx ctx;
+ bool sync_log = false;
+ bool commit_transaction = false;
if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return -EPERM;
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (log_pinned) {
struct dentry *parent = new_dentry->d_parent;
- btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
- parent);
+ btrfs_init_log_ctx(&ctx, old_inode);
+ ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+ BTRFS_I(old_dir), parent,
+ false, &ctx);
+ if (ret == BTRFS_NEED_LOG_SYNC)
+ sync_log = true;
+ else if (ret == BTRFS_NEED_TRANS_COMMIT)
+ commit_transaction = true;
+ ret = 0;
btrfs_end_log_trans(root);
log_pinned = false;
}
@@ -9856,7 +9895,19 @@ out_fail:
btrfs_end_log_trans(root);
log_pinned = false;
}
- btrfs_end_transaction(trans);
+ if (!ret && sync_log) {
+ ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
+ if (ret)
+ commit_transaction = true;
+ }
+ if (commit_transaction) {
+ ret = btrfs_commit_transaction(trans);
+ } else {
+ int ret2;
+
+ ret2 = btrfs_end_transaction(trans);
+ ret = ret ? ret : ret2;
+ }
out_notrans:
if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 63600dc2ac4c..d60b6caf09e8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
int ret;
+ bool snapshot_force_cow = false;
if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
return -EINVAL;
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto free_pending;
}
+ /*
+ * Force new buffered writes to reserve space even when NOCOW is
+ * possible. This is to prevent later writeback (running delalloc) from
+ * falling back to COW mode and unexpectedly failing with ENOSPC.
+ */
atomic_inc(&root->will_be_snapshotted);
smp_mb__after_atomic();
/* wait for no snapshot writes */
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (ret)
goto dec_and_free;
+ /*
+ * All previous writes have started writeback in NOCOW mode, so now
+ * we force future writes to fall back to COW mode during snapshot
+ * creation.
+ */
+ atomic_inc(&root->snapshot_force_cow);
+ snapshot_force_cow = true;
+
btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
btrfs_init_block_rsv(&pending_snapshot->block_rsv,
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
fail:
btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
dec_and_free:
+ if (snapshot_force_cow)
+ atomic_dec(&root->snapshot_force_cow);
if (atomic_dec_and_test(&root->will_be_snapshotted))
wake_up_var(&root->will_be_snapshotted);
free_pending:
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
same_lock_start = min_t(u64, loff, dst_loff);
same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+ } else {
+ /*
+ * If the source and destination inodes are different, the
+ * source's range end offset matches the source's i_size, that
+ * i_size is not a multiple of the sector size, and the
+ * destination range does not go past the destination's i_size,
+ * we must round down the length to the nearest sector size
+ * multiple. If we don't do this adjustment we end up replacing
+ * with zeroes the bytes in the range that starts at the
+ * deduplication range's end offset and ends at the next sector
+ * size multiple.
+ */
+ if (loff + olen == i_size_read(src) &&
+ dst_loff + len < i_size_read(dst)) {
+ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+ len = round_down(i_size_read(src), sz) - loff;
+ olen = len;
+ }
}
again:
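The dedupe comment in the ioctl.c hunk above rounds the length down to a sector multiple when the source range ends at a non-aligned i_size. Below is a worked example under assumed numbers (sectorsize 4096, source i_size 10000, range starting at offset 4096); the modulo-based round_down macro stands in for the kernel's.

/* Worked example of the length adjustment, with illustrative values. */
#include <stdint.h>
#include <stdio.h>

#define round_down(x, y)	((x) - ((x) % (y)))

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t src_isize = 10000;			/* not sector aligned */
	uint64_t loff = 4096;				/* start of the source range */
	uint64_t olen = src_isize - loff;		/* 5904 bytes, ends at i_size */
	uint64_t len = round_down(src_isize, sectorsize) - loff;	/* 8192 - 4096 */

	printf("olen=%llu adjusted len=%llu\n",
	       (unsigned long long)olen, (unsigned long long)len);
	return 0;
}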
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 4353bb69bb86..d4917c0cddf5 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1019,10 +1019,9 @@ out_add_root:
spin_unlock(&fs_info->qgroup_lock);
ret = btrfs_commit_transaction(trans);
- if (ret) {
- trans = NULL;
+ trans = NULL;
+ if (ret)
goto out_free_path;
- }
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1650dc44a5e3..3c2ae0e4f25a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
* Call this after adding a new name for a file and it will properly
* update the log to reflect the new name.
*
- * It will return zero if all goes well, and it will return 1 if a
- * full transaction commit is required.
+ * @ctx cannot be NULL when @sync_log is false, and should be NULL when it's
+ * true (because it's not used).
+ *
+ * Return value depends on whether @sync_log is true or false.
+ * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
+ * otherwise.
+ * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
+ * sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
+ * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ * committed (without attempting to sync the log).
*/
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_inode *old_dir,
- struct dentry *parent)
+ struct dentry *parent,
+ bool sync_log, struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
+ int ret;
/*
* this will force the logging code to walk the dentry chain
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
*/
if (inode->logged_trans <= fs_info->last_trans_committed &&
(!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
- return 0;
+ return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
+ BTRFS_DONT_NEED_LOG_SYNC;
+
+ if (sync_log) {
+ struct btrfs_log_ctx ctx2;
+
+ btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
+ ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+ LOG_INODE_EXISTS, &ctx2);
+ if (ret == BTRFS_NO_LOG_SYNC)
+ return BTRFS_DONT_NEED_TRANS_COMMIT;
+ else if (ret)
+ return BTRFS_NEED_TRANS_COMMIT;
+
+ ret = btrfs_sync_log(trans, inode->root, &ctx2);
+ if (ret)
+ return BTRFS_NEED_TRANS_COMMIT;
+ return BTRFS_DONT_NEED_TRANS_COMMIT;
+ }
+
+ ASSERT(ctx);
+ ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+ LOG_INODE_EXISTS, ctx);
+ if (ret == BTRFS_NO_LOG_SYNC)
+ return BTRFS_DONT_NEED_LOG_SYNC;
+ else if (ret)
+ return BTRFS_NEED_TRANS_COMMIT;
- return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
- LOG_INODE_EXISTS, NULL);
+ return BTRFS_NEED_LOG_SYNC;
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 122e68b89a5a..7ab9bb88a639 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
int for_rename);
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir);
+/* Return values for btrfs_log_new_name() */
+enum {
+ BTRFS_DONT_NEED_TRANS_COMMIT,
+ BTRFS_NEED_TRANS_COMMIT,
+ BTRFS_DONT_NEED_LOG_SYNC,
+ BTRFS_NEED_LOG_SYNC,
+};
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_inode *old_dir,
- struct dentry *parent);
+ struct dentry *parent,
+ bool sync_log, struct btrfs_log_ctx *ctx);
#endif
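For context, a rough sketch of how a rename or link path might consume the new contract when it passes sync_log == false; the surrounding variables and flags are illustrative, not the actual btrfs call sites:

	ret = btrfs_log_new_name(trans, BTRFS_I(inode), BTRFS_I(old_dir),
				 dentry->d_parent, false /* sync_log */, &ctx);
	if (ret == BTRFS_NEED_TRANS_COMMIT)
		commit_transaction = true;	/* fall back to a full commit */
	else if (ret == BTRFS_NEED_LOG_SYNC)
		need_log_sync = true;		/* caller syncs the log later */
	/* BTRFS_DONT_NEED_LOG_SYNC: nothing extra to do */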
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index da86706123ff..f4405e430da6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4491,7 +4491,12 @@ again:
/* Now btrfs_update_device() will change the on-disk size. */
ret = btrfs_update_device(trans, device);
- btrfs_end_transaction(trans);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ } else {
+ ret = btrfs_commit_transaction(trans);
+ }
done:
btrfs_free_path(path);
if (ret) {
diff --git a/fs/buffer.c b/fs/buffer.c
index 4cc679d5bf58..6f1ae3ac9789 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -39,7 +39,6 @@
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 43ca3b763875..eab1359d0553 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
/*
* create a new fs client
+ *
+ * Success or not, this function consumes @fsopt and @opt.
*/
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
struct ceph_options *opt)
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
struct ceph_fs_client *fsc;
int page_count;
size_t size;
- int err = -ENOMEM;
+ int err;
fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
- if (!fsc)
- return ERR_PTR(-ENOMEM);
+ if (!fsc) {
+ err = -ENOMEM;
+ goto fail;
+ }
fsc->client = ceph_create_client(opt, fsc);
if (IS_ERR(fsc->client)) {
err = PTR_ERR(fsc->client);
goto fail;
}
+ opt = NULL; /* fsc->client now owns this */
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
fsc->client->osdc.abort_on_full = true;
@@ -677,6 +682,9 @@ fail_client:
ceph_destroy_client(fsc->client);
fail:
kfree(fsc);
+ if (opt)
+ ceph_destroy_options(opt);
+ destroy_mount_options(fsopt);
return ERR_PTR(err);
}
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
fsc = create_fs_client(fsopt, opt);
if (IS_ERR(fsc)) {
res = ERR_CAST(fsc);
- destroy_mount_options(fsopt);
- ceph_destroy_options(opt);
goto out_final;
}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index b380e0871372..a2b2355e7f01 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
case SFM_LESSTHAN:
*target = '<';
break;
- case SFM_SLASH:
- *target = '\\';
- break;
case SFM_SPACE:
*target = ' ';
break;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index c832a8a1970a..7aa08dba4719 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
if (tcon == NULL)
return -ENOMEM;
- snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+ snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
/* cannot fail */
nls_codepage = load_nls_default();
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d32eaa4b2437..6e8765f44508 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
oparms.create_options = CREATE_NOT_DIR;
+ if (backup_cred(cifs_sb))
+ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index db0453660ff6..6a9c47541c53 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
* MacOS server pads after SMB2.1 write response with 3 bytes
* of junk. Other servers match RFC1001 len to actual
* SMB2/SMB3 frame length (header + smb2 response specific data)
- * Some windows servers do too when compounding is used.
- * Log the server error (once), but allow it and continue
+ * Some windows servers also pad up to 8 bytes when compounding.
+ * If pad is longer than eight bytes, log the server behavior
+ * (once), since it may indicate a problem, but allow it and continue
* since the frame is parseable.
*/
if (clc_len < len) {
- printk_once(KERN_WARNING
- "SMB2 server sent bad RFC1001 len %d not %d\n",
- len, clc_len);
+ pr_warn_once(
+ "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+ len, clc_len, command, mid);
return 0;
}
+ pr_warn_once(
+ "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
+ len, clc_len, command, mid);
return 1;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 247a98e6c856..d954ce36b473 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_EA;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_WRITE_EA;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = fid;
oparms.reconnect = false;
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = {
struct smb_version_values smb3any_values = {
.version_string = SMB3ANY_VERSION_STRING,
.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = {
struct smb_version_values smbdefault_values = {
.version_string = SMBDEFAULT_VERSION_STRING,
.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = {
struct smb_version_values smb30_values = {
.version_string = SMB30_VERSION_STRING,
.protocol_id = SMB30_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = {
struct smb_version_values smb302_values = {
.version_string = SMB302_VERSION_STRING,
.protocol_id = SMB302_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = {
struct smb_version_values smb311_values = {
.version_string = SMB311_VERSION_STRING,
.protocol_id = SMB311_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
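The backup-intent change repeats the same four-line pattern at every open above. A hypothetical helper, not part of this patch, shows how the repetition could be factored out:

/* Hypothetical helper (not in this patch): centralize the backup-intent check. */
static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
{
	return backup_cred(cifs_sb) ? (options | CREATE_OPEN_BACKUP_INTENT) : options;
}

	/* Equivalent to the open-coded hunks above: */
	oparms.create_options = cifs_create_options(cifs_sb, 0);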
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 5740aa809be6..c08acfc77abc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
*oplock == SMB2_OPLOCK_LEVEL_NONE)
req->RequestedOplockLevel = *oplock;
+ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+ (oparms->create_options & CREATE_NOT_FILE))
+ req->RequestedOplockLevel = *oplock; /* no srv lease support */
else {
rc = add_lease_context(server, iov, &n_iov,
oparms->fid->lease_key, oplock);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ec3fba7d492f..488a9e7f8f66 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -24,6 +24,7 @@
#include <linux/mpage.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>
+#include <linux/blkdev.h>
#include "isofs.h"
#include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
/*
* What if bugger tells us to go beyond page size?
*/
+ if (bdev_logical_block_size(s->s_bdev) > 2048) {
+ printk(KERN_WARNING
+ "ISOFS: unsupported/invalid hardware sector size %d\n",
+ bdev_logical_block_size(s->s_bdev));
+ goto out_freesbi;
+ }
opt.blocksize = sb_min_blocksize(s, opt.blocksize);
sbi->s_high_sierra = 0; /* default is iso9660 */
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 03b8ba933eb2..235b959fc2b3 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* alloc.c - NILFS dat/inode allocator
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Originally written by Koji Sato.
* Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
*/
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 05149e606a78..0303c3968cee 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* alloc.h - persistent object (dat entry/disk inode) allocator/deallocator
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Originally written by Koji Sato.
* Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
*/
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 01fb1831ca25..fb5a9a8a13cf 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bmap.c - NILFS block mapping.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 2b6ffbe5997a..2c63858e81c9 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bmap.h - NILFS block mapping.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index dec98cab729d..ebb24a314f43 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* btnode.c - NILFS B-tree node cache
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Originally written by Seiji Kihara.
* Fully revised by Ryusuke Konishi for stabilization and simplification.
*
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 4e8aaa1aeb65..0f88dbc9bcb3 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* btnode.h - NILFS B-tree node cache
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Seiji Kihara.
* Revised by Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 16a7a67a11c9..23e043eca237 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* btree.c - NILFS B-tree.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 2184e47fa4bf..d1421b646ce4 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* btree.h - NILFS B-tree.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index a15a1601e931..8d41311b5db4 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cpfile.c - NILFS checkpoint file.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index 6eca972f9673..6336222df24a 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* cpfile.h - NILFS checkpoint file.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index dffedb2f8817..6f4066636be9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dat.c - NILFS disk address translation.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index 57dc6cf466d0..b17ee34580ae 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* dat.h - NILFS disk address translation.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 582831ab3eb9..81394e22d0a0 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dir.c - NILFS directory entry operations
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Modified for NILFS by Amagai Yoshiji.
*/
/*
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 96e3ed0d9652..533e24ea3a88 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* direct.c - NILFS direct block pointer.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h
index cfe85e848bba..ec9a23c77994 100644
--- a/fs/nilfs2/direct.h
+++ b/fs/nilfs2/direct.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* direct.h - NILFS direct block pointer.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 7da0fac71dc2..64bc81363c6c 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* file.c - NILFS regular file handling primitives including fsync().
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Amagai Yoshiji and Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 853a831dcde0..aa3c328ee189 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* gcinode.c - dummy inodes to buffer blocks for garbage collection
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
* Revised by Ryusuke Konishi.
*
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index b8fa45c20c63..4140d232cadc 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ifile.c - NILFS inode file
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Amagai Yoshiji.
* Revised by Ryusuke Konishi.
*
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index 188b94fe0ec5..a1e1e5711a05 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ifile.h - NILFS inode file
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Amagai Yoshiji.
* Revised by Ryusuke Konishi.
*
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 6a612d832e7d..671085512e0f 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* inode.c - NILFS inode operations.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 1d2c3d7711fe..9b96d79eea6c 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ioctl.c - NILFS ioctl operations.
*
* Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index c6bc1033e7d2..700870a92bc4 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mdt.c - meta data file for NILFS
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 3f67f3932097..e77aea4bb921 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* mdt.h - NILFS meta data file prototype and definitions
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index dd52d3f82e8d..9fe6d4ab74f0 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* namei.c - NILFS pathname lookup operations.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
*/
/*
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 33f8c8fc96e8..a2f247b6a209 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* nilfs.h - NILFS local header file.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato and Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 4cb850a6f1c2..329a056b73b1 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* page.c - buffer/page management specific to NILFS
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi and Seiji Kihara.
*/
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index f3687c958fa8..62b9bb469e92 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* page.h - buffer/page management specific to NILFS
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi and Seiji Kihara.
*/
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 5139efed1888..140b663e91c7 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* recovery.c - NILFS recovery logic
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 68cb9e4740b4..20c479b5e41b 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* segbuf.c - NILFS segment buffer
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 10e16935fff6..9bea1bd59041 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* segbuf.h - NILFS Segment buffer prototypes and definitions
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0953635e7d48..445eef41bfaf 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* segment.c - NILFS segment constructor.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 04634e3e3d58..f5cf5308f3fc 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* segment.h - NILFS Segment constructor prototypes and definitions
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index c7fa139d50e8..bf3f8f05c89b 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* sufile.c - NILFS segment usage file.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
* Revised by Ryusuke Konishi.
*/
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 673a891350f4..c4e2c7a7add1 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* sufile.h - NILFS segment usage file.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Koji Sato.
*/
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1b9067cf4511..26290aa1023f 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* super.c - NILFS module and super block management.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*/
/*
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 4b25837e7724..e60be7bb55b0 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* sysfs.c - sysfs support implementation.
*
* Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
* Copyright (C) 2014 HGST, Inc., a Western Digital Company.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
*/
diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
index 648cedf9c06e..d001eb862dae 100644
--- a/fs/nilfs2/sysfs.h
+++ b/fs/nilfs2/sysfs.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* sysfs.h - sysfs support declarations.
*
* Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
* Copyright (C) 2014 HGST, Inc., a Western Digital Company.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
*/
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 1a85317e83f0..484785cdf96e 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* the_nilfs.c - the_nilfs shared structure.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 36da1779f976..380a543c5b19 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* the_nilfs.h - the_nilfs shared structure.
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by Ryusuke Konishi.
*
*/
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index f174397b63a0..ababdbfab537 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
- if ((mask & FS_MODIFY) ||
- (test_mask & to_tell->i_fsnotify_mask)) {
- iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
- fsnotify_first_mark(&to_tell->i_fsnotify_marks);
- }
-
- if (mnt && ((mask & FS_MODIFY) ||
- (test_mask & mnt->mnt_fsnotify_mask))) {
- iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
- fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+ iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
+ fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+ if (mnt) {
iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
}
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 05506d60131c..59cdb27826de 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
struct fsnotify_mark *mark;
assert_spin_locked(&conn->lock);
+ /* We can get a detached connector here when the inode is getting unlinked. */
+ if (!fsnotify_valid_obj_type(conn->type))
+ return;
hlist_for_each_entry(mark, &conn->list, obj_list) {
if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
new_mask |= mark->mask;
}
- if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
- return;
-
*fsnotify_conn_mask_p(conn) = new_mask;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 860bfbe7a07a..f0cbf58ad4da 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,7 @@
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
+#include <linux/nospec.h>
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
struct if_dqinfo uinfo;
int ret;
- /* This checks whether qc_state has enough entries... */
- BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
if (!sb->s_qcop->get_state)
return -ENOSYS;
ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
* GETXSTATE quotactl has space for just one set of time limits so
* report them for the first enabled quota type
*/
- for (type = 0; type < XQM_MAXQUOTAS; type++)
+ for (type = 0; type < MAXQUOTAS; type++)
if (state.s_state[type].flags & QCI_ACCT_ENABLED)
break;
- BUG_ON(type == XQM_MAXQUOTAS);
+ BUG_ON(type == MAXQUOTAS);
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
* GETXSTATV quotactl has space for just one set of time limits so
* report them for the first enabled quota type
*/
- for (type = 0; type < XQM_MAXQUOTAS; type++)
+ for (type = 0; type < MAXQUOTAS; type++)
if (state.s_state[type].flags & QCI_ACCT_ENABLED)
break;
- BUG_ON(type == XQM_MAXQUOTAS);
+ BUG_ON(type == MAXQUOTAS);
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
{
int ret;
- if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+ if (type >= MAXQUOTAS)
return -EINVAL;
+ type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
* since they needn't be set if quota is not supported at all.
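The clamp above follows the usual Spectre-v1 hardening shape: an architectural bounds check first, then array_index_nospec() so a mispredicted branch cannot use the index speculatively. In generic form (identifiers below are illustrative):

	if (idx >= NR_ENTRIES)				/* architectural bounds check */
		return -EINVAL;
	idx = array_index_nospec(idx, NR_ENTRIES);	/* clamp under speculation */
	val = table[idx];				/* now safe to dereference */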
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3040dc2a32f6..6f515651a2c2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
struct kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
- long lastblock;
uint16_t ident;
- struct udf_sb_info *sbi;
if (fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
return 1;
}
- }
-
- sbi = UDF_SB(sb);
- if (!bh) {
- /* Search backwards through the partitions */
- struct kernel_lb_addr newfileset;
-
-/* --> cvg: FIXME - is it reasonable? */
- return 1;
-
- for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
- (newfileset.partitionReferenceNum != 0xFFFF &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- newfileset.partitionReferenceNum--) {
- lastblock = sbi->s_partmaps
- [newfileset.partitionReferenceNum]
- .s_partition_len;
- newfileset.logicalBlockNum = 0;
-
- do {
- bh = udf_read_ptagged(sb, &newfileset, 0,
- &ident);
- if (!bh) {
- newfileset.logicalBlockNum++;
- continue;
- }
-
- switch (ident) {
- case TAG_IDENT_SBD:
- {
- struct spaceBitmapDesc *sp;
- sp = (struct spaceBitmapDesc *)
- bh->b_data;
- newfileset.logicalBlockNum += 1 +
- ((le32_to_cpu(sp->numOfBytes) +
- sizeof(struct spaceBitmapDesc)
- - 1) >> sb->s_blocksize_bits);
- brelse(bh);
- break;
- }
- case TAG_IDENT_FSD:
- *fileset = newfileset;
- break;
- default:
- newfileset.logicalBlockNum++;
- brelse(bh);
- bh = NULL;
- break;
- }
- } while (newfileset.logicalBlockNum < lastblock &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- }
- }
-
- if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
- fileset->partitionReferenceNum != 0xFFFF) && bh) {
udf_debug("Fileset at block=%u, partition=%u\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
- sbi->s_partition = fileset->partitionReferenceNum;
+ UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
udf_load_fileset(sb, bh, root);
brelse(bh);
return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
*/
#define PART_DESC_ALLOC_STEP 32
+struct part_desc_seq_scan_data {
+ struct udf_vds_record rec;
+ u32 partnum;
+};
+
struct desc_seq_scan_data {
struct udf_vds_record vds[VDS_POS_LENGTH];
unsigned int size_part_descs;
- struct udf_vds_record *part_descs_loc;
+ unsigned int num_part_descs;
+ struct part_desc_seq_scan_data *part_descs_loc;
};
static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
{
struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
int partnum;
+ int i;
partnum = le16_to_cpu(desc->partitionNumber);
- if (partnum >= data->size_part_descs) {
- struct udf_vds_record *new_loc;
+ for (i = 0; i < data->num_part_descs; i++)
+ if (partnum == data->part_descs_loc[i].partnum)
+ return &(data->part_descs_loc[i].rec);
+ if (data->num_part_descs >= data->size_part_descs) {
+ struct part_desc_seq_scan_data *new_loc;
unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
data->part_descs_loc = new_loc;
data->size_part_descs = new_size;
}
- return &(data->part_descs_loc[partnum]);
+ return &(data->part_descs_loc[data->num_part_descs++].rec);
}
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
data.size_part_descs = PART_DESC_ALLOC_STEP;
+ data.num_part_descs = 0;
data.part_descs_loc = kcalloc(data.size_part_descs,
sizeof(*data.part_descs_loc),
GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
* are in it.
*/
for (; (!done && block <= lastblock); block++) {
-
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
break;
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence(
}
/* Now handle prevailing Partition Descriptors */
- for (i = 0; i < data.size_part_descs; i++) {
- if (data.part_descs_loc[i].block) {
- ret = udf_load_partdesc(sb,
- data.part_descs_loc[i].block);
- if (ret < 0)
- return ret;
- }
+ for (i = 0; i < data.num_part_descs; i++) {
+ ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index ca1d2cc2cdfa..18863d56273c 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define __declare_arg_0(a0, res) \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
register unsigned long r1 asm("r1"); \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3")
#define __declare_arg_1(a0, a1, res) \
+ typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3")
#define __declare_arg_2(a0, a1, a2, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
- register typeof(a2) r2 asm("r2") = a2; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2") = __a2; \
register unsigned long r3 asm("r3")
#define __declare_arg_3(a0, a1, a2, a3, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ typeof(a3) __a3 = a3; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
- register typeof(a2) r2 asm("r2") = a2; \
- register typeof(a3) r3 asm("r3") = a3
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2") = __a2; \
+ register unsigned long r3 asm("r3") = __a3
#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ typeof(a4) __a4 = a4; \
__declare_arg_3(a0, a1, a2, a3, res); \
- register typeof(a4) r4 asm("r4") = a4
+ register unsigned long r4 asm("r4") = __a4
#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ typeof(a5) __a5 = a5; \
__declare_arg_4(a0, a1, a2, a3, a4, res); \
- register typeof(a5) r5 asm("r5") = a5
+ register unsigned long r5 asm("r5") = __a5
#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ typeof(a6) __a6 = a6; \
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
- register typeof(a6) r6 asm("r6") = a6
+ register unsigned long r6 asm("r6") = __a6
#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ typeof(a7) __a7 = a7; \
__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
- register typeof(a7) r7 asm("r7") = a7
+ register unsigned long r7 asm("r7") = __a7
#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
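
The rewritten __declare_arg_*() macros above first snapshot each argument into a typeof() temporary and only then load it into the fixed r0-r7 register variables as an unsigned long. A minimal, hedged illustration of the snapshot idiom is below; it shows only the single-evaluation aspect, not the SMCCC register-assignment details, and the macro names are made up:

#include <stdio.h>

static int counter;
static int next_val(void) { return ++counter; }

/* Naive macro: the argument expression is expanded, and thus evaluated,
 * at every use site inside the macro body. */
#define TWICE_NAIVE(x)  ((x) + (x))

/* Snapshot pattern: evaluate the argument exactly once into a typeof()
 * temporary, then use only the temporary (GCC statement expression). */
#define TWICE_SNAP(x)   ({ typeof(x) __x = (x); __x + __x; })

int main(void)
{
    counter = 0;
    printf("naive: %d\n", TWICE_NAIVE(next_val())); /* next_val() runs twice -> 3 */
    counter = 0;
    printf("snap:  %d\n", TWICE_SNAP(next_val()));  /* next_val() runs once  -> 2 */
    return 0;
}

In the SMCCC macros the temporaries presumably also keep complex argument expressions from being evaluated while the fixed register variables are already live, but that motivation is not spelled out in the hunk itself.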
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 34aec30e06c7..6d766a19f2bb 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -56,6 +56,7 @@ struct blkcg {
struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
+ refcount_t cgwb_refcnt;
#endif
};
@@ -89,7 +90,6 @@ struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
- bool offline;
};
/*
@@ -387,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
return cpd ? cpd->blkcg : NULL;
}
+extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ */
+static inline void blkcg_cgwb_get(struct blkcg *blkcg)
+{
+ refcount_inc(&blkcg->cgwb_refcnt);
+}
+
+/**
+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ * When this count goes to zero, all active wbs have finished, so the
+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().

+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue.
+ */
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+ if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
+ blkcg_destroy_blkgs(blkcg);
+}
+
+#else
+
+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
+
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+ /* wb isn't being accounted, so trigger destruction right away */
+ blkcg_destroy_blkgs(blkcg);
+}
+
+#endif
+
/**
* blkg_path - format cgroup path of blkg
* @blkg: blkg of interest
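
blkcg_cgwb_get()/blkcg_cgwb_put() above gate blkcg destruction on the number of active writebacks: the last put calls blkcg_destroy_blkgs(). A small userspace sketch of that last-reference-triggers-teardown pattern, using C11 atomics instead of the kernel's refcount_t (all names here are invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct group {
    atomic_int cgwb_refcnt;     /* one reference per active writeback */
    char name[16];
};

static void group_destroy(struct group *g)
{
    printf("destroying %s\n", g->name);
    free(g);
}

static void group_get(struct group *g)
{
    atomic_fetch_add(&g->cgwb_refcnt, 1);
}

static void group_put(struct group *g)
{
    /* fetch_sub returns the old value: 1 means we dropped the last ref */
    if (atomic_fetch_sub(&g->cgwb_refcnt, 1) == 1)
        group_destroy(g);
}

int main(void)
{
    struct group *g = calloc(1, sizeof(*g));

    strcpy(g->name, "blkcg0");
    atomic_init(&g->cgwb_refcnt, 1);    /* initial reference held by the owner */
    group_get(g);                       /* a writeback takes a reference */
    group_put(g);                       /* that writeback finishes */
    group_put(g);                       /* owner drops the last ref -> destroy */
    return 0;
}

When writeback accounting is compiled out, the kernel's put stub simply destroys immediately, matching the !CONFIG_CGROUP_WRITEBACK branch in the hunk.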
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b79387fd57da..65b4eaed1d96 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
}
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
diff --git a/include/linux/of.h b/include/linux/of.h
index 4d25e4f952d9..99b0ebf49632 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+extern bool of_node_name_eq(const struct device_node *np, const char *name);
+extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
+
static inline const char *of_node_full_name(const struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
return NULL;
}
+static inline bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+ return false;
+}
+
+static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+ return false;
+}
+
static inline const char* of_node_full_name(const struct device_node *np)
{
return "<no-node>";
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void)
return false;
}
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node(
return of_find_matching_node_and_match(from, matches, NULL);
}
+static inline const char *of_node_get_device_type(const struct device_node *np)
+{
+ return of_get_property(np, "type", NULL);
+}
+
+static inline bool of_node_is_type(const struct device_node *np, const char *type)
+{
+ const char *match = of_node_get_device_type(np);
+
+ return np && match && type && !strcmp(match, type);
+}
+
/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
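
The new of_node_get_device_type()/of_node_is_type() helpers above fold the usual NULL checks into one place, so callers can test a node's type property without guarding every step themselves. A hedged sketch of the same NULL-tolerant accessor shape on a toy node structure (not the real struct device_node):

#include <stdio.h>
#include <string.h>

struct node {
    const char *type;           /* may be NULL if the property is absent */
};

/* NULL-tolerant property read: absent node or property yields NULL. */
static const char *node_get_device_type(const struct node *np)
{
    return np ? np->type : NULL;
}

/* True only when the node exists, has a type, and the type matches. */
static int node_is_type(const struct node *np, const char *type)
{
    const char *match = node_get_device_type(np);

    return np && match && type && !strcmp(match, type);
}

int main(void)
{
    struct node cpu = { .type = "cpu" };
    struct node anon = { .type = NULL };

    printf("%d %d %d\n",
           node_is_type(&cpu, "cpu"),   /* 1 */
           node_is_type(&anon, "cpu"),  /* 0: no type property */
           node_is_type(NULL, "cpu"));  /* 0: no node at all */
    return 0;
}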
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 99d366cb0e9f..d157983b84cf 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3084,4 +3084,6 @@
#define PCI_VENDOR_ID_OCZ 0x1b85
+#define PCI_VENDOR_ID_NCUBE 0x10ff
+
#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca7259b..9f0aa1b48c78 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/quota.h b/include/linux/quota.h
index ca9772c8e48b..f32dd270b8e3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -408,13 +408,7 @@ struct qc_type_state {
struct qc_state {
unsigned int s_incoredqs; /* Number of dquots in core */
- /*
- * Per quota type information. The array should really have
- * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
- * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
- * supports project quotas, this can be changed to MAXQUOTAS
- */
- struct qc_type_state s_state[XQM_MAXQUOTAS];
+ struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */
};
/* Structure for communicating via ->set_info */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 5d738804e3d6..a5a3cfc3c2fa 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
- struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
/*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 7f2e16e76ac4..041f7e56a289 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void);
* For rcuidle callers, use srcu since sched-rcu \
* doesn't work from the idle path. \
*/ \
- if (rcuidle) \
+ if (rcuidle) { \
idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+ rcu_irq_enter_irqson(); \
+ } \
\
it_func_ptr = rcu_dereference_raw((tp)->funcs); \
\
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void);
} while ((++it_func_ptr)->func); \
} \
\
- if (rcuidle) \
+ if (rcuidle) { \
+ rcu_irq_exit_irqson(); \
srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+ } \
\
preempt_enable_notrace(); \
} while (0)
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 1ad5b19e83a9..970303448c90 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -23,13 +23,11 @@ struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
- struct list_head list;
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
refcount_t tcfa_refcnt;
atomic_t tcfa_bindcnt;
- u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats;
@@ -44,7 +42,6 @@ struct tc_action {
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
#define tcf_bindcnt common.tcfa_bindcnt
-#define tcf_capab common.tcfa_capab
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
@@ -102,7 +99,6 @@ struct tc_action_ops {
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
void (*put_dev)(struct net_device *dev);
- int (*delete)(struct net *net, u32 index);
};
struct tc_action_net {
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
const struct tc_action_ops *ops,
struct netlink_ext_ack *extack);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
- int bind);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
int bind, bool cpustats);
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
static inline int tcf_idr_release(struct tc_action *a, bool bind)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9a850973e09a..8ebabc9873d1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4865,8 +4865,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* Return: 0 on success. -ENODATA.
*/
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
- struct ieee80211_wmm_rule *rule);
+int reg_query_regdb_wmm(char *alpha2, int freq,
+ struct ieee80211_reg_rule *rule);
/*
* callbacks for asynchronous cfg80211 methods, notification
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ef727f71336e..75a3f3fdb359 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
#endif
}
-static inline void tcf_exts_to_list(const struct tcf_exts *exts,
- struct list_head *actions)
-{
#ifdef CONFIG_NET_CLS_ACT
- int i;
-
- for (i = 0; i < exts->nr_actions; i++) {
- struct tc_action *a = exts->actions[i];
-
- list_add_tail(&a->list, actions);
- }
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
-}
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
#endif
}
+static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return exts->actions[0];
+#else
+ return NULL;
+#endif
+}
+
/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
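
tcf_exts_to_list() is replaced above by tcf_exts_for_each_action(), which walks the fixed-size actions[] array directly and stops at the first empty slot, so callers no longer need a temporary list_head. A rough userspace sketch of that bounded iterate-until-NULL macro (the slot count, struct and names are placeholders):

#include <stdio.h>

#define MAX_ACTIONS 4           /* stands in for TCA_ACT_MAX_PRIO */

struct action { const char *name; };

/* Iterate a fixed-size array of pointers, stopping at the first NULL slot,
 * mirroring the shape of the new tcf_exts_for_each_action() helper. */
#define for_each_action(i, a, acts) \
    for ((i) = 0; (i) < MAX_ACTIONS && ((a) = (acts)[i]); (i)++)

int main(void)
{
    struct action mirror = { "mirred" }, drop = { "gact" };
    struct action *acts[MAX_ACTIONS] = { &mirror, &drop, NULL, NULL };
    struct action *a;
    int i;

    for_each_action(i, a, acts)
        printf("action %d: %s\n", i, a->name);
    return 0;
}

For the single-action case, the dsa_slave_add_cls_matchall() change further down in this diff uses the companion tcf_exts_first_action() helper instead of building a list.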
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 60f8cc86a447..3469750df0f4 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
struct ieee80211_reg_rule {
struct ieee80211_freq_range freq_range;
struct ieee80211_power_rule power_rule;
- struct ieee80211_wmm_rule *wmm_rule;
+ struct ieee80211_wmm_rule wmm_rule;
u32 flags;
u32 dfs_cac_ms;
+ bool has_wmm;
};
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
- u32 n_wmm_rules;
char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 7b8c9e19bad1..910cc4334b21 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -65,7 +65,7 @@
/* keyctl structures */
struct keyctl_dh_params {
- __s32 private;
+ __s32 dh_private;
__s32 prime;
__s32 base;
};
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index dc520e1a4123..8b73cb603c5f 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
+#include <linux/in6.h> /* For struct in6_addr. */
#define RDS_IB_ABI_VERSION 0x301
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index b1e22c40c4b6..84c3de89696a 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -176,7 +176,7 @@ struct vhost_memory {
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
-#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
/* VHOST_NET specific defines */
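
The vhost change above flips VHOST_GET_BACKEND_FEATURES from _IOW to _IOR because a "get" ioctl copies data back to userspace, and the direction bits are encoded in the ioctl number itself. A small Linux-only illustration with an invented magic number (0x42 is arbitrary and is not the vhost magic):

#include <stdio.h>
#include <sys/ioctl.h>          /* pulls in the _IOR()/_IOW() encoders on Linux */

#define DEMO_MAGIC 0x42         /* hypothetical ioctl magic, illustration only */
#define DEMO_GET_WRONG _IOW(DEMO_MAGIC, 0x26, unsigned long long)  /* claims "write" */
#define DEMO_GET_RIGHT _IOR(DEMO_MAGIC, 0x26, unsigned long long)  /* declares "read" */

int main(void)
{
    printf("_IOW encoding: 0x%08lx\n", (unsigned long)DEMO_GET_WRONG);
    printf("_IOR encoding: 0x%08lx\n", (unsigned long)DEMO_GET_RIGHT);
    return 0;
}

Because the direction bits are part of the number, the _IOR and _IOW forms are different ioctl values, which is why getting the header right matters for the ABI.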
diff --git a/ipc/shm.c b/ipc/shm.c
index b0eb3757ab89..4cd402e4cfeb 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
}
ipc_unlock_object(ipcp);
+ ipcp = ERR_PTR(-EIDRM);
err:
rcu_read_unlock();
/*
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 04b8eda94e7d..03cc59ee9c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -15,6 +15,7 @@
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
+#include <linux/random.h>
#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
+ u32 hashrnd;
};
/* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (!htab->buckets)
goto free_htab;
+ htab->hashrnd = get_random_int();
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
return ERR_PTR(err);
}
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
- return jhash(key, key_len, 0);
+ return jhash(key, key_len, hashrnd);
}
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
head = select_bucket(htab, hash);
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
if (!key)
goto find_first_elem;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
head = select_bucket(htab, hash);
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98e621a29e8e..488ef9663c01 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk)
}
static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
static void bpf_tcp_release(struct sock *sk)
{
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk)
goto out;
if (psock->cork) {
- free_start_sg(psock->sock, psock->cork);
+ free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
psock->cork = NULL;
}
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
close_fun = psock->save_close;
if (psock->cork) {
- free_start_sg(psock->sock, psock->cork);
+ free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
psock->cork = NULL;
}
list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
list_del(&md->list);
- free_start_sg(psock->sock, md);
+ free_start_sg(psock->sock, md, true);
kfree(md);
}
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
/* If another thread deleted this object skip deletion.
* The refcnt on psock may or may not be zero.
*/
- if (l) {
+ if (l && l == link) {
hlist_del_rcu(&link->hash_node);
smap_release_sock(psock, link->sk);
free_htab_elem(htab, link);
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
md->sg_start = i;
}
-static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
+static int free_sg(struct sock *sk, int start,
+ struct sk_msg_buff *md, bool charge)
{
struct scatterlist *sg = md->sg_data;
int i = start, free = 0;
while (sg[i].length) {
free += sg[i].length;
- sk_mem_uncharge(sk, sg[i].length);
+ if (charge)
+ sk_mem_uncharge(sk, sg[i].length);
if (!md->skb)
put_page(sg_page(&sg[i]));
sg[i].length = 0;
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
return free;
}
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
{
- int free = free_sg(sk, md->sg_start, md);
+ int free = free_sg(sk, md->sg_start, md, charge);
md->sg_start = md->sg_end;
return free;
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
{
- return free_sg(sk, md->sg_curr, md);
+ return free_sg(sk, md->sg_curr, md, true);
}
static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
list_add_tail(&r->list, &psock->ingress);
sk->sk_data_ready(sk);
} else {
- free_start_sg(sk, r);
+ free_start_sg(sk, r, true);
kfree(r);
}
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
release_sock(sk);
}
smap_release_sock(psock, sk);
- if (unlikely(err))
- goto out;
- return 0;
+ return err;
out_rcu:
rcu_read_unlock();
-out:
- free_bytes_sg(NULL, send, md, false);
- return err;
+ return 0;
}
static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +820,7 @@ more_data:
case __SK_PASS:
err = bpf_tcp_push(sk, send, m, flags, true);
if (unlikely(err)) {
- *copied -= free_start_sg(sk, m);
+ *copied -= free_start_sg(sk, m, true);
break;
}
@@ -845,16 +843,17 @@ more_data:
lock_sock(sk);
if (unlikely(err < 0)) {
- free_start_sg(sk, m);
+ int free = free_start_sg(sk, m, false);
+
psock->sg_size = 0;
if (!cork)
- *copied -= send;
+ *copied -= free;
} else {
psock->sg_size -= send;
}
if (cork) {
- free_start_sg(sk, m);
+ free_start_sg(sk, m, true);
psock->sg_size = 0;
kfree(m);
m = NULL;
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
rcu_read_lock();
psock = smap_psock_sk(sk);
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out;
rcu_read_unlock();
- if (!skb_queue_empty(&sk->sk_receive_queue))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
lock_sock(sk);
bytes_ready:
while (copied != len) {
@@ -1122,7 +1120,7 @@ wait_for_memory:
err = sk_stream_wait_memory(sk, &timeo);
if (err) {
if (m && m != psock->cork)
- free_start_sg(sk, m);
+ free_start_sg(sk, m, true);
goto out_err;
}
}
@@ -1427,12 +1425,15 @@ out:
static void smap_write_space(struct sock *sk)
{
struct smap_psock *psock;
+ void (*write_space)(struct sock *sk);
rcu_read_lock();
psock = smap_psock_sk(sk);
if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
schedule_work(&psock->tx_work);
+ write_space = psock->save_write_space;
rcu_read_unlock();
+ write_space(sk);
}
static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -1461,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
schedule_work(&psock->gc_work);
}
+static bool psock_is_smap_sk(struct sock *sk)
+{
+ return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
+}
+
static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
if (refcount_dec_and_test(&psock->refcnt)) {
- tcp_cleanup_ulp(sock);
+ if (psock_is_smap_sk(sock))
+ tcp_cleanup_ulp(sock);
write_lock_bh(&sock->sk_callback_lock);
smap_stop_sock(psock, sock);
write_unlock_bh(&sock->sk_callback_lock);
@@ -1578,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w)
bpf_prog_put(psock->bpf_tx_msg);
if (psock->cork) {
- free_start_sg(psock->sock, psock->cork);
+ free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
}
list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
list_del(&md->list);
- free_start_sg(psock->sock, md);
+ free_start_sg(psock->sock, md, true);
kfree(md);
}
@@ -1891,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
* doesn't update user data.
*/
if (psock) {
+ if (!psock_is_smap_sk(sock)) {
+ err = -EBUSY;
+ goto out_progs;
+ }
if (READ_ONCE(psock->bpf_parse) && parse) {
err = -EBUSY;
goto out_progs;
@@ -2140,7 +2151,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
return ERR_PTR(-EPERM);
/* check sanity of attributes */
- if (attr->max_entries == 0 || attr->value_size != 4 ||
+ if (attr->max_entries == 0 ||
+ attr->key_size == 0 ||
+ attr->value_size != 4 ||
attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
@@ -2267,8 +2280,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
}
l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
htab->map.numa_node);
- if (!l_new)
+ if (!l_new) {
+ atomic_dec(&htab->count);
return ERR_PTR(-ENOMEM);
+ }
memcpy(l_new->key, key, key_size);
l_new->sk = sk;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ed44d7d34c2d..0097acec1c71 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
* @name: Name of the step
* @startup: Startup function of the step
* @teardown: Teardown function of the step
- * @skip_onerr: Do not invoke the functions on error rollback
- * Will go away once the notifiers are gone
* @cant_stop: Bringup/teardown can't be stopped at this step
*/
struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
struct hlist_node *node);
} teardown;
struct hlist_head list;
- bool skip_onerr;
bool cant_stop;
bool multi_instance;
};
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
- for (st->state--; st->state > st->target; st->state--) {
- struct cpuhp_step *step = cpuhp_get_step(st->state);
-
- if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
- }
+ for (st->state--; st->state > st->target; st->state--)
+ cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -614,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
bool bringup = st->bringup;
enum cpuhp_state state;
+ if (WARN_ON_ONCE(!st->should_run))
+ return;
+
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
- if (WARN_ON_ONCE(!st->should_run))
- return;
-
cpuhp_lock_acquire(bringup);
if (st->single) {
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
- if (st->rollback) {
- struct cpuhp_step *step = cpuhp_get_step(state);
- if (step->skip_onerr)
- goto next;
- }
-
if (cpuhp_is_atomic_state(state)) {
local_irq_disable();
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
st->should_run = false;
}
-next:
cpuhp_lock_release(bringup);
if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
- for (st->state++; st->state < st->target; st->state++) {
- struct cpuhp_step *step = cpuhp_get_step(st->state);
-
- if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
- }
+ for (st->state++; st->state < st->target; st->state++)
+ cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -934,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
if (ret) {
st->target = prev_state;
- undo_cpu_down(cpu, st);
+ if (st->state < prev_state)
+ undo_cpu_down(cpu, st);
break;
}
}
@@ -987,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
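
With skip_onerr gone, the hotplug rollback above becomes a plain reverse walk, and cpuhp_down_callbacks() only undoes steps when st->state actually moved past the starting point. A toy sketch of that bring-up-then-roll-back-on-failure shape (the state numbers and the failing step are invented, and the real code of course does far more bookkeeping):

#include <stdio.h>

/* Pretend state callback: state 3 fails during bringup. */
static int invoke(int state, int bringup)
{
    printf("%s state %d\n", bringup ? "bringup" : "teardown", state);
    return (bringup && state == 3) ? -1 : 0;
}

int main(void)
{
    int state, target = 5, prev_state = 0;

    for (state = prev_state + 1; state <= target; state++) {
        if (invoke(state, 1)) {
            target = prev_state;
            if (state > prev_state)          /* only undo real progress */
                for (state--; state > target; state--)
                    invoke(state, 0);
            break;
        }
    }
    return 0;
}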
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 1c35b7b945d0..de87b0282e74 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
- if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
return 0;
#else
/*
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
* memory, or by providing a ZONE_DMA32. If neither is the case, the
* architecture needs to use an IOMMU instead of the direct mapping.
*/
- if (mask < DMA_BIT_MASK(32))
+ if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
return 0;
#endif
/*
diff --git a/kernel/fork.c b/kernel/fork.c
index d896e9ca38b0..f0b58479534f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
goto out;
}
/* a new mm has just been created */
- arch_dup_mmap(oldmm, mm);
- retval = 0;
+ retval = arch_dup_mmap(oldmm, mm);
out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 924e37fb1620..fd6f8ed28e01 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -38,7 +38,6 @@
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
-#include <linux/notifier.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index a0a74c533e4b..0913b4d385de 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
return printk_safe_log_store(s, fmt, args);
}
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
{
this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
{
this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f74fb00d8064..0e6e97a01942 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
spin_unlock_irqrestore(&watchdog_lock, *flags);
}
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
/*
* Interval: 0.5sec Threshold: 0.0625s
*/
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+ /*
+ * We cannot directly run clocksource_watchdog_kthread() here, because
+ * clocksource_select() calls timekeeping_notify() which uses
+ * stop_machine(). One cannot use stop_machine() from a workqueue() due to
+ * lock inversions wrt CPU hotplug.
+ *
+ * Also, we only ever run this work once or twice during the lifetime
+ * of the kernel, so there is no point in creating a more permanent
+ * kthread for this.
+ *
+ * If kthread_run() fails, the next watchdog scan over the
+ * watchdog_list will find the unstable clock again.
+ */
+ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
static void __clocksource_unstable(struct clocksource *cs)
{
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE;
/*
- * If the clocksource is registered clocksource_watchdog_work() will
+ * If the clocksource is registered clocksource_watchdog_kthread() will
* re-rate and re-select.
*/
if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
if (cs->mark_unstable)
cs->mark_unstable(cs);
- /* kick clocksource_watchdog_work() */
+ /* kick clocksource_watchdog_kthread() */
if (finished_booting)
schedule_work(&watchdog_work);
}
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
* @cs: clocksource to be marked unstable
*
* This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
*/
void clocksource_mark_unstable(struct clocksource *cs)
{
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
}
}
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
{
struct clocksource *cs, *tmp;
unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
return select;
}
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
{
mutex_lock(&clocksource_mutex);
- if (__clocksource_watchdog_work())
+ if (__clocksource_watchdog_kthread())
clocksource_select();
mutex_unlock(&clocksource_mutex);
+ return 0;
}
static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
/*
* Run the watchdog first to eliminate unstable clock sources
*/
- __clocksource_watchdog_work();
+ __clocksource_watchdog_kthread();
clocksource_select();
mutex_unlock(&clocksource_mutex);
return 0;
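
The comment added above explains why the watchdog work item now only spawns a one-shot kthread: the heavy part may end up in stop_machine() via clocksource_select(), which must not be called from a workqueue. A loose userspace analogy, handing work off to a short-lived detached thread; pthreads stand in for kthread_run() and the failure handling is simplified:

#include <pthread.h>
#include <stdio.h>

/* The heavy work: in the kernel this is clocksource_watchdog_kthread(). */
static void *watchdog_kthread(void *data)
{
    (void)data;
    printf("re-rating and re-selecting clocksources...\n");
    return NULL;
}

/* The lightweight trigger: in the kernel this is the work item, which only
 * kicks off the helper thread and returns. */
static void watchdog_work(void)
{
    pthread_t tid;

    /* On failure, do nothing: the next watchdog scan would retry anyway. */
    if (pthread_create(&tid, NULL, watchdog_kthread, NULL) == 0)
        pthread_detach(tid);
}

int main(void)
{
    watchdog_work();
    pthread_exit(NULL);         /* keep the process alive for the helper */
}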
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5470dce212c0..977918d5d350 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
* entering idle state. This should only be used for scheduler events.
* Use touch_softlockup_watchdog() for everything else.
*/
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
{
/*
* Preemption can be enabled. It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
raw_cpu_write(watchdog_touch_ts, 0);
}
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
{
touch_softlockup_watchdog_sched();
wq_watchdog_touch(raw_smp_processor_id());
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 1f7020d65d0a..71381168dede 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
{
/*
* Using __raw here because some code paths have
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 60e80198c3df..0280deac392e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
{
if (cpu >= 0)
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 613316724c6a..4966c4fbe7f7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
time. This is really bad from a security perspective, and
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
- However, since users can not do anything actionble to
+ However, since users cannot do anything actionable to
address this, by default the kernel will issue only a single
warning for the first use of unseeded randomness.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
- those developers interersted in improving the security of
+ those developers interested in improving the security of
Linux kernels running on their architecture (or
subarchitecture).
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c72577e472f2..a66595ba5543 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -4,7 +4,6 @@
*/
#include <linux/percpu_counter.h>
-#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 310e29b51507..30526afa8343 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -28,7 +28,6 @@
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/rhashtable.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f5981e9d6ae2..8a8bb8796c6c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work)
{
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
release_work);
+ struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
mutex_lock(&wb->bdi->cgwb_release_mutex);
wb_shutdown(wb);
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work)
css_put(wb->blkcg_css);
mutex_unlock(&wb->bdi->cgwb_release_mutex);
+ /* triggers blkg destruction if cgwb_refcnt becomes zero */
+ blkcg_cgwb_put(blkcg);
+
fprop_local_destroy_percpu(&wb->memcg_completions);
percpu_ref_exit(&wb->refcnt);
wb_exit(wb);
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
list_add(&wb->memcg_node, memcg_cgwb_list);
list_add(&wb->blkcg_node, blkcg_cgwb_list);
+ blkcg_cgwb_get(blkcg);
css_get(memcg_css);
css_get(blkcg_css);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c3bc7e9c9a2a..533f9b00147d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
* but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit.
*/
- BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+ !pfn_t_devmap(pfn));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
- BUG_ON(!pfn_t_devmap(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 9a085d525bbc..17dd883198ae 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
kmemleak_initialized = 1;
+ dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
+ &kmemleak_fops);
+ if (!dentry)
+ pr_warn("Failed to create the debugfs kmemleak file\n");
+
if (kmemleak_error) {
/*
* Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
return -ENOMEM;
}
- dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
- &kmemleak_fops);
- if (!dentry)
- pr_warn("Failed to create the debugfs kmemleak file\n");
mutex_lock(&scan_mutex);
start_scan_thread();
mutex_unlock(&scan_mutex);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4ead5a4817de..e79cb59552d9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
if (mem_cgroup_out_of_memory(memcg, mask, order))
return OOM_SUCCESS;
- WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
- "This looks like a misconfiguration or a kernel bug.");
return OOM_FAILED;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9eea6e809a4e..38d94b703e9d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
if (__PageMovable(page))
return pfn;
if (PageHuge(page)) {
- if (page_huge_active(page))
+ if (hugepage_migration_supported(page_hstate(page)) &&
+ page_huge_active(page))
return pfn;
else
pfn = round_up(pfn + 1,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b5b25e4dcbbb..f10aa5360616 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
tlb_gather_mmu(&tlb, mm, start, end);
if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
+ tlb_finish_mmu(&tlb, start, end);
ret = false;
continue;
}
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
}
select_bad_process(oc);
- /* Found nothing?!?! Either we hang forever, or we panic. */
- if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
+ /* Found nothing?!?! */
+ if (!oc->chosen) {
dump_header(oc, NULL);
- panic("Out of memory and no killable processes...\n");
+ pr_warn("Out of memory and no killable processes...\n");
+ /*
+ * If we got here due to an actual allocation at the
+ * system level, we cannot survive this and will enter
+ * an endless loop in the allocator. Bail out now.
+ */
+ if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+ panic("System is deadlocked on memory\n");
}
if (oc->chosen && oc->chosen != (void *)-1UL)
oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6551d3b0dc30..84ae9bf5858a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -27,7 +27,6 @@
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
-#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e75865d58ba7..89d2a2ab3fe6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
-#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
@@ -7709,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
+
+ if (!hugepage_migration_supported(page_hstate(page)))
+ goto unmovable;
+
iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
continue;
}
diff --git a/mm/slub.c b/mm/slub.c
index ce2b9e5cea77..8da34a8af53d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
-#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
diff --git a/mm/util.c b/mm/util.c
index d2890a407332..9e3ebd2ef65f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(kvmalloc_node);
/**
- * kvfree - free memory allocated with kvmalloc
- * @addr: pointer returned by kvmalloc
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
*
- * If the memory is allocated from vmalloc area it is freed with vfree().
- * Otherwise kfree() is used.
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are
+ * certain which allocation function was used.
+ *
+ * Context: Any context except NMI.
*/
void kvfree(const void *addr)
{
diff --git a/net/core/dev.c b/net/core/dev.c
index 325fc5088370..82114e1111e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -93,7 +93,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
diff --git a/net/core/filter.c b/net/core/filter.c
index c25eb36f1320..aecdeba052d3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.arg2_type = ARG_ANYTHING,
};
+#define sk_msg_iter_var(var) \
+ do { \
+ var++; \
+ if (var == MAX_SKB_FRAGS) \
+ var = 0; \
+ } while (0)
+
BPF_CALL_4(bpf_msg_pull_data,
struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
{
- unsigned int len = 0, offset = 0, copy = 0;
+ unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
+ int bytes = end - start, bytes_sg_total;
struct scatterlist *sg = msg->sg_data;
int first_sg, last_sg, i, shift;
unsigned char *p, *to, *from;
- int bytes = end - start;
struct page *page;
if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
i = msg->sg_start;
do {
len = sg[i].length;
- offset += len;
if (start < offset + len)
break;
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
+ offset += len;
+ sk_msg_iter_var(i);
} while (i != msg->sg_end);
if (unlikely(start >= offset + len))
return -EINVAL;
- if (!msg->sg_copy[i] && bytes <= len)
- goto out;
-
first_sg = i;
+ /* The start may point into the sg element so we need to also
+ * account for the headroom.
+ */
+ bytes_sg_total = start - offset + bytes;
+ if (!msg->sg_copy[i] && bytes_sg_total <= len)
+ goto out;
/* At this point we need to linearize multiple scatterlist
* elements or a single shared page. Either way we need to
@@ -2327,37 +2335,32 @@ BPF_CALL_4(bpf_msg_pull_data,
*/
do {
copy += sg[i].length;
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
- if (bytes < copy)
+ sk_msg_iter_var(i);
+ if (bytes_sg_total <= copy)
break;
} while (i != msg->sg_end);
last_sg = i;
- if (unlikely(copy < end - start))
+ if (unlikely(bytes_sg_total > copy))
return -EINVAL;
page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
if (unlikely(!page))
return -ENOMEM;
p = page_address(page);
- offset = 0;
i = first_sg;
do {
from = sg_virt(&sg[i]);
len = sg[i].length;
- to = p + offset;
+ to = p + poffset;
memcpy(to, from, len);
- offset += len;
+ poffset += len;
sg[i].length = 0;
put_page(sg_page(&sg[i]));
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
+ sk_msg_iter_var(i);
} while (i != last_sg);
sg[first_sg].length = copy;
@@ -2367,11 +2370,15 @@ BPF_CALL_4(bpf_msg_pull_data,
* had a single entry though we can just replace it and
* be done. Otherwise walk the ring and shift the entries.
*/
- shift = last_sg - first_sg - 1;
+ WARN_ON_ONCE(last_sg == first_sg);
+ shift = last_sg > first_sg ?
+ last_sg - first_sg - 1 :
+ MAX_SKB_FRAGS - first_sg + last_sg - 1;
if (!shift)
goto out;
- i = first_sg + 1;
+ i = first_sg;
+ sk_msg_iter_var(i);
do {
int move_from;
@@ -2388,15 +2395,13 @@ BPF_CALL_4(bpf_msg_pull_data,
sg[move_from].page_link = 0;
sg[move_from].offset = 0;
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
+ sk_msg_iter_var(i);
} while (1);
msg->sg_end -= shift;
if (msg->sg_end < 0)
msg->sg_end += MAX_SKB_FRAGS;
out:
- msg->data = sg_virt(&sg[i]) + start - offset;
+ msg->data = sg_virt(&sg[first_sg]) + start - offset;
msg->data_end = msg->data + bytes;
return 0;
@@ -7281,7 +7286,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct sk_reuseport_md, ip_protocol):
- BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+ BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
BPF_W, 0);
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
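
The new sk_msg_iter_var() helper above centralizes the wrap-around increment that bpf_msg_pull_data() previously open-coded in several places: the scatterlist ring index simply wraps back to zero at MAX_SKB_FRAGS. A tiny standalone sketch of walking a wrapping region of such a ring (the slot count and region bounds are invented):

#include <stdio.h>

#define RING_SLOTS 4            /* stands in for MAX_SKB_FRAGS */

/* Advance a ring index and wrap to 0 at the end, like sk_msg_iter_var(). */
#define ring_iter_var(var)          \
    do {                            \
        (var)++;                    \
        if ((var) == RING_SLOTS)    \
            (var) = 0;              \
    } while (0)

int main(void)
{
    int start = 2, end = 1;         /* a region that wraps around the ring */
    int i = start;

    do {
        printf("visit slot %d\n", i);
        ring_iter_var(i);
    } while (i != end);
    return 0;
}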
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 24431e578310..60c928894a78 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
rtnl_lock();
tab = rtnl_msg_handlers[protocol];
+ if (!tab) {
+ rtnl_unlock();
+ return;
+ }
RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
link = tab[msgindex];
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e63c554e0623..9f3209ff7ffd 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -19,12 +19,10 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
-#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
-#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
#include "dsa_priv.h"
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 962c4fd338ba..1c45c1d6d241 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
const struct tc_action *a;
struct dsa_port *to_dp;
int err = -EOPNOTSUPP;
- LIST_HEAD(actions);
if (!ds->ops->port_mirror_add)
return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
if (!tcf_exts_has_one_action(cls->exts))
return err;
- tcf_exts_to_list(cls->exts, &actions);
- a = list_first_entry(&actions, struct tc_action, list);
+ a = tcf_exts_first_action(cls->exts);
if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
struct dsa_mall_mirror_tc_entry *mirror;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index cf75f8944b05..4da39446da2d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
spin_lock(&im->lock);
im->tm_running = 0;
- if (im->unsolicit_count) {
- im->unsolicit_count--;
+ if (im->unsolicit_count && --im->unsolicit_count)
igmp_start_timer(im, unsolicited_report_interval(in_dev));
- }
+
im->reporter = 1;
spin_unlock(&im->lock);
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
if (in_dev->dead)
return;
+
+ im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
spin_lock_bh(&im->lock);
igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
unsigned int mode)
{
struct ip_mc_list *im;
-#ifdef CONFIG_IP_MULTICAST
- struct net *net = dev_net(in_dev->dev);
-#endif
ASSERT_RTNL();
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
timer_setup(&im->timer, igmp_timer_expire, 0);
- im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
#endif
im->next_rcu = in_dev->mc_list;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 51a5d06085ac..ae714aecc31c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1508,11 +1508,14 @@ nla_put_failure:
static void erspan_setup(struct net_device *dev)
{
+ struct ip_tunnel *t = netdev_priv(dev);
+
ether_setup(dev);
dev->netdev_ops = &erspan_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, erspan_net_id);
+ t->erspan_ver = 1;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 13d34427ca3d..02ff2dde9609 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -95,11 +95,10 @@ struct bbr {
u32 mode:3, /* current bbr_mode in state machine */
prev_ca_state:3, /* CA state on previous ACK */
packet_conservation:1, /* use packet conservation? */
- restore_cwnd:1, /* decided to revert cwnd to old value */
round_start:1, /* start of packet-timed tx->ack round? */
idle_restart:1, /* restarting after idle? */
probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
- unused:12,
+ unused:13,
lt_is_sampling:1, /* taking long-term ("LT") samples now? */
lt_rtt_cnt:7, /* round trips in long-term interval */
lt_use_bw:1; /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
+static void bbr_check_probe_rtt_done(struct sock *sk);
+
/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
*/
if (bbr->mode == BBR_PROBE_BW)
bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+ else if (bbr->mode == BBR_PROBE_RTT)
+ bbr_check_probe_rtt_done(sk);
}
}
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
cwnd = tcp_packets_in_flight(tp) + acked;
} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
/* Exiting loss recovery; restore cwnd saved before recovery. */
- bbr->restore_cwnd = 1;
+ cwnd = max(cwnd, bbr->prior_cwnd);
bbr->packet_conservation = 0;
}
bbr->prev_ca_state = state;
- if (bbr->restore_cwnd) {
- /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
- cwnd = max(cwnd, bbr->prior_cwnd);
- bbr->restore_cwnd = 0;
- }
-
if (bbr->packet_conservation) {
*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
return true; /* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd = 0, target_cwnd = 0;
+ u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
if (!acked)
- return;
+ goto done; /* no packet fully ACKed; just apply caps */
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}
+static void bbr_check_probe_rtt_done(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (!(bbr->probe_rtt_done_stamp &&
+ after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
+ return;
+
+ bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
+ tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+ bbr_reset_mode(sk);
+}
+
/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
* periodically drain the bottleneck queue, to converge to measure the true
* min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
} else if (bbr->probe_rtt_done_stamp) {
if (bbr->round_start)
bbr->probe_rtt_round_done = 1;
- if (bbr->probe_rtt_round_done &&
- after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
- bbr->min_rtt_stamp = tcp_jiffies32;
- bbr->restore_cwnd = 1; /* snap to prior_cwnd */
- bbr_reset_mode(sk);
- }
+ if (bbr->probe_rtt_round_done)
+ bbr_check_probe_rtt_done(sk);
}
}
/* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
bbr->has_seen_rtt = 0;
bbr_init_pacing_rate_from_rtt(sk);
- bbr->restore_cwnd = 0;
bbr->round_start = 0;
bbr->idle_restart = 0;
bbr->full_bw_reached = 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9e041fa5c545..44c09eddbb78 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net)
if (res)
goto fail;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+ /* Enforce IP_DF and IPID==0 for RST and ACK packets
+ * sent in SYN-RECV and TIME-WAIT state.
+ */
+ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 75ef332a7caf..12affb7864d9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -184,8 +184,9 @@ kill:
inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;
}
+ } else {
+ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
}
- inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2fac4ad74867..d51a8c0b3372 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
- ip6_route_add(&cfg, GFP_ATOMIC, NULL);
+ ip6_route_add(&cfg, GFP_KERNEL, NULL);
}
static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
if (addr.s6_addr32[3]) {
add_addr(idev, &addr, plen, scope);
addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
- GFP_ATOMIC);
+ GFP_KERNEL);
return;
}
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
add_addr(idev, &addr, plen, flag);
addrconf_prefix_route(&addr, plen, 0, idev->dev,
- 0, pflags, GFP_ATOMIC);
+ 0, pflags, GFP_KERNEL);
}
}
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 673bba31eb18..9a4261e50272 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
err = proto_register(&pingv6_prot, 1);
if (err)
- goto out_unregister_ping_proto;
+ goto out_unregister_raw_proto;
/* We MUST register RAW sockets before we create the ICMP6,
* IGMP6, or NDISC control sockets.
*/
err = rawv6_init();
if (err)
- goto out_unregister_raw_proto;
+ goto out_unregister_ping_proto;
/* Register the family here so that the init calls below will
* be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
igmp_fail:
ndisc_cleanup();
ndisc_fail:
- ip6_mr_cleanup();
+ icmpv6_cleanup();
icmp_fail:
- unregister_pernet_subsys(&inet6_net_ops);
+ ip6_mr_cleanup();
ipmr_fail:
- icmpv6_cleanup();
+ unregister_pernet_subsys(&inet6_net_ops);
register_pernet_fail:
sock_unregister(PF_INET6);
rtnl_unregister_all(PF_INET6);
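The relabelled gotos above restore the usual convention for this init path: each error label undoes exactly the steps that succeeded before the failure, in reverse order of initialization. A hedged skeleton of the pattern (the step functions are placeholders, not the ipv6 code):

#include <stdio.h>

/* Placeholder steps standing in for proto_register(), rawv6_init(), ... */
static int step_a_init(void) { return 0; }
static void step_a_exit(void) { puts("undo A"); }
static int step_b_init(void) { return 0; }
static void step_b_exit(void) { puts("undo B"); }
static int step_c_init(void) { return -1; }	/* pretend step C fails */

static int subsystem_init(void)
{
	int err;

	err = step_a_init();
	if (err)
		goto out;
	err = step_b_init();
	if (err)
		goto out_a;	/* only A succeeded so far */
	err = step_c_init();
	if (err)
		goto out_b;	/* undo B, then A: reverse of init order */
	return 0;

out_b:
	step_b_exit();
out_a:
	step_a_exit();
out:
	return err;
}

int main(void)
{
	return subsystem_init() ? 1 : 0;	/* prints "undo B", then "undo A" */
}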
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d212738e9d10..5516f55e214b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
}
}
+ lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+
if (f6i->fib6_nh.nh_dev)
dev_put(f6i->fib6_nh.nh_dev);
@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
fib6_clean_expires(iter);
else
fib6_set_expires(iter, rt->expires);
- fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
+
+ if (rt->fib6_pmtu)
+ fib6_metric_set(iter, RTAX_MTU,
+ rt->fib6_pmtu);
return -EEXIST;
}
/* If we have the same destination and the same metric,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 18a3794b0f52..e493b041d4ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
if (data[IFLA_GRE_COLLECT_METADATA])
parms->collect_md = true;
+ parms->erspan_ver = 1;
if (data[IFLA_GRE_ERSPAN_VER])
parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5df2a58d945c..419960b0ba16 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1188,7 +1188,15 @@ route_lookup:
init_tel_txopt(&opt, encap_limit);
ipv6_push_frag_opts(skb, &opt.ops, &proto);
}
- hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+
+ if (hop_limit == 0) {
+ if (skb->protocol == htons(ETH_P_IP))
+ hop_limit = ip_hdr(skb)->ttl;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ hop_limit = ipv6_hdr(skb)->hop_limit;
+ else
+ hop_limit = ip6_dst_hoplimit(dst);
+ }
/* Calculate max headroom for all the headers and adjust
* needed_headroom if necessary.
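With the hunk above, an unset tunnel hop_limit (0) is inherited from the inner packet's TTL or hop limit, and only falls back to the route default when the inner protocol is neither IPv4 nor IPv6. A small standalone model of that selection (illustrative types, not the kernel structs):

#include <stdio.h>

enum inner_proto { INNER_IPV4, INNER_IPV6, INNER_OTHER };

/* Pick the outer hop limit: 0 means "not configured", so inherit from
 * the inner header when possible, otherwise use the route's default. */
static unsigned char pick_hop_limit(unsigned char cfg, enum inner_proto p,
				    unsigned char inner_ttl,
				    unsigned char inner_hlim,
				    unsigned char dst_default)
{
	if (cfg)
		return cfg;
	if (p == INNER_IPV4)
		return inner_ttl;
	if (p == INNER_IPV6)
		return inner_hlim;
	return dst_default;
}

int main(void)
{
	printf("%u\n", pick_hop_limit(0, INNER_IPV4, 17, 0, 64));	/* 17 */
	printf("%u\n", pick_hop_limit(0, INNER_OTHER, 0, 0, 64));	/* 64 */
	printf("%u\n", pick_hop_limit(32, INNER_IPV6, 0, 55, 64));	/* 32 */
	return 0;
}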
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 38dec9da90d3..eeaf7455d51e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
}
mtu = dst_mtu(dst);
- if (!skb->ignore_df && skb->len > mtu) {
+ if (skb->len > mtu) {
skb_dst_update_pmtu(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
}
t = rtnl_dereference(ip6n->tnls_wc[0]);
- unregister_netdevice_queue(t->dev, list);
+ if (t)
+ unregister_netdevice_queue(t->dev, list);
}
static int __net_init vti6_init_net(struct net *net)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7208c16302f6..18e00ce1719a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
rt->dst.error = 0;
rt->dst.output = ip6_output;
- if (ort->fib6_type == RTN_LOCAL) {
+ if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
rt->dst.input = ip6_input;
} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
rt->dst.input = ip6_mc_input;
@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
rt->rt6i_src = ort->fib6_src;
#endif
rt->rt6i_prefsrc = ort->fib6_prefsrc;
- rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
}
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6449a1c2283b..f0f5fedb8caa 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
if (len < IEEE80211_DEAUTH_FRAME_LEN)
return;
- ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, reason);
+ ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
sta_info_destroy_addr(sdata, mgmt->sa);
}
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- ibss_dbg(sdata,
- "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+ ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+ mgmt->bssid, auth_transaction);
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rx_timestamp = drv_get_tsf(local, sdata);
}
- ibss_dbg(sdata,
- "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+ ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
mgmt->sa, mgmt->bssid,
- (unsigned long long)rx_timestamp,
+ (unsigned long long)rx_timestamp);
+ ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
(unsigned long long)beacon_timestamp,
(unsigned long long)(rx_timestamp - beacon_timestamp),
jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
tx_last_beacon = drv_tx_last_beacon(local);
- ibss_dbg(sdata,
- "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+ ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+ mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4fb2709cb527..513627896204 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
flush_work(&local->radar_detected_work);
rtnl_lock();
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ /*
+ * XXX: there may be more work for other vif types and even
+ * for station mode: a good thing would be to run most of
+ * the iface type's dependent _stop (ieee80211_mg_stop,
+ * ieee80211_ibss_stop) etc...
+ * For now, fix only the specific bug that was seen: race
+ * between csa_connection_drop_work and us.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /*
+ * This worker is scheduled from the iface worker that
+ * runs on mac80211's workqueue, so we can't be
+ * scheduling this worker after the cancel right here.
+ * The exception is ieee80211_chswitch_done.
+ * Then we can have a race...
+ */
+ cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+ }
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ }
ieee80211_scan_cancel(local);
/* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_RXSTBC_MASK |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&local->ifa6_notifier);
#endif
+ ieee80211_txq_teardown_flows(local);
rtnl_lock();
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
skb_queue_purge(&local->skb_queue_tdls_chsw);
- ieee80211_txq_teardown_flows(local);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 35ad3983ae4b..daf9db3c8f24 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
forward = false;
reply = true;
target_metric = 0;
+
+ if (SN_GT(target_sn, ifmsh->sn))
+ ifmsh->sn = target_sn;
+
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7fb9957359a3..3dbecae4be73 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
*/
if (sdata->reserved_chanctx) {
+ struct ieee80211_supported_band *sband = NULL;
+ struct sta_info *mgd_sta = NULL;
+ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (sdata->reserved_ready)
goto out;
+ if (sdata->vif.bss_conf.chandef.width !=
+ sdata->csa_chandef.width) {
+ /*
+ * For managed interface, we need to also update the AP
+ * station bandwidth and align the rate scale algorithm
+ * on the bandwidth change. Here we only consider the
+ * bandwidth of the new channel definition (as channel
+ * switch flow does not have the full HT/VHT/HE
+ * information), assuming that if additional changes are
+ * required they would be done as part of the processing
+ * of the next beacon from the AP.
+ */
+ switch (sdata->csa_chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ }
+
+ mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+ sband =
+ local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+ }
+
+ if (sdata->vif.bss_conf.chandef.width >
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
ret = ieee80211_vif_use_reserved_context(sdata);
if (ret) {
sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
}
+ if (sdata->vif.bss_conf.chandef.width <
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
goto out;
}
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
cbss->beacon_interval));
return;
drop_connection:
+ /*
+ * This is just so that the disconnect flow will know that
+ * we were trying to switch channel and failed. In case the
+ * mode is 1 (we are not allowed to Tx), we will know not to
+ * send a deauthentication frame. Those two fields will be
+ * reset when the disconnection worker runs.
+ */
+ sdata->vif.csa_active = true;
+ sdata->csa_block_tx = csa_ie.mode;
+
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+ bool tx;
sdata_lock(sdata);
if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
return;
}
+ tx = !sdata->csa_block_tx;
+
/* AP is probably out of range (or not reachable for another reason) so
* remove the bss struct for that AP.
*/
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
- true, frame_buf);
+ tx, frame_buf);
mutex_lock(&local->mtx);
sdata->vif.csa_active = false;
ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
}
mutex_unlock(&local->mtx);
- ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
sdata_unlock(sdata);
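The mlme.c changes above update the AP station's RX bandwidth around the channel-context switch, and the ordering matters: a narrowing switch drops the station bandwidth before the context changes, while a widening switch raises it only afterwards, so the advertised bandwidth never exceeds what the channel in use supports. A hedged sketch of that ordering decision (plain C, illustrative names only):

#include <stdio.h>

/* Apply a bandwidth change around a channel switch so the station's RX
 * bandwidth is never wider than the channel active at that moment. */
static void switch_channel(int old_bw_mhz, int new_bw_mhz)
{
	if (new_bw_mhz < old_bw_mhz)
		printf("shrink sta bw to %d MHz (before switch)\n", new_bw_mhz);

	printf("switch channel context: %d -> %d MHz\n", old_bw_mhz, new_bw_mhz);

	if (new_bw_mhz > old_bw_mhz)
		printf("grow sta bw to %d MHz (after switch)\n", new_bw_mhz);
}

int main(void)
{
	switch_channel(80, 20);		/* narrowing: update first */
	switch_channel(20, 80);		/* widening: update last */
	return 0;
}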
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64742f2765c4..96611d5dfadb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
(ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_data(hdr->frame_control)) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cd332e3e1134..f353d9db54bc 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
}
static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
- struct sk_buff *skb, int headroom,
- int *subframe_len)
+ struct sk_buff *skb, int headroom)
{
- int amsdu_len = *subframe_len + sizeof(struct ethhdr);
- int padding = (4 - amsdu_len) & 3;
-
- if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+ if (skb_headroom(skb) < headroom) {
I802_DEBUG_INC(local->tx_expand_skb_head);
- if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+ if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
wiphy_debug(local->hw.wiphy,
"failed to reallocate TX buffer\n");
return false;
}
}
- if (padding) {
- *subframe_len += padding;
- skb_put_zero(skb, padding);
- }
-
return true;
}
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
return true;
- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
- &subframe_len))
+ if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
return false;
data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
void *data;
bool ret = false;
unsigned int orig_len;
- int n = 1, nfrags;
+ int n = 2, nfrags, pad = 0;
+ u16 hdrlen;
if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
return false;
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (skb->len + head->len > max_amsdu_len)
goto out;
- if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
- goto out;
-
nfrags = 1 + skb_shinfo(skb)->nr_frags;
nfrags += 1 + skb_shinfo(head)->nr_frags;
frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (max_frags && nfrags > max_frags)
goto out;
- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
- &subframe_len))
+ if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
+ /*
+ * Pad out the previous subframe to a multiple of 4 by adding the
+ * padding to the next one that is being added. Note that head->len
+ * is the length of the full A-MSDU, but that works since each time
+ * we add a new subframe we pad out the previous one to a multiple
+ * of 4 and thus it no longer matters in the next round.
+ */
+ hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
+ if ((head->len - hdrlen) & 3)
+ pad = 4 - ((head->len - hdrlen) & 3);
+
+ if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
+ 2 + pad))
+ goto out_recalc;
+
ret = true;
data = skb_push(skb, ETH_ALEN + 2);
memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
memcpy(data, &len, 2);
memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
+ memset(skb_push(skb, pad), 0, pad);
+
head->len += skb->len;
head->data_len += skb->len;
*frag_tail = skb;
- flow->backlog += head->len - orig_len;
- tin->backlog_bytes += head->len - orig_len;
-
- fq_recalc_backlog(fq, tin, flow);
+out_recalc:
+ if (head->len != orig_len) {
+ flow->backlog += head->len - orig_len;
+ tin->backlog_bytes += head->len - orig_len;
+ fq_recalc_backlog(fq, tin, flow);
+ }
out:
spin_unlock_bh(&fq->lock);
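The rewritten A-MSDU path above derives the padding for the previous subframe from the accumulated A-MSDU length (head->len minus the header portion) and prepends it to the subframe currently being added, so every subframe except the last ends on a 4-byte boundary. A standalone model of that arithmetic (illustrative, not the mac80211 code):

#include <stdio.h>

/* Bytes of padding needed so the next subframe starts 4-byte aligned;
 * amsdu_len is the A-MSDU payload length accumulated so far. */
static unsigned int amsdu_pad(unsigned int amsdu_len)
{
	return (amsdu_len & 3) ? 4 - (amsdu_len & 3) : 0;
}

int main(void)
{
	unsigned int lens[] = { 60, 61, 62, 63, 64 };

	for (unsigned int i = 0; i < 5; i++)
		printf("len %u -> pad %u\n", lens[i], amsdu_pad(lens[i]));
	/* 60->0, 61->3, 62->2, 63->1, 64->0 */
	return 0;
}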
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 88efda7c9f8a..716cd6442d86 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_reg_rule *rrule;
- struct ieee80211_wmm_ac *wmm_ac;
+ const struct ieee80211_wmm_ac *wmm_ac;
u16 center_freq = 0;
if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
- if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+ if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
rcu_read_unlock();
return;
}
if (sdata->vif.type == NL80211_IFTYPE_AP)
- wmm_ac = &rrule->wmm_rule->ap[ac];
+ wmm_ac = &rrule->wmm_rule.ap[ac];
else
- wmm_ac = &rrule->wmm_rule->client[ac];
+ wmm_ac = &rrule->wmm_rule.client[ac];
qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
- qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
- min_t(u16, qparam->txop, wmm_ac->cot / 32);
+ qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
rcu_read_unlock();
}
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 82e6edf9c5d9..45f33d6dedf7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
bool found;
int rc;
- if (id > ndp->package_num) {
+ if (id > ndp->package_num - 1) {
netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
return -ENODEV;
}
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
return 0; /* done */
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO);
+ &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO);
if (!hdr) {
rc = -EMSGSIZE;
goto err;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5610061e7f2e..75c92a87e7b2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
.close = packet_mm_close,
};
-static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+ unsigned int len)
{
int i;
for (i = 0; i < len; i++) {
if (likely(pg_vec[i].buffer)) {
- kvfree(pg_vec[i].buffer);
+ if (is_vmalloc_addr(pg_vec[i].buffer))
+ vfree(pg_vec[i].buffer);
+ else
+ free_pages((unsigned long)pg_vec[i].buffer,
+ order);
pg_vec[i].buffer = NULL;
}
}
kfree(pg_vec);
}
-static char *alloc_one_pg_vec_page(unsigned long size)
+static char *alloc_one_pg_vec_page(unsigned long order)
{
char *buffer;
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+ __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
- buffer = kvzalloc(size, GFP_KERNEL);
+ buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;
- buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ /* __get_free_pages failed, fall back to vmalloc */
+ buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
+ if (buffer)
+ return buffer;
- return buffer;
+ /* vmalloc failed, let's dig into swap here */
+ gfp_flags &= ~__GFP_NORETRY;
+ buffer = (char *) __get_free_pages(gfp_flags, order);
+ if (buffer)
+ return buffer;
+
+ /* complete and utter failure */
+ return NULL;
}
-static struct pgv *alloc_pg_vec(struct tpacket_req *req)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
unsigned int block_nr = req->tp_block_nr;
- unsigned long size = req->tp_block_size;
struct pgv *pg_vec;
int i;
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
goto out;
for (i = 0; i < block_nr; i++) {
- pg_vec[i].buffer = alloc_one_pg_vec_page(size);
+ pg_vec[i].buffer = alloc_one_pg_vec_page(order);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}
@@ -4184,7 +4200,7 @@ out:
return pg_vec;
out_free_pgvec:
- free_pg_vec(pg_vec, block_nr);
+ free_pg_vec(pg_vec, order, block_nr);
pg_vec = NULL;
goto out;
}
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
+ int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
- int was_running;
__be16 num;
int err = -EINVAL;
/* Added to avoid minimal code churn */
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
err = -ENOMEM;
- pg_vec = alloc_pg_vec(req);
+ order = get_order(req->tp_block_size);
+ pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
+ swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
if (pg_vec)
- free_pg_vec(pg_vec, req->tp_block_nr);
+ free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
return err;
}
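alloc_one_pg_vec_page() above reintroduces a three-stage fallback: try physically contiguous pages without retrying or warning, then fall back to vmalloc, then try contiguous pages once more while allowing reclaim. A hedged userspace model of that "cheap first, expensive last" ordering (the allocators here are stand-ins, not kernel APIs):

#include <stdlib.h>
#include <string.h>

/* Stand-ins for __get_free_pages() and vzalloc(); only the fallback
 * order is modelled, not the real allocators. */
static void *alloc_contig_cheap(size_t sz) { (void)sz; return NULL; }	/* may fail */
static void *alloc_virt(size_t sz)         { return calloc(1, sz); }
static void *alloc_contig_hard(size_t sz)  { return calloc(1, sz); }

static void *alloc_ring_block(size_t size)
{
	void *buf;

	buf = alloc_contig_cheap(size);	/* fast path, no reclaim */
	if (buf)
		return buf;

	buf = alloc_virt(size);		/* fall back to virtually contiguous */
	if (buf)
		return buf;

	return alloc_contig_hard(size);	/* last resort: allow reclaim/retry */
}

int main(void)
{
	void *b = alloc_ring_block(1 << 16);

	if (!b)
		return 1;
	memset(b, 0xab, 1 << 16);
	free(b);
	return 0;
}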
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 8f50036f62f0..3bb7c5fb3bff 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -64,6 +64,7 @@ struct packet_ring_buffer {
unsigned int frame_size;
unsigned int frame_max;
+ unsigned int pg_vec_order;
unsigned int pg_vec_pages;
unsigned int pg_vec_len;
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 01b3bd6a3708..b9092111bc45 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -1,6 +1,6 @@
config RDS
- tristate "The RDS Protocol"
+ tristate "The Reliable Datagram Sockets Protocol"
depends on INET
---help---
The RDS (Reliable Datagram Sockets) protocol provides reliable,
diff --git a/net/rds/ib.c b/net/rds/ib.c
index c1d97640c0be..eba75c1ba359 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
- dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- rdma_addr_get_sgid(dev_addr,
- (union ib_gid *)&iinfo6->src_gid);
- rdma_addr_get_dgid(dev_addr,
- (union ib_gid *)&iinfo6->dst_gid);
-
+ rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
+ (union ib_gid *)&iinfo6->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo6->max_send_wr = ic->i_send_ring.w_nr;
iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2c7b7c352d3e..b9bbcf3d6c63 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -37,7 +37,6 @@
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#include <net/tcp.h>
#include <net/addrconf.h>
#include "rds.h"
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 00192a996be0..0f8465852254 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/rfkill.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 229d63c99be2..e12f8ef7baa4 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
}
EXPORT_SYMBOL(tcf_generic_walker);
-static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
- struct tc_action **a, int bind)
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct tc_action *p;
spin_lock(&idrinfo->lock);
p = idr_find(&idrinfo->action_idr, index);
- if (IS_ERR(p)) {
+ if (IS_ERR(p))
p = NULL;
- } else if (p) {
+ else if (p)
refcount_inc(&p->tcfa_refcnt);
- if (bind)
- atomic_inc(&p->tcfa_bindcnt);
- }
spin_unlock(&idrinfo->lock);
if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
}
return false;
}
-
-int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
-{
- return __tcf_idr_check(tn, index, a, 0);
-}
EXPORT_SYMBOL(tcf_idr_search);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
- int bind)
+static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
- return __tcf_idr_check(tn, index, a, bind);
-}
-EXPORT_SYMBOL(tcf_idr_check);
-
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
-{
- struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct tc_action *p;
int ret = 0;
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
spin_unlock(&idrinfo->lock);
return ret;
}
-EXPORT_SYMBOL(tcf_idr_delete_index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
p->idrinfo = idrinfo;
p->ops = ops;
- INIT_LIST_HEAD(&p->list);
*a = p;
return 0;
err3:
@@ -681,19 +662,30 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
return ret;
}
+static int tcf_action_destroy_1(struct tc_action *a, int bind)
+{
+ struct tc_action *actions[] = { a, NULL };
+
+ return tcf_action_destroy(actions, bind);
+}
+
static int tcf_action_put(struct tc_action *p)
{
return __tcf_action_put(p, false);
}
+/* Put every action in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
int i;
- for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
struct tc_action *a = actions[i];
- const struct tc_action_ops *ops = a->ops;
+ const struct tc_action_ops *ops;
+ if (!a)
+ continue;
+ ops = a->ops;
if (tcf_action_put(a))
module_put(ops->owner);
}
@@ -896,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
err = tcf_action_goto_chain_init(a, tp);
if (err) {
- struct tc_action *actions[] = { a, NULL };
-
- tcf_action_destroy(actions, bind);
+ tcf_action_destroy_1(a, bind);
NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
return ERR_PTR(err);
}
}
if (!tcf_action_valid(a->tcfa_action)) {
- NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
- a->tcfa_action = TC_ACT_UNSPEC;
+ tcf_action_destroy_1(a, bind);
+ NL_SET_ERR_MSG(extack, "Invalid control action value");
+ return ERR_PTR(-EINVAL);
}
return a;
@@ -1175,41 +1166,38 @@ err_out:
return err;
}
-static int tcf_action_delete(struct net *net, struct tc_action *actions[],
- int *acts_deleted, struct netlink_ext_ack *extack)
+static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
- u32 act_index;
- int ret, i;
+ int i;
for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
struct tc_action *a = actions[i];
const struct tc_action_ops *ops = a->ops;
-
/* Actions can be deleted concurrently so we must save their
* type and id to search again after reference is released.
*/
- act_index = a->tcfa_index;
+ struct tcf_idrinfo *idrinfo = a->idrinfo;
+ u32 act_index = a->tcfa_index;
+ actions[i] = NULL;
if (tcf_action_put(a)) {
/* last reference, action was deleted concurrently */
module_put(ops->owner);
} else {
+ int ret;
+
/* now do the delete */
- ret = ops->delete(net, act_index);
- if (ret < 0) {
- *acts_deleted = i + 1;
+ ret = tcf_idr_delete_index(idrinfo, act_index);
+ if (ret < 0)
return ret;
- }
}
}
- *acts_deleted = i;
return 0;
}
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
- int *acts_deleted, u32 portid, size_t attr_size,
- struct netlink_ext_ack *extack)
+ u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
int ret;
struct sk_buff *skb;
@@ -1227,7 +1215,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
}
/* now do the delete */
- ret = tcf_action_delete(net, actions, acts_deleted, extack);
+ ret = tcf_action_delete(net, actions);
if (ret < 0) {
NL_SET_ERR_MSG(extack, "Failed to delete TC action");
kfree_skb(skb);
@@ -1249,8 +1237,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
size_t attr_size = 0;
- struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
- int acts_deleted = 0;
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
if (ret < 0)
@@ -1280,14 +1267,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
if (event == RTM_GETACTION)
ret = tcf_get_notify(net, portid, n, actions, event, extack);
else { /* delete */
- ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
- attr_size, extack);
+ ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
if (ret)
goto err;
- return ret;
+ return 0;
}
err:
- tcf_action_put_many(&actions[acts_deleted]);
+ tcf_action_put_many(actions);
return ret;
}
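tcf_action_delete() above captures the idrinfo pointer and index before dropping its own reference, clears the array slot so the later tcf_action_put_many() skips it, and only deletes by index when it did not hold the last reference. A simplified, hedged model of that "put, then delete by id" flow (plain C with a toy refcount, not the tc code):

#include <stdio.h>

struct action {
	unsigned int id;
	int refcnt;
};

/* Returns 1 when this put released the last reference. */
static int action_put(struct action *a)
{
	return --a->refcnt == 0;
}

static void delete_by_id(unsigned int id)
{
	printf("delete action %u from the table\n", id);
}

static void delete_actions(struct action **acts, int n)
{
	for (int i = 0; i < n; i++) {
		struct action *a = acts[i];
		unsigned int id = a->id;	/* save before the put */

		acts[i] = NULL;			/* later cleanup must skip it */
		if (action_put(a))
			printf("action %u was freed concurrently\n", id);
		else
			delete_by_id(id);
	}
}

int main(void)
{
	struct action a = { .id = 7, .refcnt = 2 }, b = { .id = 9, .refcnt = 1 };
	struct action *acts[] = { &a, &b };

	delete_actions(acts, 2);
	return 0;
}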
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d30b23e42436..0c68bc9cf0b4 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_bpf_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_bpf_ops __read_mostly = {
.kind = "bpf",
.type = TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.init = tcf_bpf_init,
.walk = tcf_bpf_walker,
.lookup = tcf_bpf_search,
- .delete = tcf_bpf_delete,
.size = sizeof(struct tcf_bpf),
};
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 54c0bf54f2ac..6f0f273f1139 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_connmark_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
.type = TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
.init = tcf_connmark_init,
.walk = tcf_connmark_walker,
.lookup = tcf_connmark_search,
- .delete = tcf_connmark_delete,
.size = sizeof(struct tcf_connmark_info),
};
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e698d3fe2080..b8a67ae3105a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_csum));
}
-static int tcf_csum_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
.type = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
.walk = tcf_csum_walker,
.lookup = tcf_csum_search,
.get_fill_size = tcf_csum_get_fill_size,
- .delete = tcf_csum_delete,
.size = sizeof(struct tcf_csum),
};
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 6a3f25a8ffb3..cd1d9bd32ef9 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
return sz;
}
-static int tcf_gact_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
.type = TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
.walk = tcf_gact_walker,
.lookup = tcf_gact_search,
.get_fill_size = tcf_gact_get_fill_size,
- .delete = tcf_gact_delete,
.size = sizeof(struct tcf_gact),
};
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index d1081bdf1bdb..06a3d4801878 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
struct tcf_meta_ops *o;
- read_lock_bh(&ife_mod_lock);
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
if (o->metaid == metaid) {
if (!try_module_get(o->owner))
o = NULL;
- read_unlock_bh(&ife_mod_lock);
+ read_unlock(&ife_mod_lock);
return o;
}
}
- read_unlock_bh(&ife_mod_lock);
+ read_unlock(&ife_mod_lock);
return NULL;
}
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
!mops->get || !mops->alloc)
return -EINVAL;
- write_lock_bh(&ife_mod_lock);
+ write_lock(&ife_mod_lock);
list_for_each_entry(m, &ifeoplist, list) {
if (m->metaid == mops->metaid ||
(strcmp(mops->name, m->name) == 0)) {
- write_unlock_bh(&ife_mod_lock);
+ write_unlock(&ife_mod_lock);
return -EEXIST;
}
}
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
mops->release = ife_release_meta_gen;
list_add_tail(&mops->list, &ifeoplist);
- write_unlock_bh(&ife_mod_lock);
+ write_unlock(&ife_mod_lock);
return 0;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
struct tcf_meta_ops *m;
int err = -ENOENT;
- write_lock_bh(&ife_mod_lock);
+ write_lock(&ife_mod_lock);
list_for_each_entry(m, &ifeoplist, list) {
if (m->metaid == mops->metaid) {
list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
break;
}
}
- write_unlock_bh(&ife_mod_lock);
+ write_unlock(&ife_mod_lock);
return err;
}
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
#endif
/* called when adding new meta information
- * under ife->tcf_lock for existing action
*/
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
- void *val, int len, bool exists,
- bool rtnl_held)
+static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
if (!ops) {
ret = -ENOENT;
#ifdef CONFIG_MODULES
- if (exists)
- spin_unlock_bh(&ife->tcf_lock);
if (rtnl_held)
rtnl_unlock();
request_module("ife-meta-%s", ife_meta_id2name(metaid));
if (rtnl_held)
rtnl_lock();
- if (exists)
- spin_lock_bh(&ife->tcf_lock);
ops = find_ife_oplist(metaid);
#endif
}
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
}
/* called when adding new meta information
- * under ife->tcf_lock for existing action
*/
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
- int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+ struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ int len, bool atomic, bool exists)
{
struct tcf_meta_info *mi = NULL;
- struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
- if (!ops)
- return -ENOENT;
-
mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (!mi) {
- /*put back what find_ife_oplist took */
- module_put(ops->owner);
+ if (!mi)
return -ENOMEM;
- }
mi->metaid = metaid;
mi->ops = ops;
@@ -327,29 +313,61 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
if (ret != 0) {
kfree(mi);
- module_put(ops->owner);
return ret;
}
}
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
list_add_tail(&mi->metalist, &ife->metalist);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
+
+ return ret;
+}
+
+static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+ struct tcf_ife_info *ife, u32 metaid,
+ bool exists)
+{
+ int ret;
+
+ if (!try_module_get(ops->owner))
+ return -ENOENT;
+ ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
+ if (ret)
+ module_put(ops->owner);
+ return ret;
+}
+
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ int len, bool exists)
+{
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+ if (!ops)
+ return -ENOENT;
+ ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+ if (ret)
+ /* put back what find_ife_oplist took */
+ module_put(ops->owner);
return ret;
}
-static int use_all_metadata(struct tcf_ife_info *ife)
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
struct tcf_meta_ops *o;
int rc = 0;
int installed = 0;
- read_lock_bh(&ife_mod_lock);
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
- rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+ rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
if (rc == 0)
installed += 1;
}
- read_unlock_bh(&ife_mod_lock);
+ read_unlock(&ife_mod_lock);
if (installed)
return 0;
@@ -396,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
struct tcf_meta_info *e, *n;
list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
- module_put(e->ops->owner);
list_del(&e->metalist);
if (e->metaval) {
if (e->ops->release)
@@ -404,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
else
kfree(e->metaval);
}
+ module_put(e->ops->owner);
kfree(e);
}
}
@@ -422,7 +440,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
kfree_rcu(p, rcu);
}
-/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
bool exists, bool rtnl_held)
{
@@ -436,8 +453,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len, exists,
- rtnl_held);
+ rc = load_metaops_and_vet(i, val, len, rtnl_held);
if (rc != 0)
return rc;
@@ -540,8 +556,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
p->eth_type = ife_type;
}
- if (exists)
- spin_lock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED)
INIT_LIST_HEAD(&ife->metalist);
@@ -551,10 +565,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
NULL, NULL);
if (err) {
metadata_parse_err:
- if (exists)
- spin_unlock_bh(&ife->tcf_lock);
tcf_idr_release(*a, bind);
-
kfree(p);
return err;
}
@@ -569,17 +580,16 @@ metadata_parse_err:
* as we can. You better have at least one else we are
* going to bail out
*/
- err = use_all_metadata(ife);
+ err = use_all_metadata(ife, exists);
if (err) {
- if (exists)
- spin_unlock_bh(&ife->tcf_lock);
tcf_idr_release(*a, bind);
-
kfree(p);
return err;
}
}
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ife->tcf_action = parm->action;
/* protected by tcf_lock when modifying existing action */
rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +863,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_ife_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_ife_ops = {
.kind = "ife",
.type = TCA_ACT_IFE,
@@ -870,7 +873,6 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.walk = tcf_ife_walker,
.lookup = tcf_ife_search,
- .delete = tcf_ife_delete,
.size = sizeof(struct tcf_ife_info),
};
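The act_ife rework above moves the metadata allocation out from under tcf_lock (so GFP_KERNEL allocations and request_module() can sleep safely) and takes the lock only around the list_add_tail(); use_all_metadata() now grabs the module reference explicitly and drops it if insertion fails. A hedged sketch of that "take ref, allocate unlocked, insert under lock, roll back on error" shape (toy lock and refcount helpers, not the kernel ones):

#include <stdlib.h>
#include <stdio.h>

struct entry { struct entry *next; int id; };

static struct entry *head;
static int module_refs;

/* Toy stand-ins for try_module_get()/module_put() and the list lock. */
static int ref_get(void)      { module_refs++; return 1; }
static void ref_put(void)     { module_refs--; }
static void list_lock(void)   { }	/* spin_lock_bh() in the real code */
static void list_unlock(void) { }

/* Take the reference first, allocate outside the lock (the allocation
 * may sleep), then hold the lock only for the insertion; drop the
 * reference again if anything fails along the way. */
static int add_entry(int id)
{
	struct entry *e;

	if (!ref_get())
		return -1;

	e = malloc(sizeof(*e));
	if (!e) {
		ref_put();
		return -1;
	}
	e->id = id;

	list_lock();
	e->next = head;
	head = e;
	list_unlock();
	return 0;
}

int main(void)
{
	int err = add_entry(1) || add_entry(2);

	printf("entries added, refs held: %d\n", module_refs);
	return err;
}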
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 51f235bbeb5b..23273b5303fd 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_ipt_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
.type = TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
.init = tcf_ipt_init,
.walk = tcf_ipt_walker,
.lookup = tcf_ipt_search,
- .delete = tcf_ipt_delete,
.size = sizeof(struct tcf_ipt),
};
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_xt_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
.type = TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
.init = tcf_xt_init,
.walk = tcf_xt_walker,
.lookup = tcf_xt_search,
- .delete = tcf_xt_delete,
.size = sizeof(struct tcf_ipt),
};
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 38fd20f10f67..8bf66d0a6800 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
dev_put(dev);
}
-static int tcf_mirred_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.type = TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
.put_dev = tcf_mirred_put_dev,
- .delete = tcf_mirred_delete,
};
static __net_init int mirred_init_net(struct net *net)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 822e903bfc25..4313aa102440 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_nat_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
.type = TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
.init = tcf_nat_init,
.walk = tcf_nat_walker,
.lookup = tcf_nat_search,
- .delete = tcf_nat_delete,
.size = sizeof(struct tcf_nat),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 8a7a7cb94e83..ad99a99f11f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
{
struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
+ if (!keys_start)
+ goto nla_failure;
for (; n > 0; n--) {
struct nlattr *key_start;
key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
+ if (!key_start)
+ goto nla_failure;
if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
- nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
- nlmsg_trim(skb, keys_start);
- return -EINVAL;
- }
+ nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
+ goto nla_failure;
nla_nest_end(skb, key_start);
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
nla_nest_end(skb, keys_start);
return 0;
+nla_failure:
+ nla_nest_cancel(skb, keys_start);
+ return -EINVAL;
}
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
if (p->tcfp_keys_ex) {
- tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
+ if (tcf_pedit_key_ex_dump(skb,
+ p->tcfp_keys_ex,
+ p->tcfp_nkeys))
+ goto nla_put_failure;
if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
goto nla_put_failure;
@@ -460,13 +468,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_pedit_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
.type = TCA_ACT_PEDIT,
@@ -477,7 +478,6 @@ static struct tc_action_ops act_pedit_ops = {
.init = tcf_pedit_init,
.walk = tcf_pedit_walker,
.lookup = tcf_pedit_search,
- .delete = tcf_pedit_delete,
.size = sizeof(struct tcf_pedit),
};
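The pedit dump fix above checks every nla_nest_start() result and, on any failure, cancels the outermost nest so partially written attributes are trimmed from the message; the caller now propagates that error instead of ignoring it. A hedged sketch of the cancel-on-failure shape (toy buffer helpers stand in for the nla_* API):

#include <stdio.h>

/* Toy stand-ins for nla_nest_start()/nla_nest_end()/nla_nest_cancel(). */
struct msg { int len; };

static int nest_start(struct msg *m)             { return m->len; }	/* save a mark */
static void nest_end(struct msg *m)              { (void)m; }
static void nest_cancel(struct msg *m, int mark) { m->len = mark; }
static int put_attr(struct msg *m, int ok)       { if (ok) m->len += 4; return ok ? 0 : -1; }

/* On any failure, rewind the message to the outer nest's start mark so
 * no half-written nested attributes leak into the dump. */
static int dump_keys(struct msg *m, int nkeys, int fail_at)
{
	int outer = nest_start(m);

	for (int i = 0; i < nkeys; i++) {
		nest_start(m);
		if (put_attr(m, i != fail_at))
			goto cancel;
		nest_end(m);
	}
	nest_end(m);
	return 0;

cancel:
	nest_cancel(m, outer);
	return -1;
}

int main(void)
{
	struct msg m = { .len = 0 };

	printf("%d len=%d\n", dump_keys(&m, 3, 1), m.len);	/* -1 len=0 */
	return 0;
}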
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 06f0742db593..5d8bfa878477 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_police_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, police_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
.init = tcf_police_init,
.walk = tcf_police_walker,
.lookup = tcf_police_search,
- .delete = tcf_police_delete,
.size = sizeof(struct tcf_police),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 207b4132d1b0..44e9c00657bc 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_sample_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_sample_ops = {
.kind = "sample",
.type = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
.cleanup = tcf_sample_cleanup,
.walk = tcf_sample_walker,
.lookup = tcf_sample_search,
- .delete = tcf_sample_delete,
.size = sizeof(struct tcf_sample),
};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e616523ba3c1..52400d49f81f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_simp_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
.type = TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
.init = tcf_simp_init,
.walk = tcf_simp_walker,
.lookup = tcf_simp_search,
- .delete = tcf_simp_delete,
.size = sizeof(struct tcf_defact),
};
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 926d7bc4a89d..73e44ce2a883 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.type = TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
.cleanup = tcf_skbedit_cleanup,
.walk = tcf_skbedit_walker,
.lookup = tcf_skbedit_search,
- .delete = tcf_skbedit_delete,
.size = sizeof(struct tcf_skbedit),
};
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index d6a1af0c4171..588077fafd6c 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_skbmod_ops = {
.kind = "skbmod",
.type = TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
.cleanup = tcf_skbmod_cleanup,
.walk = tcf_skbmod_walker,
.lookup = tcf_skbmod_search,
- .delete = tcf_skbmod_delete,
.size = sizeof(struct tcf_skbmod),
};
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 8f09cf08d8fe..420759153d5f 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tunnel_key_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_tunnel_key_ops = {
.kind = "tunnel_key",
.type = TCA_ACT_TUNNEL_KEY,
@@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
.cleanup = tunnel_key_release,
.walk = tunnel_key_walker,
.lookup = tunnel_key_search,
- .delete = tunnel_key_delete,
.size = sizeof(struct tcf_tunnel_key),
};
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 209e70ad2c09..033d273afe50 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_vlan_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.type = TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
.cleanup = tcf_vlan_cleanup,
.walk = tcf_vlan_walker,
.lookup = tcf_vlan_search,
- .delete = tcf_vlan_delete,
.size = sizeof(struct tcf_vlan),
};
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 31bd1439cf60..1a67af8a6e8c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1252,7 +1252,7 @@ replay:
}
chain = tcf_chain_get(block, chain_index, true);
if (!chain) {
- NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+ NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
err = -ENOMEM;
goto errout;
}
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
- err = -EINVAL;
+ err = -ENOENT;
goto errout;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d5d2a6dc3921..f218ccf1e2d9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_U32_MAX + 1];
u32 htid, flags = 0;
+ size_t sel_size;
int err;
#ifdef CONFIG_CLS_U32_PERF
size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
s = nla_data(tb[TCA_U32_SEL]);
+ sel_size = struct_size(s, keys, s->nkeys);
+ if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
+ err = -EINVAL;
+ goto erridr;
+ }
- n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+ n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
if (n == NULL) {
err = -ENOBUFS;
goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
#endif
- memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+ memcpy(&n->sel, s, sel_size);
RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
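The u32 fix above validates the netlink-provided selector before copying it: the expected size is computed with struct_size() from the user-supplied nkeys and compared against the attribute length, so a short attribute can no longer cause an out-of-bounds copy. A standalone model of that check (plain C, not the cls_u32 structures):

#include <stdio.h>
#include <stddef.h>

struct key { unsigned int mask, val; };

struct sel {
	unsigned char nkeys;
	struct key keys[];	/* flexible array sized by nkeys */
};

/* Reject the selector unless the buffer really holds nkeys entries. */
static int sel_ok(size_t attr_len, unsigned char nkeys)
{
	size_t need = sizeof(struct sel) + (size_t)nkeys * sizeof(struct key);

	return attr_len >= need;
}

int main(void)
{
	printf("%d\n", sel_ok(sizeof(struct sel) + 2 * sizeof(struct key), 2));	/* 1 */
	printf("%d\n", sel_ok(sizeof(struct sel) + 1 * sizeof(struct key), 2));	/* 0 */
	return 0;
}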
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 35fc7252187c..c07c30b916d5 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -64,7 +64,6 @@
#include <linux/vmalloc.h>
#include <linux/reciprocal_div.h>
#include <net/netlink.h>
-#include <linux/version.h>
#include <linux/if_vlan.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
}
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
- int flow_mode)
+ int flow_mode, u16 flow_override, u16 host_override)
{
- u32 flow_hash = 0, srchost_hash, dsthost_hash;
+ u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
u16 reduced_hash, srchost_idx, dsthost_idx;
struct flow_keys keys, host_keys;
if (unlikely(flow_mode == CAKE_FLOW_NONE))
return 0;
+ /* If both overrides are set we can skip packet dissection entirely */
+ if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
+ (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+ goto skip_hash;
+
skb_flow_dissect_flow_keys(skb, &keys,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
if (flow_mode & CAKE_FLOW_FLOWS)
flow_hash = flow_hash_from_keys(&keys);
+skip_hash:
+ if (flow_override)
+ flow_hash = flow_override - 1;
+ if (host_override) {
+ dsthost_hash = host_override - 1;
+ srchost_hash = host_override - 1;
+ }
+
if (!(flow_mode & CAKE_FLOW_FLOWS)) {
if (flow_mode & CAKE_FLOW_SRC_IP)
flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
struct cake_sched_data *q = qdisc_priv(sch);
struct tcf_proto *filter;
struct tcf_result res;
- u32 flow = 0;
+ u16 flow = 0, host = 0;
int result;
filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
#endif
if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
flow = TC_H_MIN(res.classid);
+ if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
+ host = TC_H_MAJ(res.classid) >> 16;
}
hash:
*t = cake_select_tin(sch, skb);
- return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+ return cake_hash(*t, skb, flow_mode, flow, host) + 1;
}
static void cake_reconfigure(struct Qdisc *sch);
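cake_classify() above passes the tc filter's minor classid as a flow override and the major classid as a host override into cake_hash(), which skips packet dissection entirely when every hash it needs is overridden; a zero override means "not set", so the stored index is the override minus one. A small model of that encoding (illustrative, not the sch_cake code):

#include <stdio.h>

/* override == 0 means "not set"; otherwise queue index = override - 1. */
static unsigned int pick_queue(unsigned int dissected_hash,
			       unsigned int override, unsigned int nqueues)
{
	unsigned int h = override ? override - 1 : dissected_hash;

	return h % nqueues;	/* reduce to the configured queue count */
}

int main(void)
{
	printf("%u\n", pick_queue(12345, 0, 1024));	/* from dissection */
	printf("%u\n", pick_queue(12345, 7, 1024));	/* forced to queue 6 */
	return 0;
}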
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index ef5c9a82d4e8..a644292f9faf 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
struct sctp_ht_iter {
struct seq_net_private p;
struct rhashtable_iter hti;
- int start_fail;
};
static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
sctp_transport_walk_start(&iter->hti);
- iter->start_fail = 0;
return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
}
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
{
struct sctp_ht_iter *iter = seq->private;
- if (iter->start_fail)
- return;
sctp_transport_walk_stop(&iter->hti);
}
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
}
transport = (struct sctp_transport *)v;
- if (!sctp_transport_hold(transport))
- return 0;
assoc = transport->asoc;
epb = &assoc->base;
sk = epb->sk;
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
}
transport = (struct sctp_transport *)v;
- if (!sctp_transport_hold(transport))
- return 0;
assoc = transport->asoc;
list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e96b15a66aba..f73e9d38d5ba 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
}
if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
- if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
- trans->flowlabel = params->spp_ipv6_flowlabel &
- SCTP_FLOWLABEL_VAL_MASK;
- trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
- } else if (asoc) {
- list_for_each_entry(trans,
- &asoc->peer.transport_addr_list,
- transports) {
- if (trans->ipaddr.sa.sa_family != AF_INET6)
- continue;
+ if (trans) {
+ if (trans->ipaddr.sa.sa_family == AF_INET6) {
trans->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
}
+ } else if (asoc) {
+ struct sctp_transport *t;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ if (t->ipaddr.sa.sa_family != AF_INET6)
+ continue;
+ t->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ }
asoc->flowlabel = params->spp_ipv6_flowlabel &
SCTP_FLOWLABEL_VAL_MASK;
asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
trans->dscp |= SCTP_DSCP_SET_MASK;
} else if (asoc) {
- list_for_each_entry(trans,
- &asoc->peer.transport_addr_list,
+ struct sctp_transport *t;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
- trans->dscp = params->spp_dscp &
- SCTP_DSCP_VAL_MASK;
- trans->dscp |= SCTP_DSCP_SET_MASK;
+ t->dscp = params->spp_dscp &
+ SCTP_DSCP_VAL_MASK;
+ t->dscp |= SCTP_DSCP_SET_MASK;
}
asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
asoc->dscp |= SCTP_DSCP_SET_MASK;
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
break;
}
+ if (!sctp_transport_hold(t))
+ continue;
+
if (net_eq(sock_net(t->asoc->base.sk), net) &&
t->asoc->peer.primary_path == t)
break;
+
+ sctp_transport_put(t);
}
return t;
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter,
int pos)
{
- void *obj = SEQ_START_TOKEN;
+ struct sctp_transport *t;
- while (pos && (obj = sctp_transport_get_next(net, iter)) &&
- !IS_ERR(obj))
- pos--;
+ if (!pos)
+ return SEQ_START_TOKEN;
- return obj;
+ while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+ if (!--pos)
+ break;
+ sctp_transport_put(t);
+ }
+
+ return t;
}
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -5082,8 +5096,6 @@ again:
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
- if (!sctp_transport_hold(tsp))
- continue;
ret = cb(tsp, p);
if (ret)
break;
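
The SCTP changes above move reference counting into the iterator itself: sctp_transport_get_next() now returns each element with a reference already held and drops the reference when it skips an element, so callers no longer need their own try-hold logic. A toy, refcounted list iterator illustrating that ownership rule (node, get_next_even and friends are made-up stand-ins, not the SCTP types):

#include <stdio.h>

struct node {
	int id;
	int refcnt;
	struct node *next;
};

static void node_hold(struct node *n) { n->refcnt++; }
static void node_put(struct node *n)  { n->refcnt--; }

/* Return the next even-id node with a reference held, or NULL at the end. */
static struct node *get_next_even(struct node **cursor)
{
	struct node *n;

	while ((n = *cursor)) {
		*cursor = n->next;
		node_hold(n);
		if (n->id % 2 == 0)
			return n;	/* caller must node_put() when done */
		node_put(n);		/* skipped: iterator drops its own reference */
	}
	return NULL;
}

int main(void)
{
	struct node c = { 5, 0, NULL }, b = { 4, 0, &c }, a = { 3, 0, &b };
	struct node *cur = &a, *n;

	while ((n = get_next_even(&cur))) {
		printf("visit %d (refcnt %d)\n", n->id, n->refcnt);
		node_put(n);
	}
	return 0;
}
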
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 9ee6cfea56dd..d8026543bf4c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
* struct tipc_bc_base - base structure for keeping broadcast send state
* @link: broadcast send link structure
* @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
* @primary_bearer: a bearer having links to all broadcast destinations, if any
* @bcast_support: indicates if primary bearer, if any, supports broadcast
* @rcast_support: indicates if all peer nodes support replicast
* @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
*/
struct tipc_bc_base {
struct tipc_link *link;
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
index aaabb0b776dd..73137f4aeb68 100644
--- a/net/tipc/diag.c
+++ b/net/tipc/diag.c
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start = tipc_dump_start,
.dump = tipc_diag_dump,
+ .done = tipc_dump_done,
};
netlink_dump_start(net->diag_nlsk, skb, h, &c);
return 0;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 88f027b502f6..66d5b2c5987a 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
- u64 value = (u64)node << 32 | port;
struct tipc_dest *dst;
list_for_each_entry(dst, l, list) {
- if (dst->value != value)
- continue;
- return dst;
+ if (dst->node == node && dst->port == port)
+ return dst;
}
return NULL;
}
bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
- u64 value = (u64)node << 32 | port;
struct tipc_dest *dst;
if (tipc_dest_find(l, node, port))
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
if (unlikely(!dst))
return false;
- dst->value = value;
+ dst->node = node;
+ dst->port = port;
list_add(&dst->list, l);
return true;
}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 0febba41da86..892bd750b85f 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
struct tipc_dest {
struct list_head list;
- union {
- struct {
- u32 port;
- u32 node;
- };
- u64 value;
- };
+ u32 port;
+ u32 node;
};
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
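
The tipc_dest change above drops the u64 union and compares node and port field by field. One likely pitfall with the packed form, shown as a hedged sketch (packed_dest is illustrative only): a value built as (node << 32) | port only lines up with a { u32 port; u32 node; } overlay on little-endian machines, while the field-by-field comparison is layout independent.

#include <stdint.h>
#include <stdio.h>

union packed_dest {
	struct { uint32_t port; uint32_t node; };
	uint64_t value;
};

int main(void)
{
	union packed_dest d = { .port = 0x1111, .node = 0x2222 };
	uint64_t shifted = (uint64_t)0x2222 << 32 | 0x1111;

	/* Matches on little-endian hosts, differs on big-endian ones. */
	printf("union value 0x%016llx vs shifted 0x%016llx: %s\n",
	       (unsigned long long)d.value, (unsigned long long)shifted,
	       d.value == shifted ? "equal" : "different");

	/* Comparing the two fields directly does not depend on layout. */
	printf("fields equal: %s\n",
	       (d.port == 0x1111 && d.node == 0x2222) ? "yes" : "no");
	return 0;
}
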
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6ff2254088f6..99ee419210ba 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
},
{
.cmd = TIPC_NL_SOCK_GET,
+ .start = tipc_dump_start,
.dumpit = tipc_nl_sk_dump,
+ .done = tipc_dump_done,
.policy = tipc_nl_policy,
},
{
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c1e93c9515bc..ab7a2a7178f7 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2672,6 +2672,8 @@ void tipc_sk_reinit(struct net *net)
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
+
+ rhashtable_walk_exit(&iter);
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3227,45 +3229,69 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
- struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = tipc_net(net);
- const struct bucket_table *tbl;
- u32 prev_portid = cb->args[1];
- u32 tbl_id = cb->args[0];
- struct rhash_head *pos;
+ struct rhashtable_iter *iter = (void *)cb->args[0];
struct tipc_sock *tsk;
int err;
- rcu_read_lock();
- tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
- for (; tbl_id < tbl->size; tbl_id++) {
- rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
- spin_lock_bh(&tsk->sk.sk_lock.slock);
- if (prev_portid && prev_portid != tsk->portid) {
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ rhashtable_walk_start(iter);
+ while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ if (err == -EAGAIN) {
+ err = 0;
continue;
}
+ break;
+ }
- err = skb_handler(skb, cb, tsk);
- if (err) {
- prev_portid = tsk->portid;
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
- goto out;
- }
-
- prev_portid = 0;
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(iter);
+ lock_sock(&tsk->sk);
+ err = skb_handler(skb, cb, tsk);
+ if (err) {
+ release_sock(&tsk->sk);
+ sock_put(&tsk->sk);
+ goto out;
}
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(iter);
+ sock_put(&tsk->sk);
}
+ rhashtable_walk_stop(iter);
out:
- rcu_read_unlock();
- cb->args[0] = tbl_id;
- cb->args[1] = prev_portid;
-
return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);
+int tipc_dump_start(struct netlink_callback *cb)
+{
+ struct rhashtable_iter *iter = (void *)cb->args[0];
+ struct net *net = sock_net(cb->skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!iter) {
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+
+ cb->args[0] = (long)iter;
+ }
+
+ rhashtable_walk_enter(&tn->sk_rht, iter);
+ return 0;
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+ struct rhashtable_iter *hti = (void *)cb->args[0];
+
+ rhashtable_walk_exit(hti);
+ kfree(hti);
+ return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
struct tipc_sock *tsk, u32 sk_filter_state,
u64 (*tipc_diag_gen_cookie)(struct sock *sk))
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index aff9b2ae5a1f..d43032e26532 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -68,4 +68,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int (*skb_handler)(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int tipc_dump_done(struct netlink_callback *cb);
#endif
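
The tipc_dump_start()/tipc_dump_done() pair added above allocates per-dump iterator state, parks it in the callback's args slot so it survives across resumable dump calls, and frees it when the dump finishes. A hedged userspace analogue of that lifecycle (dump_cb, cursor and the batch size of 3 are invented; args[0] merely mirrors the kernel's cb->args[0] convention):

#include <stdio.h>
#include <stdlib.h>

struct dump_cb { long args[1]; };	/* stand-in for netlink_callback */
struct cursor  { int next; };

static int dump_start(struct dump_cb *cb)
{
	struct cursor *it = calloc(1, sizeof(*it));

	if (!it)
		return -1;
	cb->args[0] = (long)it;		/* state survives between dump calls */
	return 0;
}

/* Emit up to 3 items per call; return 0 once the walk is finished. */
static int dump_step(struct dump_cb *cb, int total)
{
	struct cursor *it = (struct cursor *)cb->args[0];
	int emitted = 0;

	while (it->next < total && emitted < 3) {
		printf("item %d\n", it->next++);
		emitted++;
	}
	return emitted;
}

static void dump_done(struct dump_cb *cb)
{
	free((struct cursor *)cb->args[0]);
	cb->args[0] = 0;
}

int main(void)
{
	struct dump_cb cb = { { 0 } };

	if (dump_start(&cb))
		return 1;
	while (dump_step(&cb, 7))	/* position is kept across calls */
		;
	dump_done(&cb);
	return 0;
}
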
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index c8e34ef22c30..2627b5d812e9 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
conn_put(con);
}
-/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
- * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
*/
void tipc_topsrv_queue_evt(struct net *net, int conid,
u32 event, struct tipc_event *evt)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 93c0c225ab34..180b6640e531 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
- /* We are already sending pages, ignore notification */
- if (ctx->in_tcp_sendpages)
+ /* If in_tcp_sendpages call lower protocol write space handler
+ * to ensure we wake up any waiting operations there. For example
+ * if do_tcp_sendpages were to call sk_wait_event.
+ */
+ if (ctx->in_tcp_sendpages) {
+ ctx->sk_write_space(sk);
return;
+ }
if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;
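
The tls_write_space() change above forwards the notification to the saved lower-layer callback instead of swallowing it while a send is in flight. A small hedged sketch of that callback-chaining pattern (sock_sketch and its fields are invented, not the TLS context):

#include <stdbool.h>
#include <stdio.h>

struct sock_sketch {
	bool in_lower_send;
	void (*orig_write_space)(struct sock_sketch *sk);
};

static void lower_write_space(struct sock_sketch *sk)
{
	(void)sk;
	printf("lower layer woken up\n");
}

static void upper_write_space(struct sock_sketch *sk)
{
	if (sk->in_lower_send) {
		sk->orig_write_space(sk);	/* forward instead of dropping */
		return;
	}
	printf("upper layer handles write space\n");
}

int main(void)
{
	struct sock_sketch sk = { .in_lower_send = true,
				  .orig_write_space = lower_write_space };

	upper_write_space(&sk);		/* forwarded to the lower layer */
	sk.in_lower_send = false;
	upper_write_space(&sk);		/* handled locally */
	return 0;
}
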
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5fb9b7dd9831..4b8ec659e797 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
goto nla_put_failure;
if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
- rule->wmm_rule->client[j].cw_min) ||
+ rule->wmm_rule.client[j].cw_min) ||
nla_put_u16(msg, NL80211_WMMR_CW_MAX,
- rule->wmm_rule->client[j].cw_max) ||
+ rule->wmm_rule.client[j].cw_max) ||
nla_put_u8(msg, NL80211_WMMR_AIFSN,
- rule->wmm_rule->client[j].aifsn) ||
- nla_put_u8(msg, NL80211_WMMR_TXOP,
- rule->wmm_rule->client[j].cot))
+ rule->wmm_rule.client[j].aifsn) ||
+ nla_put_u16(msg, NL80211_WMMR_TXOP,
+ rule->wmm_rule.client[j].cot))
goto nla_put_failure;
nla_nest_end(msg, nl_wmm_rule);
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if (large) {
const struct ieee80211_reg_rule *rule =
- freq_reg_info(wiphy, chan->center_freq);
+ freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
- if (!IS_ERR(rule) && rule->wmm_rule) {
+ if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
if (nl80211_msg_put_wmm_rules(msg, rule))
goto nla_put_failure;
}
@@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_MDID] ||
+ !info->attrs[NL80211_ATTR_IE] ||
!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4fc66a117b7d..2f702adf2912 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
reg_copy_regd(const struct ieee80211_regdomain *src_regd)
{
struct ieee80211_regdomain *regd;
- int size_of_regd, size_of_wmms;
+ int size_of_regd;
unsigned int i;
- struct ieee80211_wmm_rule *d_wmm, *s_wmm;
size_of_regd =
sizeof(struct ieee80211_regdomain) +
src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
- size_of_wmms = src_regd->n_wmm_rules *
- sizeof(struct ieee80211_wmm_rule);
- regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+ regd = kzalloc(size_of_regd, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
- d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
- s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
- memcpy(d_wmm, s_wmm, size_of_wmms);
-
- for (i = 0; i < src_regd->n_reg_rules; i++) {
+ for (i = 0; i < src_regd->n_reg_rules; i++)
memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
- if (!src_regd->reg_rules[i].wmm_rule)
- continue;
- regd->reg_rules[i].wmm_rule = d_wmm +
- (src_regd->reg_rules[i].wmm_rule - s_wmm) /
- sizeof(struct ieee80211_wmm_rule);
- }
return regd;
}
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
return true;
}
-static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
+static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
struct fwdb_wmm_rule *wmm)
{
+ struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
unsigned int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
rule->ap[i].aifsn = wmm->ap[i].aifsn;
rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
}
+
+ rrule->has_wmm = true;
}
static int __regdb_query_wmm(const struct fwdb_header *db,
const struct fwdb_country *country, int freq,
- u32 *dbptr, struct ieee80211_wmm_rule *rule)
+ struct ieee80211_reg_rule *rule)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
wmm = (void *)((u8 *)db + wmm_ptr);
set_wmm_rule(rule, wmm);
- if (dbptr)
- *dbptr = wmm_ptr;
return 0;
}
}
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
return -ENODATA;
}
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
- struct ieee80211_wmm_rule *rule)
+int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
{
const struct fwdb_header *hdr = regdb;
const struct fwdb_country *country;
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
country = &hdr->country[0];
while (country->coll_ptr) {
if (alpha2_equal(alpha2, country->alpha2))
- return __regdb_query_wmm(regdb, country, freq, dbptr,
- rule);
+ return __regdb_query_wmm(regdb, country, freq, rule);
country++;
}
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
}
EXPORT_SYMBOL(reg_query_regdb_wmm);
-struct wmm_ptrs {
- struct ieee80211_wmm_rule *rule;
- u32 ptr;
-};
-
-static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
- u32 wmm_ptr, int n_wmms)
-{
- int i;
-
- for (i = 0; i < n_wmms; i++) {
- if (wmm_ptrs[i].ptr == wmm_ptr)
- return wmm_ptrs[i].rule;
- }
- return NULL;
-}
-
static int regdb_query_country(const struct fwdb_header *db,
const struct fwdb_country *country)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
struct ieee80211_regdomain *regdom;
- struct ieee80211_regdomain *tmp_rd;
- unsigned int size_of_regd, i, n_wmms = 0;
- struct wmm_ptrs *wmm_ptrs;
+ unsigned int size_of_regd, i;
size_of_regd = sizeof(struct ieee80211_regdomain) +
coll->n_rules * sizeof(struct ieee80211_reg_rule);
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
if (!regdom)
return -ENOMEM;
- wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
- if (!wmm_ptrs) {
- kfree(regdom);
- return -ENOMEM;
- }
-
regdom->n_reg_rules = coll->n_rules;
regdom->alpha2[0] = country->alpha2[0];
regdom->alpha2[1] = country->alpha2[1];
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
1000 * be16_to_cpu(rule->cac_timeout);
if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
- struct ieee80211_wmm_rule *wmm_pos =
- find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
- struct fwdb_wmm_rule *wmm;
- struct ieee80211_wmm_rule *wmm_rule;
-
- if (wmm_pos) {
- rrule->wmm_rule = wmm_pos;
- continue;
- }
- wmm = (void *)((u8 *)db + wmm_ptr);
- tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
- sizeof(struct ieee80211_wmm_rule),
- GFP_KERNEL);
-
- if (!tmp_rd) {
- kfree(regdom);
- kfree(wmm_ptrs);
- return -ENOMEM;
- }
- regdom = tmp_rd;
-
- wmm_rule = (struct ieee80211_wmm_rule *)
- ((u8 *)regdom + size_of_regd + n_wmms *
- sizeof(struct ieee80211_wmm_rule));
+ struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
- set_wmm_rule(wmm_rule, wmm);
- wmm_ptrs[n_wmms].ptr = wmm_ptr;
- wmm_ptrs[n_wmms++].rule = wmm_rule;
+ set_wmm_rule(rrule, wmm);
}
}
- kfree(wmm_ptrs);
return reg_schedule_apply(regdom);
}
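
The reg.c rework above embeds the WMM data in each regulatory rule and marks it with has_wmm, replacing the separately allocated side array that had to be reallocated and pointer-fixed whenever a regdomain was copied. A hedged sketch of that layout choice (rule_sketch and wmm_sketch are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct wmm_sketch { int cw_min, cw_max; };

struct rule_sketch {
	int start_khz, end_khz;
	bool has_wmm;			/* valid only when set */
	struct wmm_sketch wmm;		/* embedded by value: no pointer fix-up on copy */
};

int main(void)
{
	struct rule_sketch src = {
		.start_khz = 5170000, .end_khz = 5250000,
		.has_wmm = true, .wmm = { .cw_min = 3, .cw_max = 7 },
	};
	struct rule_sketch copy;

	memcpy(&copy, &src, sizeof(copy));	/* a plain copy stays self-contained */
	if (copy.has_wmm)
		printf("cw_min=%d cw_max=%d\n", copy.wmm.cw_min, copy.wmm.cw_max);
	return 0;
}
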
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e0825a019e9f..959ed3acd240 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
u8 vht_opclass;
- u16 freq = chandef->center_freq1;
+ u32 freq = chandef->center_freq1;
if (freq >= 2412 && freq <= 2472) {
if (chandef->width > NL80211_CHAN_WIDTH_40)
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 911ca6d3cb5a..bfe2dbea480b 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
return 0;
if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
- return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+ return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
bpf.command = XDP_QUERY_XSK_UMEM;
rtnl_lock();
err = xdp_umem_query(dev, queue_id);
if (err) {
- err = err < 0 ? -ENOTSUPP : -EBUSY;
+ err = err < 0 ? -EOPNOTSUPP : -EBUSY;
goto err_rtnl_unlock;
}
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 1c48572223d1..5a2d1c9578a0 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -246,8 +246,6 @@ objtool_args += --no-fp
endif
ifdef CONFIG_GCOV_KERNEL
objtool_args += --no-unreachable
-else
-objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
endif
ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_CFLAGS),)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 5219280bf7ff..161b0224d6ae 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -380,6 +380,7 @@ our $Attribute = qr{
__noclone|
__deprecated|
__read_mostly|
+ __ro_after_init|
__kprobes|
$InitAttribute|
____cacheline_aligned|
@@ -3311,7 +3312,7 @@ sub process {
# known declaration macros
$sline =~ /^\+\s+$declaration_macros/ ||
# start of struct or union or enum
- $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
+ $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
# start or end of block or continuation of declaration
$sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
# bitfield continuation
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 999d585eaa73..e083bcae343f 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -11,13 +11,14 @@ DEPMOD=$1
KERNELRELEASE=$2
if ! test -r System.map ; then
+ echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
exit 0
fi
if [ -z $(command -v $DEPMOD) ]; then
- echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
echo "This is probably in the kmod package." >&2
- exit 1
+ exit 0
fi
# older versions of depmod require the version string to start with three
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 4a7bd2192073..67ed9f6ccdf8 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
# check if necessary packages are available, and configure build flags
define filechk_conf_cfg
- $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
$(CONFIG_SHELL) $<
endef
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644
index 7a1c40bfb58c..000000000000
--- a/scripts/kconfig/check-pkgconfig.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Check for pkg-config presence
-
-if [ -z $(command -v pkg-config) ]; then
- echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
- exit 1
-fi
diff --git a/scripts/kconfig/gconf-cfg.sh b/scripts/kconfig/gconf-cfg.sh
index 533b3d8f8f08..480ecd8b9f41 100755
--- a/scripts/kconfig/gconf-cfg.sh
+++ b/scripts/kconfig/gconf-cfg.sh
@@ -3,6 +3,13 @@
PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
+if [ -z "$(command -v pkg-config)" ]; then
+ echo >&2 "*"
+ echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
+ echo >&2 "*"
+ exit 1
+fi
+
if ! pkg-config --exists $PKG; then
echo >&2 "*"
echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index e6f9facd0077..c812872d7f9d 100755
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -4,20 +4,23 @@
PKG="ncursesw"
PKG2="ncurses"
-if pkg-config --exists $PKG; then
- echo cflags=\"$(pkg-config --cflags $PKG)\"
- echo libs=\"$(pkg-config --libs $PKG)\"
- exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+ if pkg-config --exists $PKG; then
+ echo cflags=\"$(pkg-config --cflags $PKG)\"
+ echo libs=\"$(pkg-config --libs $PKG)\"
+ exit 0
+ fi
-if pkg-config --exists $PKG2; then
- echo cflags=\"$(pkg-config --cflags $PKG2)\"
- echo libs=\"$(pkg-config --libs $PKG2)\"
- exit 0
+ if pkg-config --exists $PKG2; then
+ echo cflags=\"$(pkg-config --cflags $PKG2)\"
+ echo libs=\"$(pkg-config --libs $PKG2)\"
+ exit 0
+ fi
fi
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
if [ -f /usr/include/ncursesw/ncurses.h ]; then
echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
echo libs=\"-lncursesw\"
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 83b5836615fb..143c05fec161 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
switch (prop->type) {
case P_MENU:
child_count++;
- prompt = prompt;
if (single_menu_mode) {
item_make("%s%*c%s",
menu->data ? "-->" : "++>",
diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh
index 42f5ac73548e..001559ef0a60 100644
--- a/scripts/kconfig/nconf-cfg.sh
+++ b/scripts/kconfig/nconf-cfg.sh
@@ -4,20 +4,23 @@
PKG="ncursesw menuw panelw"
PKG2="ncurses menu panel"
-if pkg-config --exists $PKG; then
- echo cflags=\"$(pkg-config --cflags $PKG)\"
- echo libs=\"$(pkg-config --libs $PKG)\"
- exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+ if pkg-config --exists $PKG; then
+ echo cflags=\"$(pkg-config --cflags $PKG)\"
+ echo libs=\"$(pkg-config --libs $PKG)\"
+ exit 0
+ fi
-if pkg-config --exists $PKG2; then
- echo cflags=\"$(pkg-config --cflags $PKG2)\"
- echo libs=\"$(pkg-config --libs $PKG2)\"
- exit 0
+ if pkg-config --exists $PKG2; then
+ echo cflags=\"$(pkg-config --cflags $PKG2)\"
+ echo libs=\"$(pkg-config --libs $PKG2)\"
+ exit 0
+ fi
fi
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
if [ -f /usr/include/ncursesw/ncurses.h ]; then
echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
echo libs=\"-lncursesw -lmenuw -lpanelw\"
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh
index 0862e1562536..02ccc0ae1031 100755
--- a/scripts/kconfig/qconf-cfg.sh
+++ b/scripts/kconfig/qconf-cfg.sh
@@ -4,6 +4,13 @@
PKG="Qt5Core Qt5Gui Qt5Widgets"
PKG2="QtCore QtGui"
+if [ -z "$(command -v pkg-config)" ]; then
+ echo >&2 "*"
+ echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
+ echo >&2 "*"
+ exit 1
+fi
+
if pkg-config --exists $PKG; then
echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
echo libs=\"$(pkg-config --libs $PKG)\"
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index fe06e77c15eb..f599031260d5 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
$type = ".quad";
$alignment = 2;
+} elsif ($arch eq "nds32") {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
+ $alignment = 2;
} else {
die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
}
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 71f39410691b..79f7dd57d571 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -74,7 +74,7 @@ scm_version()
fi
# Check for uncommitted changes
- if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+ if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
printf '%s' -dirty
fi
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index f2f22d00db18..4ccec1bcf6f5 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
struct aa_label *label = aa_secid_to_label(secid);
int len;
- AA_BUG(!secdata);
AA_BUG(!seclen);
if (!label)
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 711e89d8c415..3b602a1e27fa 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
}
dh_inputs.g_size = dlen;
- dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+ dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
if (dlen < 0) {
ret = dlen;
goto out2;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 69517e18ef07..08d5662039e3 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
runtime->avail = 0;
else
runtime->avail = runtime->buffer_size;
- runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL);
+ runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL);
if (!runtime->buffer) {
kfree(runtime);
return -ENOMEM;
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
if (params->avail_min < 1 || params->avail_min > params->buffer_size)
return -EINVAL;
if (params->buffer_size != runtime->buffer_size) {
- newbuf = kvmalloc(params->buffer_size, GFP_KERNEL);
+ newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
spin_lock_irq(&runtime->lock);
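
The rawmidi switch from kvmalloc() to kvzalloc() above zero-initialises a buffer that may be read before every byte has been written. A hedged userspace illustration, with calloc() playing the role of the zeroing allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 64;
	unsigned char *buf = calloc(1, size);	/* zeroed, like kvzalloc() */

	if (!buf)
		return 1;

	memcpy(buf, "abc", 3);			/* only part of the buffer written */

	/* Reading past the written region now yields zeros, not stale heap data. */
	printf("byte[3]=%u byte[63]=%u\n", buf[3], buf[63]);
	free(buf);
	return 0;
}
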
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 1bd27576db98..a835558ddbc9 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
*/
void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream)
{
- snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN);
+ snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+ AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start);
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream)
snd_hdac_ext_link_stream_clear(stream);
- snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST);
+ snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+ AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
udelay(3);
timeout = 50;
do {
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id);
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int stream)
{
- snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream));
+ snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
bool enable, int index)
{
u32 mask = 0;
- u32 register_mask = 0;
if (!bus->spbcap) {
dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
mask |= (1 << index);
- register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
-
- mask |= register_mask;
-
if (enable)
- snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
+ snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask);
else
snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
}
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index)
{
u32 mask = 0;
- u32 register_mask = 0;
if (!bus->drsmcap) {
dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
mask |= (1 << index);
- register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
-
- mask |= register_mask;
-
if (enable)
- snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
+ snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask);
else
snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
}
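
The corrected calls above follow the masked read-modify-write convention: the mask selects which bits may change and the value supplies their new state, so "set bit" is (mask = bit, val = bit) and "clear bit" is (mask = bit, val = 0). A minimal generic helper demonstrating that convention (update_bits is not the kernel's snd_hdac_updatel() implementation):

#include <stdint.h>
#include <stdio.h>

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0x80;		/* some unrelated bit already set */
	const uint32_t RUN = 1u << 1;

	update_bits(&reg, RUN, RUN);	/* set RUN, leave other bits alone */
	printf("after set:   0x%02x\n", reg);

	update_bits(&reg, RUN, 0);	/* clear RUN, leave other bits alone */
	printf("after clear: 0x%02x\n", reg);
	return 0;
}
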
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 0a5085537034..26d348b47867 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
list_for_each_codec(codec, bus) {
/* FIXME: maybe a better way needed for forced reset */
- cancel_delayed_work_sync(&codec->jackpoll_work);
+ if (current_work() != &codec->jackpoll_work.work)
+ cancel_delayed_work_sync(&codec->jackpoll_work);
#ifdef CONFIG_PM
if (hda_codec_is_power_on(codec)) {
hda_call_codec_suspend(codec);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b2ec20e562bd..b455930a3eaf 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_DEVMAP] = "devmap",
[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
[BPF_MAP_TYPE_CPUMAP] = "cpumap",
+ [BPF_MAP_TYPE_XSKMAP] = "xskmap",
[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
};
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 1832100d1b27..6d41323be291 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
}
while (argc) {
- if (argc < 2)
+ if (argc < 2) {
BAD_ARG();
+ goto err_close_map;
+ }
if (is_prefix(*argv, "cpu")) {
char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
NEXT_ARG();
} else {
BAD_ARG();
+ goto err_close_map;
}
do_all = false;
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 56c4b3f8a01b..439b8a27488d 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider):
if len(vms) == 0:
self.do_read = False
- self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+ self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
else:
self.paths = []
self.do_read = True
- self.reset()
+
+ def _verify_paths(self):
+ """Remove invalid paths"""
+ for path in self.paths:
+ if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
+ self.paths.remove(path)
+ continue
def read(self, reset=0, by_guest=0):
"""Returns a dict with format:'file name / field -> current value'.
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider):
# If no debugfs filtering support is available, then don't read.
if not self.do_read:
return results
+ self._verify_paths()
paths = self.paths
if self._pid == 0:
@@ -1096,15 +1103,16 @@ class Tui(object):
pid = self.stats.pid_filter
self.screen.erase()
gname = self.get_gname_from_pid(pid)
+ self._gname = gname
if gname:
gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
if len(gname) > MAX_GUEST_NAME_LEN
else gname))
if pid > 0:
- self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}'
- .format(pid, gname), curses.A_BOLD)
+ self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
else:
- self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+ self._headline = 'kvm statistics - summary'
+ self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
if self.stats.fields_filter:
regex = self.stats.fields_filter
if len(regex) > MAX_REGEX_LEN:
@@ -1162,6 +1170,19 @@ class Tui(object):
return sorted_items
+ if not self._is_running_guest(self.stats.pid_filter):
+ if self._gname:
+ try: # ...to identify the guest by name in case it's back
+ pids = self.get_pid_from_gname(self._gname)
+ if len(pids) == 1:
+ self._refresh_header(pids[0])
+ self._update_pid(pids[0])
+ return
+ except:
+ pass
+ self._display_guest_dead()
+ # leave final data on screen
+ return
row = 3
self.screen.move(row, 0)
self.screen.clrtobot()
@@ -1184,6 +1205,7 @@ class Tui(object):
# print events
tavg = 0
tcur = 0
+ guest_removed = False
for key, values in get_sorted_events(self, stats):
if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
break
@@ -1191,7 +1213,10 @@ class Tui(object):
key = self.get_gname_from_pid(key)
if not key:
continue
- cur = int(round(values.delta / sleeptime)) if values.delta else ''
+ cur = int(round(values.delta / sleeptime)) if values.delta else 0
+ if cur < 0:
+ guest_removed = True
+ continue
if key[0] != ' ':
if values.delta:
tcur += values.delta
@@ -1204,13 +1229,21 @@ class Tui(object):
values.value * 100 / float(ltotal), cur))
row += 1
if row == 3:
- self.screen.addstr(4, 1, 'No matching events reported yet')
+ if guest_removed:
+ self.screen.addstr(4, 1, 'Guest removed, updating...')
+ else:
+ self.screen.addstr(4, 1, 'No matching events reported yet')
if row > 4:
tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
self.screen.addstr(row, 1, '%-40s %10d %8s' %
('Total', total, tavg), curses.A_BOLD)
self.screen.refresh()
+ def _display_guest_dead(self):
+ marker = ' Guest is DEAD '
+ y = min(len(self._headline), 80 - len(marker))
+ self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)
+
def _show_msg(self, text):
"""Display message centered text and exit on key press"""
hint = 'Press any key to continue'
@@ -1219,10 +1252,10 @@ class Tui(object):
(x, term_width) = self.screen.getmaxyx()
row = 2
for line in text:
- start = (term_width - len(line)) / 2
+ start = (term_width - len(line)) // 2
self.screen.addstr(row, start, line)
row += 1
- self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
+ self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
curses.A_STANDOUT)
self.screen.getkey()
@@ -1319,6 +1352,12 @@ class Tui(object):
msg = '"' + str(val) + '": Invalid value'
self._refresh_header()
+ def _is_running_guest(self, pid):
+ """Check if pid is still a running process."""
+ if not pid:
+ return True
+ return os.path.isdir(os.path.join('/proc/', str(pid)))
+
def _show_vm_selection_by_guest(self):
"""Draws guest selection mask.
@@ -1346,7 +1385,7 @@ class Tui(object):
if not guest or guest == '0':
break
if guest.isdigit():
- if not os.path.isdir(os.path.join('/proc/', guest)):
+ if not self._is_running_guest(guest):
msg = '"' + guest + '": Not a running process'
continue
pid = int(guest)
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index f8cc38afffa2..32a194e3e07a 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -46,6 +46,9 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+# Some systems don't have a ping6 binary anymore
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
tests="
pmtu_vti6_exception vti6: PMTU exceptions
pmtu_vti4_exception vti4: PMTU exceptions
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
- ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+ ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
# Check that exception was created
if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
fail=0
min=68
- max=$((65528 - 20))
+ max=$((65535 - 20))
# Check invalid values first
for v in $((min - 1)) $((max + 1)); do
${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index f03763d81617..30f9b54bd666 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -313,6 +313,54 @@
]
},
{
+ "id": "6aaf",
+ "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action police index 1",
+ "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
+ },
+ {
+ "id": "29b1",
+ "name": "Add police actions with conform-exceed control <invalid>/drop",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action police",
+ "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
+ },
+ {
"id": "c26f",
"name": "Add police action with invalid peakrate value",
"category": [
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 30cb0a0713ff..37908a83ddc2 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = {
};
-static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug",
- "/debug",
- 0,
-};
-
/*
* data structures
*/
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index f82c2eaa859d..334b16db0ebb 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -30,8 +30,8 @@ struct slabinfo {
int alias;
int refs;
int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
- int hwcache_align, object_size, objs_per_slab;
- int sanity_checks, slab_size, store_user, trace;
+ unsigned int hwcache_align, object_size, objs_per_slab;
+ unsigned int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
unsigned long partial, objects, slabs, objects_partial, objects_total;
unsigned long alloc_fastpath, alloc_slowpath;
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 91aaf73b00df..ed162a6c57c5 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
return 0;
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- if (!kvm->arch.pgd)
- return 0;
-
- trace_kvm_unmap_hva(hva);
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
- return 0;
-}
-
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
unsigned long end = hva + PAGE_SIZE;
+ kvm_pfn_t pfn = pte_pfn(pte);
pte_t stage2_pte;
if (!kvm->arch.pgd)
return;
trace_kvm_set_spte_hva(hva);
- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+ /*
+ * We've moved a page around, probably through CoW, so let's treat it
+ * just like a translation fault and clean the cache to the PoC.
+ */
+ clean_dcache_guest_page(pfn, PAGE_SIZE);
+ stage2_pte = pfn_pte(pfn, PAGE_S2);
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index e53b596f483b..57b3edebbb40 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
TRACE_EVENT(kvm_unmap_hva_range,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),